file_name (string, 4-140 chars) | prefix (string, 0-12.1k chars) | suffix (string, 0-12k chars) | middle (string, 0-7.51k chars) | fim_type (4 classes)
---|---|---|---|---|
Transformer_prac.py | Uses LogSoftmax
self.const = np.sqrt(d_k) # d_k is the key/query dimension; dividing by sqrt(d_k) keeps the dot products from saturating the softmax
def forward(self, Q, K, V, att_mask): # att_mask marks the padded (or future) positions that must be ignored
score = torch.matmul(Q,K.transpose(-1,-2))/self.const # transpose: swaps the given dim0 and dim1
score.masked_fill_(att_mask, -1e9) # masked!
# masked_fill_(mask, value): mask is boolean; positions where the mask is True are filled with value
attn = self.softmax(score) # attn = attention distribution
context = torch.matmul(attn, V)
return context, attn
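# For reference, the three lines above are exactly scaled dot-product attention:
#   Attention(Q, K, V) = softmax(Q @ K^T / sqrt(d_k)) @ V
# with masked positions pushed to -1e9 before the softmax so they receive ~zero weight.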
############################################################
# What is self?
# class Foo:
# def func1(): # defining a method without a self parameter is not an error by itself
# print("fuckck")
# def func2(self):
# print("fuck!!")
# f = Foo() # create an instance of the class
# f.func2() => prints normally # instance method call -> func2's only parameter is self, so no argument is needed
# no argument is needed because Python automatically passes the instance as the value of the method's self parameter
# f.func1() -> raises an error (in Python 3: TypeError: func1() takes 0 positional arguments but 1 was given), because Python still passes the instance automatically even though func1 declares no parameter
# inside a class, self refers to the instance on which the method is called!
############################################################
class MultiHeadAttention(nn.Module):
def __init__(self):
super(MultiHeadAttention, self).__init__() # d_v = d_k
self.W_Q = nn.Linear(d_model, d_k * n_head) # parallelized over n_head heads # output size is d_k * n_head because the heads are concatenated afterwards
self.W_K = nn.Linear(d_model, d_k * n_head) #
self.W_V = nn.Linear(d_model, d_k * n_head)
def forward(self,Q, K, V, att_mask): # in the encoder Q, K and V all come from the same place; in the decoder's cross-attention K and V are the same but Q differs
residual = Q
batch_size = Q.size(0)
q_s = self.W_Q(Q).view(batch_size, -1, n_head, d_k).transpose(1,2)
k_s = self.W_K(K).view(batch_size, -1, n_head, d_k).transpose(1,2)
v_s = self.W_V(V).view(batch_size, -1, n_head, d_v).transpose(1,2)
att_mask = att_mask.unsqueeze(1).repeat(1, n_head, 1,1) # unsqueeze(1) inserts a head dimension, repeat copies the mask once per head
context, attn = ScaledDotProduct()(q_s, k_s, v_s, att_mask)
context = context.transpose(1,2).contiguous().view(batch_size, -1, n_head * d_v)
# contiguous(): returns a contiguous tensor holding the same data as self
# (needed here because transpose returns a non-contiguous view and .view() requires contiguous memory)
output = nn.Linear(n_head*d_v, d_model)(context) # pass the concatenated heads through one more weight matrix
return nn.LayerNorm(d_model)(output + residual), attn # LayerNorm is constructed with the feature size, then applied to the tensor
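# Note: building nn.Linear / nn.LayerNorm inside forward() creates fresh, untrained modules on
# every call, so their weights are never learned. A more conventional sketch (illustrative only)
# registers them once in __init__, e.g.
#   self.out_proj = nn.Linear(n_head * d_v, d_model)
#   self.layer_norm = nn.LayerNorm(d_model)
# and then reuses self.out_proj(context) and self.layer_norm(output + residual) here.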
class PositionwiseFFNN(nn.Module):
def __init__(self):
super(PositionwiseFFNN, self).__init__() # what is Conv1d, and how does it differ from Conv2d? (see the note after this class)
# W1 = d_model x d_ff
self.linear1 = nn.Conv1d(in_channels = d_model, out_channels = d_ff, kernel_size=1)
# W2 = d_ff x d_model
self.linear2 = nn.Conv1d(in_channels = d_ff, out_channels = d_model, kernel_size=1)
self.relu = nn.ReLU()
def forward(self, input):
residual = input
output = self.linear1(input.transpose(1,2))
output = self.relu(output)
output = self.linear2(output).transpose(1,2)
return nn.LayerNorm(d_model)(output + residual)
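# Conv1d slides a kernel over one (sequence) dimension, Conv2d over two (spatial) dimensions.
# With kernel_size=1, Conv1d applies the same weight matrix independently at every position,
# so this position-wise FFN could equivalently be written with Linear layers and no transposes.
# Rough sketch (illustrative only, reusing the same d_model / d_ff hyperparameters):
#   ffn = nn.Sequential(nn.Linear(d_model, d_ff), nn.ReLU(), nn.Linear(d_ff, d_model))
#   output = ffn(input) # input: (batch, seq_len, d_model)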
class EncoderLayer(nn.Module):
def __init__(self):
super(EncoderLayer,self).__init__()
self.enc_self_attn = MultiHeadAttention()
self.PWfeedforward = PositionwiseFFNN()
def forward(self, enc_input, enc_self_attn_mask):
enc_output, attn = self.enc_self_attn(enc_input, enc_input, enc_input, enc_self_attn_mask)
enc_output = self.PWfeedforward(enc_output)
return enc_output, attn
class Encoder(nn.Module):
def __init__(self):
super(Encoder,self).__init__()
self.src_emb = nn.Embedding(src_voca_size, d_model)
# Embedding: holds a lookup table used to turn token indices into embedding vectors
self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(src_len+1, d_model),freeze = True)
self.layer = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])
def forward(self, enc_input):
enc_output = self.src_emb(enc_input)+self.pos_emb(torch.LongTensor([[1,2,3,4,0]]))
enc_self_attn_mask = get_attn_pad_mask(enc_input, enc_input)
enc_self_attns = []
for layer in self.layer:
enc_output, enc_self_attn = layer(enc_output, enc_self_attn_mask)
enc_self_attns.append(enc_self_attn) # append collects the per-layer attention maps, similar in spirit to concatenating them
return enc_output, enc_self_attns
class DecoderLayer(nn.Module):
def __init__(self):
super(DecoderLayer, self).__init__()
self.dec_self_attn = MultiHeadAttention()
self.dec_enc_attn = MultiHeadAttention()
self.PWfeedforward = PositionwiseFFNN()
def forward(self, dec_input, enc_output, dec_self_attn_mask, dec_enc_attn_mask):
dec_output, dec_self_attn = self.dec_self_attn(dec_input, dec_input, dec_input, dec_self_attn_mask)
dec_output, dec_enc_attn = self.dec_enc_attn(dec_output, enc_output, enc_output, dec_enc_attn_mask)
dec_output = self.PWfeedforward(dec_output)
return dec_output, dec_self_attn, dec_enc_attn
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.tgt_emb = nn.Embedding(tgt_voca_size, d_model)
self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(tgt_len+1, d_model), freeze = True)
self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])
def forward(self, dec_input, enc_input, enc_output):
dec_output = self.tgt_emb(dec_input)+self.pos_emb(torch.LongTensor([[5,1,2,3,4]]))
dec_self_attn_pad_mask = get_attn_pad_mask(dec_input, dec_input)
dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_input)
dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask+dec_self_attn_subsequent_mask),0)
dec_enc_attn_mask = get_attn_pad_mask(dec_input, enc_input)
dec_self_attns, dec_enc_attns = [],[]
for layer in self.layers:
dec_output, dec_self_attn, dec_enc_attn = layer(dec_output, enc_output, dec_self_attn_mask, dec_enc_attn_mask)
dec_self_attns.append(dec_self_attn)
dec_enc_attns.append(dec_enc_attn)
return dec_output, dec_self_attns, dec_enc_attns
class Transformer(nn.Module):
def __init__(self):
super(Transformer, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder()
self.projection = nn.Linear(d_model, tgt_voca_size, bias = False)
self.softmax = nn.Softmax()
def forward(self, enc_input, dec_input):
enc_output, enc_self_attn = self.encoder(enc_input)
dec_output, dec_self_attn, dec_enc_attn = self.decoder(dec_input, enc_input, enc_output)
dec_logit = self.projection(dec_output)
return dec_logit.view(-1, dec_logit.size(-1)), enc_self_attn, dec_self_attn, dec_enc_attn
model = Transformer()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr = 0.001)
for epoch in range(20):
optimizer.zero_grad()
| enc_input, dec_input, target_batch = make_batch(sentence)
outputs, enc_self_attns, dec_self_attns, dec_enc_attns = model(enc_input, dec_input)
loss = criterion(outputs, target_batch.contiguous().view(-1))
print('Epoch:','%04d'%(epoch+1), 'cost =', '{:.6f}'.format(loss))
loss.backward()
| random_line_split |
|
importer.go | the cfg.
ModifiedBy string `gae:"modified_by"`
// ModifiedTS is the time when this entity was last modified.
ModifiedTS time.Time `gae:"modified_ts"`
}
var GroupNameRe = regexp.MustCompile(`^([a-z\-]+/)?[0-9a-z_\-\.@]{1,100}$`)
// GroupBundle is a map where k: groupName, v: list of identities belonging to group k.
type GroupBundle = map[string][]identity.Identity
// GetGroupImporterConfig fetches the GroupImporterConfig entity from the datastore.
//
// Returns GroupImporterConfig entity if present.
// Returns datastore.ErrNoSuchEntity if the entity is not present.
// Returns annotated error for all other errors.
func GetGroupImporterConfig(ctx context.Context) (*GroupImporterConfig, error) {
groupsCfg := &GroupImporterConfig{
Kind: "GroupImporterConfig",
ID: "config",
}
switch err := datastore.Get(ctx, groupsCfg); {
case err == nil:
return groupsCfg, nil
case err == datastore.ErrNoSuchEntity:
return nil, err
default:
return nil, errors.Annotate(err, "error getting GroupImporterConfig").Err()
}
}
// IngestTarball handles uploads of tarballs specified in 'tarball_upload' config entries.
// It is expected to be called in the auth context of the upload PUT request.
//
// returns
//
// []string - list of modified groups
// int64 - authDBRevision
// error
// proto translation error
// entry is nil
// entry not found in tarball upload config
// unauthorized uploader
// bad tarball structure
func IngestTarball(ctx context.Context, name string, content io.Reader) ([]string, int64, error) | if entry == nil {
return nil, 0, errors.New("entry is nil")
}
if entry.Name == "" {
return nil, 0, errors.New("entry not found in tarball upload names")
}
if !contains(caller.Email(), entry.AuthorizedUploader) {
return nil, 0, errors.New(fmt.Sprintf("%q is not an authorized uploader", caller.Email()))
}
bundles, err := loadTarball(ctx, content, entry.GetDomain(), entry.GetSystems(), entry.GetGroups())
if err != nil {
return nil, 0, errors.Annotate(err, "bad tarball").Err()
}
return importBundles(ctx, bundles, caller, nil)
}
// loadTarball unzips tarball with groups and deserializes them.
func loadTarball(ctx context.Context, content io.Reader, domain string, systems, groups []string) (map[string]GroupBundle, error) {
// map looks like: K: system, V: { K: groupName, V: []identities }
bundles := make(map[string]GroupBundle)
entries, err := extractTarArchive(content)
if err != nil {
return nil, err
}
// verify system/groupname and then parse blob if valid
for filename, fileobj := range entries {
chunks := strings.Split(filename, "/")
if len(chunks) != 2 || !GroupNameRe.MatchString(chunks[1]) {
logging.Warningf(ctx, "Skipping file %s, not a valid name", filename)
continue
}
if groups != nil && !contains(filename, groups) {
continue
}
system := chunks[0]
if !contains(system, systems) {
logging.Warningf(ctx, "Skipping file %s, not allowed", filename)
continue
}
identities, err := loadGroupFile(string(fileobj), domain)
if err != nil {
return nil, err
}
if _, ok := bundles[system]; !ok {
bundles[system] = make(GroupBundle)
}
bundles[system][filename] = identities
}
return bundles, nil
}
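// Note: contains is a small helper not shown in this excerpt; presumably it is just a
// linear search over the string slice, along the lines of:
//
//	func contains(needle string, haystack []string) bool {
//		for _, s := range haystack {
//			if s == needle {
//				return true
//			}
//		}
//		return false
//	}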
func loadGroupFile(identities string, domain string) ([]identity.Identity, error) {
members := make(map[identity.Identity]bool)
memsSplit := strings.Split(identities, "\n")
for _, uid := range memsSplit {
uid = strings.TrimSpace(uid)
if uid == "" {
continue
}
var ident string
if domain == "" {
ident = fmt.Sprintf("user:%s", uid)
} else {
ident = fmt.Sprintf("user:%s@%s", uid, domain)
}
emailIdent, err := identity.MakeIdentity(ident)
if err != nil {
return nil, err
}
members[emailIdent] = true
}
membersSorted := make([]identity.Identity, 0, len(members))
for mem := range members {
membersSorted = append(membersSorted, mem)
}
sort.Slice(membersSorted, func(i, j int) bool {
return membersSorted[i].Value() < membersSorted[j].Value()
})
return membersSorted, nil
}
// importBundles imports given set of bundles all at once.
// A bundle is a map with groups that is the result of a processing of some tarball.
// A bundle specifies the desired state of all groups under some system, e.g.
// importBundles({'ldap': {}}, ...) will REMOVE all existing 'ldap/*' groups.
//
// Group names in the bundle are specified in their full prefixed form (with
// system name prefix). An example of expected 'bundles':
//
// {
// 'ldap': {
// 'ldap/group': [Identity(...), Identity(...)],
// },
// }
//
// Args:
//
// bundles: map system name -> GroupBundle
// providedBy: auth.Identity to put in modifiedBy or createdBy fields.
//
// Returns:
//
// (list of modified groups,
// new AuthDB revision number or 0 if no changes,
// error if issue with writing entities).
func importBundles(ctx context.Context, bundles map[string]GroupBundle, providedBy identity.Identity, testHook func()) ([]string, int64, error) {
// Nothing to process.
if len(bundles) == 0 {
return []string{}, 0, nil
}
getAuthDBRevision := func(ctx context.Context) (int64, error) {
state, err := GetReplicationState(ctx)
switch {
case err == datastore.ErrNoSuchEntity:
return 0, nil
case err != nil:
return -1, err
default:
return state.AuthDBRev, nil
}
}
// Fetches all existing groups and AuthDB revision number.
groupsSnapshot := func(ctx context.Context) (gMap map[string]*AuthGroup, rev int64, err error) {
err = datastore.RunInTransaction(ctx, func(ctx context.Context) error {
groups, err := GetAllAuthGroups(ctx)
if err != nil {
return err
}
gMap = make(map[string]*AuthGroup, len(groups))
for _, g := range groups {
gMap[g.ID] = g
}
rev, err = getAuthDBRevision(ctx)
if err != nil {
return errors.Annotate(err, "couldn't get AuthDBRev").Err()
}
return nil
}, nil)
return gMap, rev, err
}
// Transactionally puts and deletes a bunch of entities.
applyImport := func(expectedRevision int64, entitiesToPut, entitiesToDelete []*AuthGroup, ts time.Time) error {
// Runs in transaction.
return runAuthDBChange(ctx, func(ctx context.Context, cae commitAuthEntity) error {
rev, err := getAuthDBRevision(ctx)
if err != nil {
return err
}
// DB changed between transactions try again.
if rev != expectedRevision {
return errors.New("revision numbers don't match")
}
for _, e := range entitiesToPut {
if err := cae(e, ts, providedBy, false); err != nil {
return err
}
}
for _, e := range entitiesToDelete {
if err := cae(e, ts, providedBy, true); err != nil {
return err
}
}
return nil
})
}
updatedGroups := stringset.New(0)
revision := int64(0)
loopCount := 0
var groups map[string]*AuthGroup
var err error
// Try to apply the change in batches until it lands completely or deadline
// happens. Split each batch update into two transactions (assuming AuthDB
// changes infrequently) to avoid reading and writing too much stuff from
// within a single transaction (and to avoid keeping the transaction open while
// calculating the diff).
for {
| {
g, err := GetGroupImporterConfig(ctx)
if err != nil {
return nil, 0, err
}
gConfigProto, err := g.ToProto()
if err != nil {
return nil, 0, errors.Annotate(err, "issue getting proto from config entity").Err()
}
caller := auth.CurrentIdentity(ctx)
var entry *configspb.GroupImporterConfig_TarballUploadEntry
// make sure that tarball_upload entry we're looking for is specified in config
for _, tbu := range gConfigProto.GetTarballUpload() {
if tbu.Name == name {
entry = tbu
break
}
}
| identifier_body |
importer.go | // including removal of groups that are on the server, but no longer present in
// the tarball.
// Plain list format should have one userid per line and can only describe a single
// group in a single system. Such groups will be added to 'external/*' groups
// namespace. Removing such group from importer config will remove it from
// service too.
// The service can also be configured to accept tarball uploads (instead of
// fetching them). Fetched and uploaded tarballs are handled in the exact same way,
// in particular all caveats related to external group system names apply.
// GroupImporterConfig is a singleton entity that contains the contents of the imports.cfg file.
type GroupImporterConfig struct {
Kind string `gae:"$kind,GroupImporterConfig"`
ID string `gae:"$id,config"`
// ConfigProto is the plaintext copy of the config found at imports.cfg.
ConfigProto string `gae:"config_proto"`
// ConfigRevision is revision version of the config found at imports.cfg.
ConfigRevision []byte `gae:"config_revision"`
// ModifiedBy is the email of the user who modified the cfg.
ModifiedBy string `gae:"modified_by"`
// ModifiedTS is the time when this entity was last modified.
ModifiedTS time.Time `gae:"modified_ts"`
}
var GroupNameRe = regexp.MustCompile(`^([a-z\-]+/)?[0-9a-z_\-\.@]{1,100}$`)
// GroupBundle is a map where k: groupName, v: list of identities belonging to group k.
type GroupBundle = map[string][]identity.Identity
// GetGroupImporterConfig fetches the GroupImporterConfig entity from the datastore.
//
// Returns GroupImporterConfig entity if present.
// Returns datastore.ErrNoSuchEntity if the entity is not present.
// Returns annotated error for all other errors.
func GetGroupImporterConfig(ctx context.Context) (*GroupImporterConfig, error) {
groupsCfg := &GroupImporterConfig{
Kind: "GroupImporterConfig",
ID: "config",
}
switch err := datastore.Get(ctx, groupsCfg); {
case err == nil:
return groupsCfg, nil
case err == datastore.ErrNoSuchEntity:
return nil, err
default:
return nil, errors.Annotate(err, "error getting GroupImporterConfig").Err()
}
}
// IngestTarball handles uploads of tarballs specified in 'tarball_upload' config entries.
// It is expected to be called in the auth context of the upload PUT request.
//
// returns
//
// []string - list of modified groups
// int64 - authDBRevision
// error
// proto translation error
// entry is nil
// entry not found in tarball upload config
// unauthorized uploader
// bad tarball structure
func IngestTarball(ctx context.Context, name string, content io.Reader) ([]string, int64, error) {
g, err := GetGroupImporterConfig(ctx)
if err != nil {
return nil, 0, err
}
gConfigProto, err := g.ToProto()
if err != nil {
return nil, 0, errors.Annotate(err, "issue getting proto from config entity").Err()
}
caller := auth.CurrentIdentity(ctx)
var entry *configspb.GroupImporterConfig_TarballUploadEntry
// make sure that tarball_upload entry we're looking for is specified in config
for _, tbu := range gConfigProto.GetTarballUpload() {
if tbu.Name == name {
entry = tbu
break
}
}
if entry == nil {
return nil, 0, errors.New("entry is nil")
}
if entry.Name == "" {
return nil, 0, errors.New("entry not found in tarball upload names")
}
if !contains(caller.Email(), entry.AuthorizedUploader) {
return nil, 0, errors.New(fmt.Sprintf("%q is not an authorized uploader", caller.Email()))
}
bundles, err := loadTarball(ctx, content, entry.GetDomain(), entry.GetSystems(), entry.GetGroups())
if err != nil {
return nil, 0, errors.Annotate(err, "bad tarball").Err()
}
return importBundles(ctx, bundles, caller, nil)
}
// loadTarball unzips tarball with groups and deserializes them.
func loadTarball(ctx context.Context, content io.Reader, domain string, systems, groups []string) (map[string]GroupBundle, error) {
// map looks like: K: system, V: { K: groupName, V: []identities }
bundles := make(map[string]GroupBundle)
entries, err := extractTarArchive(content)
if err != nil {
return nil, err
}
// verify system/groupname and then parse blob if valid
for filename, fileobj := range entries {
chunks := strings.Split(filename, "/")
if len(chunks) != 2 || !GroupNameRe.MatchString(chunks[1]) {
logging.Warningf(ctx, "Skipping file %s, not a valid name", filename)
continue
}
if groups != nil && !contains(filename, groups) {
continue
}
system := chunks[0]
if !contains(system, systems) {
logging.Warningf(ctx, "Skipping file %s, not allowed", filename)
continue
}
identities, err := loadGroupFile(string(fileobj), domain)
if err != nil {
return nil, err
}
if _, ok := bundles[system]; !ok {
bundles[system] = make(GroupBundle)
}
bundles[system][filename] = identities
}
return bundles, nil
}
func loadGroupFile(identities string, domain string) ([]identity.Identity, error) {
members := make(map[identity.Identity]bool)
memsSplit := strings.Split(identities, "\n")
for _, uid := range memsSplit {
uid = strings.TrimSpace(uid)
if uid == "" {
continue
}
var ident string
if domain == "" {
ident = fmt.Sprintf("user:%s", uid)
} else {
ident = fmt.Sprintf("user:%s@%s", uid, domain)
}
emailIdent, err := identity.MakeIdentity(ident)
if err != nil {
return nil, err
}
members[emailIdent] = true
}
membersSorted := make([]identity.Identity, 0, len(members))
for mem := range members {
membersSorted = append(membersSorted, mem)
}
sort.Slice(membersSorted, func(i, j int) bool {
return membersSorted[i].Value() < membersSorted[j].Value()
})
return membersSorted, nil
}
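// Example (illustrative): loadGroupFile("alice\nbob\n\nalice", "example.com") yields
// [user:alice@example.com user:bob@example.com] - duplicates are removed and the result is sorted.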
// importBundles imports given set of bundles all at once.
// A bundle is a map with groups that is the result of a processing of some tarball.
// A bundle specifies the desired state of all groups under some system, e.g.
// importBundles({'ldap': {}}, ...) will REMOVE all existing 'ldap/*' groups.
//
// Group names in the bundle are specified in their full prefixed form (with
// system name prefix). An example of expected 'bundles':
//
// {
// 'ldap': {
// 'ldap/group': [Identity(...), Identity(...)],
// },
// }
//
// Args:
//
// bundles: map system name -> GroupBundle
// providedBy: auth.Identity to put in modifiedBy or createdBy fields.
//
// Returns:
//
// (list of modified groups,
// new AuthDB revision number or 0 if no changes,
// error if issue with writing entities).
func importBundles(ctx context.Context, bundles map[string]GroupBundle, providedBy identity.Identity, testHook func()) ([]string, int64, error) {
// Nothing to process.
if len(bundles) == 0 {
return []string{}, 0, nil
}
getAuthDBRevision := func(ctx context.Context) (int64, error) {
state, err := GetReplicationState(ctx)
switch {
case err == datastore.ErrNoSuchEntity:
return 0, nil
case err != nil:
return -1, err
default:
return state.AuthDBRev, nil
}
}
// Fetches all existing groups and AuthDB revision number.
groupsSnapshot := func(ctx context.Context) (gMap map[string]*AuthGroup, rev int64, err error) {
err = datastore.RunInTransaction(ctx, func(ctx context.Context) error {
groups, err := GetAllAuthGroups(ctx)
if err != nil {
return err
}
gMap = make(map[string]*AuthGroup, len(groups))
for _, g := range groups {
gMap[g.ID] = g
}
rev, err = getAuthDBRevision(ctx)
if err != nil {
return errors.Annotate(err, "couldn't get AuthDBRev").Err()
}
return nil
}, nil)
return gMap, rev, err
}
// Transactionally puts and deletes a bunch of entities.
apply |
// Each tarball may have groups from multiple external systems, but groups from
// some external system must not be split between multiple tarballs. When importer
// sees <external group system name>/* in a tarball, it modifies group list from
// that system on the server to match group list in the tarball _exactly_, | random_line_split |
|
importer.go | nil, err
}
if _, ok := bundles[system]; !ok {
bundles[system] = make(GroupBundle)
}
bundles[system][filename] = identities
}
return bundles, nil
}
func loadGroupFile(identities string, domain string) ([]identity.Identity, error) {
members := make(map[identity.Identity]bool)
memsSplit := strings.Split(identities, "\n")
for _, uid := range memsSplit {
uid = strings.TrimSpace(uid)
if uid == "" {
continue
}
var ident string
if domain == "" {
ident = fmt.Sprintf("user:%s", uid)
} else {
ident = fmt.Sprintf("user:%s@%s", uid, domain)
}
emailIdent, err := identity.MakeIdentity(ident)
if err != nil {
return nil, err
}
members[emailIdent] = true
}
membersSorted := make([]identity.Identity, 0, len(members))
for mem := range members {
membersSorted = append(membersSorted, mem)
}
sort.Slice(membersSorted, func(i, j int) bool {
return membersSorted[i].Value() < membersSorted[j].Value()
})
return membersSorted, nil
}
// importBundles imports given set of bundles all at once.
// A bundle is a map with groups that is the result of a processing of some tarball.
// A bundle specifies the desired state of all groups under some system, e.g.
// importBundles({'ldap': {}}, ...) will REMOVE all existing 'ldap/*' groups.
//
// Group names in the bundle are specified in their full prefixed form (with
// system name prefix). An example of expected 'bundles':
//
// {
// 'ldap': {
// 'ldap/group': [Identity(...), Identity(...)],
// },
// }
//
// Args:
//
// bundles: map system name -> GroupBundle
// providedBy: auth.Identity to put in modifiedBy or createdBy fields.
//
// Returns:
//
// (list of modified groups,
// new AuthDB revision number or 0 if no changes,
// error if issue with writing entities).
func importBundles(ctx context.Context, bundles map[string]GroupBundle, providedBy identity.Identity, testHook func()) ([]string, int64, error) {
// Nothing to process.
if len(bundles) == 0 {
return []string{}, 0, nil
}
getAuthDBRevision := func(ctx context.Context) (int64, error) {
state, err := GetReplicationState(ctx)
switch {
case err == datastore.ErrNoSuchEntity:
return 0, nil
case err != nil:
return -1, err
default:
return state.AuthDBRev, nil
}
}
// Fetches all existing groups and AuthDB revision number.
groupsSnapshot := func(ctx context.Context) (gMap map[string]*AuthGroup, rev int64, err error) {
err = datastore.RunInTransaction(ctx, func(ctx context.Context) error {
groups, err := GetAllAuthGroups(ctx)
if err != nil {
return err
}
gMap = make(map[string]*AuthGroup, len(groups))
for _, g := range groups {
gMap[g.ID] = g
}
rev, err = getAuthDBRevision(ctx)
if err != nil {
return errors.Annotate(err, "couldn't get AuthDBRev").Err()
}
return nil
}, nil)
return gMap, rev, err
}
// Transactionally puts and deletes a bunch of entities.
applyImport := func(expectedRevision int64, entitiesToPut, entitiesToDelete []*AuthGroup, ts time.Time) error {
// Runs in transaction.
return runAuthDBChange(ctx, func(ctx context.Context, cae commitAuthEntity) error {
rev, err := getAuthDBRevision(ctx)
if err != nil {
return err
}
// DB changed between transactions try again.
if rev != expectedRevision {
return errors.New("revision numbers don't match")
}
for _, e := range entitiesToPut {
if err := cae(e, ts, providedBy, false); err != nil {
return err
}
}
for _, e := range entitiesToDelete {
if err := cae(e, ts, providedBy, true); err != nil {
return err
}
}
return nil
})
}
updatedGroups := stringset.New(0)
revision := int64(0)
loopCount := 0
var groups map[string]*AuthGroup
var err error
// Try to apply the change in batches until it lands completely or deadline
// happens. Split each batch update into two transactions (assuming AuthDB
// changes infrequently) to avoid reading and writing too much stuff from
// within a single transaction (and to avoid keeping the transaction open while
// calculating the diff).
for {
// Use same timestamp everywhere to reflect that groups were imported
// atomically within a single transaction.
ts := time.Now().UTC()
loopCount += 1
groups, revision, err = groupsSnapshot(ctx)
if err != nil {
return nil, revision, err
}
// For testing purposes only.
if testHook != nil && loopCount == 2 {
testHook()
}
entitiesToPut := []*AuthGroup{}
entitiesToDel := []*AuthGroup{}
for sys := range bundles {
iGroups := bundles[sys]
toPut, toDel := prepareImport(ctx, sys, groups, iGroups)
entitiesToPut = append(entitiesToPut, toPut...)
entitiesToDel = append(entitiesToDel, toDel...)
}
if len(entitiesToPut) == 0 && len(entitiesToDel) == 0 {
logging.Infof(ctx, "nothing to do")
break
}
// An `applyImport` transaction can touch at most 500 entities. Cap the
// number of entities we create/delete by 200 each since we attach a historical
// entity to each entity. The rest will be updated on the next cycle of the loop.
// This is safe to do since:
// * Imported groups are "leaf" groups (have no subgroups) and can be added
// in arbitrary order without worrying about referential integrity.
// * Deleted groups are guaranteed to be unreferenced by `prepareImport`
// and can be deleted in arbitrary order as well.
truncated := false
// Both these operations happen in the same transaction so we have
// to trim it to make sure the total is <= 200.
if len(entitiesToPut) > 200 {
entitiesToPut = entitiesToPut[:200]
entitiesToDel = nil
truncated = true
} else if len(entitiesToPut)+len(entitiesToDel) > 200 {
entitiesToDel = entitiesToDel[:200-len(entitiesToPut)]
truncated = true
}
// Log what we are about to do to help debugging transaction errors.
logging.Infof(ctx, "Preparing AuthDB rev %d with %d puts and %d deletes:", revision+1, len(entitiesToPut), len(entitiesToDel))
for _, e := range entitiesToPut {
logging.Infof(ctx, "U %s", e.ID)
updatedGroups.Add(e.ID)
}
for _, e := range entitiesToDel {
logging.Infof(ctx, "D %s", e.ID)
updatedGroups.Add(e.ID)
}
// Land the change iff the current AuthDB revision is still == `revision`.
err := applyImport(revision, entitiesToPut, entitiesToDel, ts)
if err != nil && strings.Contains(err.Error(), "revision numbers don't match") {
logging.Warningf(ctx, "authdb changed between transactions, retrying...")
continue
} else if err != nil {
logging.Errorf(ctx, "couldn't apply changes to datastore entities %s", err.Error())
return nil, revision, err
}
// The new revision has landed
revision += 1
if truncated {
logging.Infof(ctx, "going for another round to push the rest of the groups")
time.Sleep(time.Second * 5)
continue
}
logging.Infof(ctx, "Done")
break
}
if len(updatedGroups) > 0 {
return updatedGroups.ToSortedSlice(), int64(revision), nil
}
return nil, 0, nil
}
// prepareImport compares the bundle given to the what is currently present in datastore
// to get the operations for all the groups.
func prepareImport(ctx context.Context, systemName string, existingGroups map[string]*AuthGroup, iGroups GroupBundle) (toPut []*AuthGroup, toDel []*AuthGroup) {
systemGroups := []string{}
iGroupsSet := stringset.New(len(iGroups))
for gID := range existingGroups {
if strings.HasPrefix(gID, fmt.Sprintf("%s/", systemName)) | {
systemGroups = append(systemGroups, gID)
} | conditional_block |
|
importer.go | the cfg.
ModifiedBy string `gae:"modified_by"`
// ModifiedTS is the time when this entity was last modified.
ModifiedTS time.Time `gae:"modified_ts"`
}
var GroupNameRe = regexp.MustCompile(`^([a-z\-]+/)?[0-9a-z_\-\.@]{1,100}$`)
// GroupBundle is a map where k: groupName, v: list of identities belonging to group k.
type GroupBundle = map[string][]identity.Identity
// GetGroupImporterConfig fetches the GroupImporterConfig entity from the datastore.
//
// Returns GroupImporterConfig entity if present.
// Returns datastore.ErrNoSuchEntity if the entity is not present.
// Returns annotated error for all other errors.
func | (ctx context.Context) (*GroupImporterConfig, error) {
groupsCfg := &GroupImporterConfig{
Kind: "GroupImporterConfig",
ID: "config",
}
switch err := datastore.Get(ctx, groupsCfg); {
case err == nil:
return groupsCfg, nil
case err == datastore.ErrNoSuchEntity:
return nil, err
default:
return nil, errors.Annotate(err, "error getting GroupImporterConfig").Err()
}
}
// IngestTarball handles uploads of tarballs specified in 'tarball_upload' config entries.
// It is expected to be called in the auth context of the upload PUT request.
//
// returns
//
// []string - list of modified groups
// int64 - authDBRevision
// error
// proto translation error
// entry is nil
// entry not found in tarball upload config
// unauthorized uploader
// bad tarball structure
func IngestTarball(ctx context.Context, name string, content io.Reader) ([]string, int64, error) {
g, err := GetGroupImporterConfig(ctx)
if err != nil {
return nil, 0, err
}
gConfigProto, err := g.ToProto()
if err != nil {
return nil, 0, errors.Annotate(err, "issue getting proto from config entity").Err()
}
caller := auth.CurrentIdentity(ctx)
var entry *configspb.GroupImporterConfig_TarballUploadEntry
// make sure that tarball_upload entry we're looking for is specified in config
for _, tbu := range gConfigProto.GetTarballUpload() {
if tbu.Name == name {
entry = tbu
break
}
}
if entry == nil {
return nil, 0, errors.New("entry is nil")
}
if entry.Name == "" {
return nil, 0, errors.New("entry not found in tarball upload names")
}
if !contains(caller.Email(), entry.AuthorizedUploader) {
return nil, 0, errors.New(fmt.Sprintf("%q is not an authorized uploader", caller.Email()))
}
bundles, err := loadTarball(ctx, content, entry.GetDomain(), entry.GetSystems(), entry.GetGroups())
if err != nil {
return nil, 0, errors.Annotate(err, "bad tarball").Err()
}
return importBundles(ctx, bundles, caller, nil)
}
// loadTarball unzips tarball with groups and deserializes them.
func loadTarball(ctx context.Context, content io.Reader, domain string, systems, groups []string) (map[string]GroupBundle, error) {
// map looks like: K: system, V: { K: groupName, V: []identities }
bundles := make(map[string]GroupBundle)
entries, err := extractTarArchive(content)
if err != nil {
return nil, err
}
// verify system/groupname and then parse blob if valid
for filename, fileobj := range entries {
chunks := strings.Split(filename, "/")
if len(chunks) != 2 || !GroupNameRe.MatchString(chunks[1]) {
logging.Warningf(ctx, "Skipping file %s, not a valid name", filename)
continue
}
if groups != nil && !contains(filename, groups) {
continue
}
system := chunks[0]
if !contains(system, systems) {
logging.Warningf(ctx, "Skipping file %s, not allowed", filename)
continue
}
identities, err := loadGroupFile(string(fileobj), domain)
if err != nil {
return nil, err
}
if _, ok := bundles[system]; !ok {
bundles[system] = make(GroupBundle)
}
bundles[system][filename] = identities
}
return bundles, nil
}
func loadGroupFile(identities string, domain string) ([]identity.Identity, error) {
members := make(map[identity.Identity]bool)
memsSplit := strings.Split(identities, "\n")
for _, uid := range memsSplit {
uid = strings.TrimSpace(uid)
if uid == "" {
continue
}
var ident string
if domain == "" {
ident = fmt.Sprintf("user:%s", uid)
} else {
ident = fmt.Sprintf("user:%s@%s", uid, domain)
}
emailIdent, err := identity.MakeIdentity(ident)
if err != nil {
return nil, err
}
members[emailIdent] = true
}
membersSorted := make([]identity.Identity, 0, len(members))
for mem := range members {
membersSorted = append(membersSorted, mem)
}
sort.Slice(membersSorted, func(i, j int) bool {
return membersSorted[i].Value() < membersSorted[j].Value()
})
return membersSorted, nil
}
// importBundles imports given set of bundles all at once.
// A bundle is a map with groups that is the result of a processing of some tarball.
// A bundle specifies the desired state of all groups under some system, e.g.
// importBundles({'ldap': {}}, ...) will REMOVE all existing 'ldap/*' groups.
//
// Group names in the bundle are specified in their full prefixed form (with
// system name prefix). An example of expected 'bundles':
//
// {
// 'ldap': {
// 'ldap/group': [Identity(...), Identity(...)],
// },
// }
//
// Args:
//
// bundles: map system name -> GroupBundle
// providedBy: auth.Identity to put in modifiedBy or createdBy fields.
//
// Returns:
//
// (list of modified groups,
// new AuthDB revision number or 0 if no changes,
// error if issue with writing entities).
func importBundles(ctx context.Context, bundles map[string]GroupBundle, providedBy identity.Identity, testHook func()) ([]string, int64, error) {
// Nothing to process.
if len(bundles) == 0 {
return []string{}, 0, nil
}
getAuthDBRevision := func(ctx context.Context) (int64, error) {
state, err := GetReplicationState(ctx)
switch {
case err == datastore.ErrNoSuchEntity:
return 0, nil
case err != nil:
return -1, err
default:
return state.AuthDBRev, nil
}
}
// Fetches all existing groups and AuthDB revision number.
groupsSnapshot := func(ctx context.Context) (gMap map[string]*AuthGroup, rev int64, err error) {
err = datastore.RunInTransaction(ctx, func(ctx context.Context) error {
groups, err := GetAllAuthGroups(ctx)
if err != nil {
return err
}
gMap = make(map[string]*AuthGroup, len(groups))
for _, g := range groups {
gMap[g.ID] = g
}
rev, err = getAuthDBRevision(ctx)
if err != nil {
return errors.Annotate(err, "couldn't get AuthDBRev").Err()
}
return nil
}, nil)
return gMap, rev, err
}
// Transactionally puts and deletes a bunch of entities.
applyImport := func(expectedRevision int64, entitiesToPut, entitiesToDelete []*AuthGroup, ts time.Time) error {
// Runs in transaction.
return runAuthDBChange(ctx, func(ctx context.Context, cae commitAuthEntity) error {
rev, err := getAuthDBRevision(ctx)
if err != nil {
return err
}
// DB changed between transactions try again.
if rev != expectedRevision {
return errors.New("revision numbers don't match")
}
for _, e := range entitiesToPut {
if err := cae(e, ts, providedBy, false); err != nil {
return err
}
}
for _, e := range entitiesToDelete {
if err := cae(e, ts, providedBy, true); err != nil {
return err
}
}
return nil
})
}
updatedGroups := stringset.New(0)
revision := int64(0)
loopCount := 0
var groups map[string]*AuthGroup
var err error
// Try to apply the change in batches until it lands completely or deadline
// happens. Split each batch update into two transactions (assuming AuthDB
// changes infrequently) to avoid reading and writing too much stuff from
// within a single transaction (and to avoid keeping the transaction open while
// calculating the diff).
for {
| GetGroupImporterConfig | identifier_name |
storeserv.py | verify=True):
"""
HPE 3PAR constructor.
:param str address: Hostname or IP address of HPE 3PAR array
(management address). Web Services API should be enabled for this
array (disabled by default). To enable Web Services API you should
check 3PAR OS command: showwsapi.
:param str username: User name for 3PAR Web Services API. It's
recommended to create a dedicated user with limited rights. For
example, if you don't need to create/modify/delete objects on the disk
array, you should create a new user with the "browse" role. Of course,
your script can work with the "3paradm" user ("super" role), but
it's a bad idea. To create a new user, you should check the 3PAR OS
command: createuser.
:param str password: Password for 3PAR Web Services API.
:param int port: (optional) Custom port number for 3PAR Web Services
API.
:param bool ssl: (optional) Use secure https (True) or plain text
http (False).
:param bool|string verify: (optional) Either a boolean, in which case it
controls whether we verify the Rest server's TLS certificate,
or a string, in which case it must be a path to a CA
bundle to use. By default: True.
:return: None
"""
self._address = address
self._username = username
self._password = password
self._port = port
self._ssl = ssl
self._verify = verify
# Session key. None, if there is not active session.
self._key = None
# Default timeouts:
# ConnectionTimeout = 1 second
# ReadTimeout = infinity
self._timeout = (1, None)
# Default request headers
self._headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Accept-Language': 'en'
}
def __del__(self):
# Perform session close
if self._key is not None:
self.close()
def _query(self, url, method, **kwargs):
"""
Perform HTTP request to HPE 3PAR array.
:param str url: URL address. For example: 'system' or 'volumes'.
Static part of url is generated automatically.
:param str method: HTTP method. Could be 'GET', 'POST', 'DELETE' or
'PUT'.
:param float|tuple timeout: (optional) Like :attr:`StoreServ.timeout`
but only for one query.
:rtype: tuple(int, dict)
:return: Dictionary with HTTP status code and json data.
For example: dict('status':200, 'data':{'key':'value'}).
Second value may be None if 3PAR array returns no message body,
"""
# Set connection and read timeout (if not set by user for current request)
timeout = kwargs.pop('timeout', self._timeout)
# Add default and auth headers to parameter list
kwargs.setdefault('headers', dict())
kwargs['headers'].update(self._headers)
# Prepare request
path = '%s/%s' % (self._base_url, url.strip('/'))
request = requests.Request(method, path, **kwargs)
prep = request.prepare()
LOG.debug('%s(`%s`)', method, prep.url)
LOG.debug('Request body = `%s`', prep.body)
# Perform request with runtime measuring
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=InsecureRequestWarning)
try:
session = requests.Session()
resp = session.send(prep, timeout=timeout, verify=self._verify)
deltafmt = '%d.%d sec' % (resp.elapsed.seconds,
resp.elapsed.microseconds // 1000)
except Exception as error:
LOG.fatal('Cannot connect to StoreServ device. %s',
repr(error))
raise
# Check Rest service response
if resp.status_code not in [200, 201, 202, 204]:
LOG.warning('Return code %s, response delay %s',
resp.status_code,
deltafmt)
LOG.warning('resp.content=%s', resp.content)
LOG.warning('resp.reason=%s', resp.reason)
else:
LOG.debug('StoreServ return status %s, delay %s',
resp.status_code,
deltafmt)
# Check response JSON body is exist
try:
jdata = resp.json()
except ValueError:
if resp.content:
LOG.warning('Cannot decode JSON. Source string: "%s"',
resp.content)
return resp.status_code, None # (status, data)
# Check wsapi session timeout error
if (resp.status_code == 403) and (jdata.get('code', None) == 6):
if self._key is not None:
LOG.info('Session timeout occurs. Session key is invalid. '
'Try to get new one.')
# Just forget about current (inactive) session
self._headers.pop('X-HP3PAR-WSAPI-SessionKey', None)
self._key = None
# Generate new session and replay last query
try:
self.open()
replay = self._query(url, method, **kwargs)
except Exception as error:
LOG.fatal('Cannot open new WSAPI session. Exception: %s',
repr(error))
raise
else:
LOG.debug('Request replay success.')
return replay
return resp.status_code, jdata
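# Every public HTTP helper below (get/post/put/delete) returns this same
# (status_code, parsed_json_or_None) tuple, e.g. (sketch; the key shown is illustrative):
#   status, data = array.get('volumes')
#   if status == 200:
#       print(data['total'])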
def open(self):
"""
Open new Rest API session for HPE 3PAR array. You should call it prior
any other requests. Do not forget to call :meth:`StoreServ.close` if
you don't plan to use session anymore, because 3PAR array has active
sessions limit.
If you run into trouble, you should manually check:
* 3PAR Web services API are enabled on array (3PAR OS
command: 'showwsapi')
* Array credentials (username and password)
* 3PAR array management address is correct and available
* Debug logs generated by python logging module
:return: None
"""
auth = {'user': self._username, 'password': self._password}
status, data = self.post('credentials', body=auth)
if status == 201:
# 201 (created) => Session successfully created
self._headers.update({'X-HP3PAR-WSAPI-SessionKey': data['key']})
self._key = data['key']
elif status == 403:
# 403 (forbidden) => Wrong user or password
raise AuthError('Cannot connect to StoreServ. '
'Authentication error: %s', data['desc'])
def close(self):
"""
Close Rest API session.
:return: None
"""
# There isn't an active session
if self._key is None:
LOG.debug("There isn't an active session - skipping session close.")
return
# Try to close active session
path = 'credentials/' + self._key
try:
self.delete(path)
except Exception as error:
LOG.warning('Cannot close StoreServ 3PAR session '
'gracefully. Exception occured: %s',
repr(error))
else:
self._headers.pop('X-HP3PAR-WSAPI-SessionKey')
self._key = None
def get(self, url, query=None):
"" | return self._query(url, 'GET')
def post(self, url, body):
"""
Perform HTTP POST request to HPE 3PAR array. Method used to create new
objects.
:param str url: URL address. Base part of url address is generated
automatically and you should not care about it. Example of valid
url: 'system' or 'volumes'. All available url's, request
parameters and results are described in "HPE 3PAR Web Services API
Developer's Guide"
:param dict body: Request parameter, used to create new array object.
:rtype: tuple (int, dict)
:return: Tuple with HTTP status code and dict with request result.
For example: (201, {'key':'value'}). Second | "
Perform HTTP GET request to HPE 3PAR array. Method used to get
information about objects.
:param str url: URL address. Base part of url address is generated
automatically and you should not care about it. Example of valid
url: 'system' or 'volumes'. All available url's and requests
result are described in "HPE 3PAR Web Services API Developer's
Guide"
:param str query: (optional) Query filter specification (see "WSAPI
query syntax" in "HPE 3PAR Web Services API Developer's Guide").
:rtype: tuple(int, dict)
:return: Tuple with HTTP status code and dict with request result.
For example: (200, {'key':'value'}).
"""
# Perform get request with query filter
if query is not None:
return self._query(url, 'GET', params=quote(f'query="{query}"'))
# Perform simple get request | identifier_body |
storeserv.py | % (self._base_url, url.strip('/'))
request = requests.Request(method, path, **kwargs)
prep = request.prepare()
LOG.debug('%s(`%s`)', method, prep.url)
LOG.debug('Request body = `%s`', prep.body)
# Perform request with runtime measuring
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=InsecureRequestWarning)
try:
session = requests.Session()
resp = session.send(prep, timeout=timeout, verify=self._verify)
deltafmt = '%d.%d sec' % (resp.elapsed.seconds,
resp.elapsed.microseconds // 1000)
except Exception as error:
LOG.fatal('Cannot connect to StoreServ device. %s',
repr(error))
raise
# Check Rest service response
if resp.status_code not in [200, 201, 202, 204]:
LOG.warning('Return code %s, response delay %s',
resp.status_code,
deltafmt)
LOG.warning('resp.content=%s', resp.content)
LOG.warning('resp.reason=%s', resp.reason)
else:
LOG.debug('StoreServ return status %s, delay %s',
resp.status_code,
deltafmt)
# Check response JSON body is exist
try:
jdata = resp.json()
except ValueError:
if resp.content:
LOG.warning('Cannot decode JSON. Source string: "%s"',
resp.content)
return resp.status_code, None # (status, data)
# Check wsapi session timeout error
if (resp.status_code == 403) and (jdata.get('code', None) == 6):
if self._key is not None:
LOG.info('Session timeout occurs. Session key is invalid. '
'Try to get new one.')
# Just forget about current (inactive) session
self._headers.pop('X-HP3PAR-WSAPI-SessionKey', None)
self._key = None
# Generate new session and replay last query
try:
self.open()
replay = self._query(url, method, **kwargs)
except Exception as error:
LOG.fatal('Cannot open new WSAPI session. Exception: %s',
repr(error))
raise
else:
LOG.debug('Request replay success.')
return replay
return resp.status_code, jdata
def open(self):
"""
Open new Rest API session for HPE 3PAR array. You should call it prior
any other requests. Do not forget to call :meth:`StoreServ.close` if
you don't plan to use session anymore, because 3PAR array has active
sessions limit.
If you run into trouble, you should manually check:
* 3PAR Web services API are enabled on array (3PAR OS
command: 'showwsapi')
* Array credentials (username and password)
* 3PAR array management address is correct and available
* Debug logs generated by python logging module
:return: None
"""
auth = {'user': self._username, 'password': self._password}
status, data = self.post('credentials', body=auth)
if status == 201:
# 201 (created) => Session successfully created
self._headers.update({'X-HP3PAR-WSAPI-SessionKey': data['key']})
self._key = data['key']
elif status == 403:
# 403 (forbidden) => Wrong user or password
raise AuthError('Cannot connect to StoreServ. '
'Authentication error: %s', data['desc'])
def close(self):
"""
Close Rest API session.
:return: None
"""
# There isn't an active session
if self._key is None:
LOG.debug("There isn't an active session - skipping session close.")
return
# Try to close active session
path = 'credentials/' + self._key
try:
self.delete(path)
except Exception as error:
LOG.warning('Cannot close StoreServ 3PAR session '
'gracefully. Exception occured: %s',
repr(error))
else:
self._headers.pop('X-HP3PAR-WSAPI-SessionKey')
self._key = None
def get(self, url, query=None):
"""
Perform HTTP GET request to HPE 3PAR array. Method used to get
information about objects.
:param str url: URL address. Base part of url address is generated
automatically and you should not care about it. Example of valid
url: 'system' or 'volumes'. All available url's and requests
result are described in "HPE 3PAR Web Services API Developer's
Guide"
:param str query: (optional) Query filter specification (see "WSAPI
query syntax" in "HPE 3PAR Web Services API Developer's Guide").
:rtype: tuple(int, dict)
:return: Tuple with HTTP status code and dict with request result.
For example: (200, {'key':'value'}).
"""
# Perform get request with query filter
if query is not None:
return self._query(url, 'GET', params=quote(f'query="{query}"'))
# Perform simple get request
return self._query(url, 'GET')
def post(self, url, body):
"""
Perform HTTP POST request to HPE 3PAR array. Method used to create new
objects.
:param str url: URL address. Base part of url address is generated
automatically and you should not care about it. Example of valid
url: 'system' or 'volumes'. All available url's, request
parameters and results are described in "HPE 3PAR Web Services API
Developer's Guide"
:param dict body: Request parameter, used to create new array object.
:rtype: tuple (int, dict)
:return: Tuple with HTTP status code and dict with request result.
For example: (201, {'key':'value'}). Second value may be None if
3PAR array returns no message body.
"""
return self._query(url, 'POST', json=body)
def delete(self, url):
"""
Perform HTTP DELETE request to HPE 3PAR array.
:param str url: URL address. Base part of url address is generated
automatically and you should not care about it. Example of valid
url: 'system' or 'volumes'. All available url's, request
parameters and results are described in "HPE 3PAR Web Services API
Developer's Guide"
:return: Tuple with HTTP status code and dict with request result. For
example: (200, {'key':'value'}). Second value may be None if 3PAR
array returns no message body.
"""
return self._query(url, 'DELETE')
def put(self, url, body):
"""
Perform HTTP PUT request to HPE 3PAR array.
:param str url: URL address. Base part of url address is generated
automatically and you should not care about it. Example of valid
url: 'system' or 'volumes'. All available url's, request
parameters and results are described in "HPE 3PAR Web Services
API Developer's Guide"
:param dict body: Request parameter, used to modify array object.
:rtype: tuple(int, dict)
:return: Tuple with HTTP status code and dict with request result. For
example: (200, {'key':'value'}). Second value may be None if 3PAR
array returns no message body.
"""
return self._query(url, 'PUT', json=body)
def _set_timeout(self, timeout):
if isinstance(timeout, (float, int)):
self._timeout = (timeout, timeout)
elif isinstance(timeout, tuple):
self._timeout = timeout
def _get_timeout(self):
return self._timeout
timeout = property(_get_timeout, _set_timeout)
"""
:var float|tuple timeout: Number of seconds that the Rest API
client waits for a response from the HPE StoreServ
before a timeout exception is raised. You can use
different timeouts for connection setup and for getting
first piece of data. In this case, you should use
tuple(float, float) with first value - connection
timeout and the second value - read timeout. Or if
you want to use same values for both type of timeouts,
you can use one float value. 'None' value can be used
instead to wait forever for a device response. Default
value: (1, None)
"""
@property
def _base_url(self):
"""
Generate static part of URL.
:rtype: str
:return: Static part of URL
"""
# URL Protocol
proto = 'https' if self._ssl else 'http'
# Device port number
if self._port is None:
port = 8080 if self._ssl else 8008
else:
port = self._port
return f'{proto}://{self._address}:{port}/api/v1'
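# Typical usage sketch (illustrative only; the address and credentials below are placeholders):
#
#   array = StoreServ('3par.example.com', 'browse_user', 'secret')
#   array.open()
#   status, system = array.get('system')
#   array.close()
#
# The __enter__/__exit__ methods below also allow the object to be used in a with-statement.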
def __enter__(self):
return self
def __ | exit__(s | identifier_name |
|
storeserv.py | verify=True):
"""
HPE 3PAR constructor.
:param str address: Hostname or IP address of HPE 3PAR array
(management address). Web Services API should be enabled for this
array (disabled by default). To enable Web Services API you should
check 3PAR OS command: showwsapi.
:param str username: User name for 3PAR Web Services API. It's
recommended to create a dedicated user with limited rights. For
example, if you don't need to create/modify/delete objects on the disk
array, you should create a new user with the "browse" role. Of course,
your script can work with the "3paradm" user ("super" role), but
it's a bad idea. To create a new user, you should check the 3PAR OS
command: createuser.
:param str password: Password for 3PAR Web Services API.
:param int port: (optional) Custom port number for 3PAR Web Services
API.
:param bool ssl: (optional) Use secure https (True) or plain text
http (False).
:param bool|string verify: (optional) Either a boolean, in which case it
controls whether we verify the Rest server's TLS certificate,
or a string, in which case it must be a path to a CA
bundle to use. By default: True.
:return: None
"""
self._address = address
self._username = username
self._password = password
self._port = port
self._ssl = ssl
self._verify = verify
# Session key. None, if there is not active session.
self._key = None
# Default timeouts:
# ConnectionTimeout = 1 second
# ReadTimeout = infinity
self._timeout = (1, None)
# Default request headers
self._headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Accept-Language': 'en'
}
def __del__(self):
# Perform session close
if self._key is not None:
self.close()
def _query(self, url, method, **kwargs):
"""
Perform HTTP request to HPE 3PAR array.
:param str url: URL address. For example: 'system' or 'volumes'.
Static part of url is generated automatically.
:param str method: HTTP method. Could be 'GET', 'POST', 'DELETE' or
'PUT'.
:param float|tuple timeout: (optional) Like :attr:`StoreServ.timeout`
but only for one query.
:rtype: tuple(int, dict)
:return: Dictionary with HTTP status code and json data.
For example: dict('status':200, 'data':{'key':'value'}).
Second value may be None if 3PAR array returns no message body.
"""
# Set connection and read timeout (if not set by user for current request)
timeout = kwargs.pop('timeout', self._timeout)
# Add default and auth headers to parameter list
kwargs.setdefault('headers', dict())
kwargs['headers'].update(self._headers)
# Prepare request
path = '%s/%s' % (self._base_url, url.strip('/'))
request = requests.Request(method, path, **kwargs)
prep = request.prepare()
LOG.debug('%s(`%s`)', method, prep.url)
LOG.debug('Request body = `%s`', prep.body)
# Perform request with runtime measuring
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=InsecureRequestWarning)
try:
session = requests.Session()
resp = session.send(prep, timeout=timeout, verify=self._verify)
deltafmt = '%d.%d sec' % (resp.elapsed.seconds,
resp.elapsed.microseconds // 1000)
except Exception as error:
LOG.fatal('Cannot connect to StoreServ device. %s',
repr(error))
raise
# Check Rest service response
if resp.status_code not in [200, 201, 202, 204]:
LOG.warning('Return code %s, response delay %s',
resp.status_code,
deltafmt)
LOG.warning('resp.content=%s', resp.content)
LOG.warning('resp.reason=%s', resp.reason)
else:
LOG.debug('StoreServ return status %s, delay %s',
resp.status_code,
deltafmt)
# Check response JSON body is exist
try:
jdata = resp.json()
except ValueError:
if resp.content:
LOG.warning('Cannot decode JSON. Source string: "%s"',
resp.content)
return resp.status_code, None # (status, data)
# Check wsapi session timeout error
if (resp.status_code == 403) and (jdata.get('code', None) == 6):
if self._key is not None:
LOG.info('Session timeout occurs. Session key is invalid. '
'Try to get new one.')
# Just forget about current (inactive) session
self._headers.pop('X-HP3PAR-WSAPI-SessionKey', None)
self._key = None
# Generate new session and replay last query
try:
self.open()
replay = self._query(url, method, **kwargs)
except Exception as error:
LOG.fatal('Cannot open new WSAPI session. Exception: %s',
repr(error))
raise
else:
LOG.debug('Request replay success.')
return replay
return resp.status_code, jdata
def open(self):
"""
Open new Rest API session for HPE 3PAR array. You should call it prior
any other requests. Do not forget to call :meth:`StoreServ.close` if
you don't plan to use session anymore, because 3PAR array has active
sessions limit.
If you run into trouble, you should manually check:
* 3PAR Web services API are enabled on array (3PAR OS
command: 'showwsapi')
* Array credentials (username and password)
* 3PAR array management address is correct and available
* Debug logs generated by python logging module
:return: None
"""
auth = {'user': self._username, 'password': self._password}
status, data = self.post('credentials', body=auth)
if status == 201:
# 201 (created) => Session successfully created
se | elif status == 403:
# 403 (forbidden) => Wrong user or password
raise AuthError('Cannot connect to StoreServ. '
'Authentication error: %s', data['desc'])
def close(self):
"""
Close Rest API session.
:return: None
"""
# There isn't an active session
if self._key is None:
LOG.debug("There isn't an active session - skipping session close.")
return
# Try to close active session
path = 'credentials/' + self._key
try:
self.delete(path)
except Exception as error:
LOG.warning('Cannot close StoreServ 3PAR session '
'gracefully. Exception occured: %s',
repr(error))
else:
self._headers.pop('X-HP3PAR-WSAPI-SessionKey')
self._key = None
def get(self, url, query=None):
"""
Perform HTTP GET request to HPE 3PAR array. Method used to get
information about objects.
:param str url: URL address. Base part of url address is generated
automatically and you should not care about it. Example of valid
url: 'system' or 'volumes'. All available url's and requests
result are described in "HPE 3PAR Web Services API Developer's
Guide"
:param str query: (optional) Query filter specification (see "WSAPI
query syntax" in "HPE 3PAR Web Services API Developer's Guide").
:rtype: tuple(int, dict)
:return: Tuple with HTTP status code and dict with request result.
For example: (200, {'key':'value'}).
"""
# Perform get request with query filter
if query is not None:
return self._query(url, 'GET', params=quote(f'query="{query}"'))
# Perform simple get request
return self._query(url, 'GET')
def post(self, url, body):
"""
Perform HTTP POST request to HPE 3PAR array. Method used to create new
objects.
:param str url: URL address. Base part of url address is generated
automatically and you should not care about it. Example of valid
url: 'system' or 'volumes'. All available URLs, request
parameters and results are described in "HPE 3PAR Web Services API
Developer's Guide"
:param dict body: Request parameter, used to create new array object.
:rtype: tuple (int, dict)
:return: Tuple with HTTP status code and dict with request result.
For example: (201, {'key':'value | lf._headers.update({'X-HP3PAR-WSAPI-SessionKey': data['key']})
self._key = data['key']
| conditional_block |
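The tail of _query in the row above implements a one-shot replay: a 403 response carrying WSAPI error code 6 means the cached session key has expired, so the client forgets the key, opens a new session and re-issues the original request. A stripped-down sketch of that pattern follows; the names are illustrative, not part of the source.

# Hedged sketch of the "refresh the session and replay once" pattern used by _query above.
def query_with_replay(client, do_request):
    status, data = do_request()
    if status == 403 and isinstance(data, dict) and data.get('code') == 6:
        client.open()                  # drop the stale key and authenticate again
        status, data = do_request()    # replay the original request once
    return status, data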
storeserv.py | LOG = logging.getLogger('hpestorapi.storeserv')
class StoreServ:
"""
HPE 3PAR array implementation class.
"""
def __init__(self, address, username, password, port=None, ssl=True, verify=True):
"""
HPE 3PAR constructor.
:param str address: Hostname or IP address of HPE 3PAR array
(management address). Web Services API should be enabled for this
array (disabled by default). To enable Web Services API you should
check 3PAR OS command: showwsapi.
:param str username: User name for 3PAR Web Services API. It's
recommended to create a dedicated user with limited rights. For
example, if you don't need to create/modify/delete objects on the disk
array, you should create a new user with the "browse" role. Of course,
your script can work with the "3paradm" user ("super" role), but
it's a bad idea. To create a new user, you should check the 3PAR OS
command: createuser.
:param str password: Password for 3PAR Web Services API.
:param int port: (optional) Custom port number for 3PAR Web Services
API.
:param bool ssl: (optional) Use secure https (True) or plain text
http (False).
:param bool|string verify: (optional) Either a boolean, in which case it
controls whether we verify the Rest server's TLS certificate,
or a string, in which case it must be a path to a CA
bundle to use. By default: True.
:return: None
"""
self._address = address
self._username = username
self._password = password
self._port = port
self._ssl = ssl
self._verify = verify
# Session key. None, if there is not active session.
self._key = None
# Default timeouts:
# ConnectionTimeout = 1 second
# ReadTimeout = infinity
self._timeout = (1, None)
# Default request headers
self._headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Accept-Language': 'en'
}
def __del__(self):
# Perform session close
if self._key is not None:
self.close()
def _query(self, url, method, **kwargs):
"""
Perform HTTP request to HPE 3PAR array.
:param str url: URL address. For example: 'system' or 'volumes'.
Static part of url is generated automatically.
:param str method: HTTP method. Could be 'GET', 'POST', 'DELETE' or
'PUT'.
:param float|tuple timeout: (optional) Like :attr:`StoreServ.timeout`
but only for one query.
:rtype: tuple(int, dict)
:return: Tuple with HTTP status code and JSON data.
For example: (200, {'key': 'value'}).
The second value may be None if the 3PAR array returns no message body.
"""
# Set connection and read timeout (if not set by user for current request)
timeout = kwargs.pop('timeout', self._timeout)
# Add default and auth headers to parameter list
kwargs.setdefault('headers', dict())
kwargs['headers'].update(self._headers)
# Prepare request
path = '%s/%s' % (self._base_url, url.strip('/'))
request = requests.Request(method, path, **kwargs)
prep = request.prepare()
LOG.debug('%s(`%s`)', method, prep.url)
LOG.debug('Request body = `%s`', prep.body)
# Perform request with runtime measuring
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=InsecureRequestWarning)
try:
session = requests.Session()
resp = session.send(prep, timeout=timeout, verify=self._verify)
deltafmt = '%d.%d sec' % (resp.elapsed.seconds,
resp.elapsed.microseconds // 1000)
except Exception as error:
LOG.fatal('Cannot connect to StoreServ device. %s',
repr(error))
raise
# Check Rest service response
if resp.status_code not in [200, 201, 202, 204]:
LOG.warning('Return code %s, response delay %s',
resp.status_code,
deltafmt)
LOG.warning('resp.content=%s', resp.content)
LOG.warning('resp.reason=%s', resp.reason)
else:
LOG.debug('StoreServ return status %s, delay %s',
resp.status_code,
deltafmt)
# Check whether a JSON response body exists
try:
jdata = resp.json()
except ValueError:
if resp.content:
LOG.warning('Cannot decode JSON. Source string: "%s"',
resp.content)
return resp.status_code, None # (status, data)
# Check wsapi session timeout error
if (resp.status_code == 403) and (jdata.get('code', None) == 6):
if self._key is not None:
LOG.info('Session timeout occurred. Session key is invalid. '
'Trying to get a new one.')
# Just forget about current (inactive) session
self._headers.pop('X-HP3PAR-WSAPI-SessionKey', None)
self._key = None
# Generate new session and replay last query
try:
self.open()
replay = self._query(url, method, **kwargs)
except Exception as error:
LOG.fatal('Cannot open new WSAPI session. Exception: %s',
repr(error))
raise
else:
LOG.debug('Request replay success.')
return replay
return resp.status_code, jdata
def open(self):
"""
Open a new Rest API session for the HPE 3PAR array. You should call it prior
to any other requests. Do not forget to call :meth:`StoreServ.close` if
you don't plan to use the session anymore, because the 3PAR array has an
active session limit.
If trouble occurs, you should manually check:
* 3PAR Web Services API is enabled on the array (3PAR OS
command: 'showwsapi')
* Array credentials (username and password)
* 3PAR array management address is correct and available
* Debug logs generated by python logging module
:return: None
"""
auth = {'user': self._username, 'password': self._password}
status, data = self.post('credentials', body=auth)
if status == 201:
# 201 (created) => Session successfully created
self._headers.update({'X-HP3PAR-WSAPI-SessionKey': data['key']})
self._key = data['key']
elif status == 403:
# 403 (forbidden) => Wrong user or password
raise AuthError('Cannot connect to StoreServ. '
'Authentication error: %s', data['desc'])
def close(self):
"""
Close Rest API session.
:return: None
"""
# There isn't an active session
if self._key is None:
LOG.debug('There is no active session - skipping session close.')
return
# Try to close active session
path = 'credentials/' + self._key
try:
self.delete(path)
except Exception as error:
LOG.warning('Cannot close StoreServ 3PAR session '
'gracefully. Exception occurred: %s',
repr(error))
else:
self._headers.pop('X-HP3PAR-WSAPI-SessionKey')
self._key = None
def get(self, url, query=None):
"""
Perform HTTP GET request to HPE 3PAR array. Method used to get
information about objects.
:param str url: URL address. Base part of url address is generated
automatically and you should not care about it. Example of valid
url: 'system' or 'volumes'. All available URLs and request
results are described in "HPE 3PAR Web Services API Developer's
Guide"
:param str query: (optional) Query filter specification (see "WSAPI
query syntax" in "HPE 3PAR Web Services API Developer's Guide").
:rtype: tuple(int, dict)
:return: Tuple with HTTP status code and dict with request result.
For example: (200, {'key':'value'}).
"""
# Perform get request with query filter
if query is not None:
return self._query(url, 'GET', params=quote(f'query="{query}"'))
# Perform simple get request
return self._query(url, 'GET')
def post(self, url, body):
"""
Perform HTTP POST request to HPE 3PAR array. Method used to create new
objects.
:param str url: URL address. Base part of url address is generated
automatically and you should not care about it. Example of valid
url: 'system' or 'volumes'. All available URLs, request
parameters and results are described in "HPE 3PAR Web Services API
| if __name__ == "__main__":
pass
| random_line_split |
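The row above carries the complete StoreServ client, so a short usage sketch may help; the import path is only guessed from the logger name ('hpestorapi.storeserv') and the POST body fields are illustrative placeholders, not taken from the source.

# Hypothetical usage sketch for the StoreServ class defined above.
# Assumptions: the package is importable as 'hpestorapi'; 'name', 'cpg' and
# 'sizeMiB' stand in for whatever body the real WSAPI call expects.
from hpestorapi import StoreServ

array = StoreServ('3par.example.com', 'browse_user', 'secret', ssl=True)
array.open()                                    # POST /credentials, stores the session key
try:
    status, data = array.get('system')          # every helper returns (http_status, json_or_None)
    if status == 200:
        print(data)
    status, data = array.post('volumes', body={'name': 'vol01', 'cpg': 'FC_r6', 'sizeMiB': 1024})
finally:
    array.close()                               # DELETE /credentials/<session key>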
|
repository_repository.py | ('ir.module.module', string='Modules')
module_count = fields.Integer('Modules')
state = fields.Selection(string="Estado",
selection=[('draft', 'Borrador'), ('cloned', 'Clonado'), ('enabled', 'Enabled'),
('disabled', 'Disabled')], default='draft', readonly=True,)
addons_paths = fields.Selection(nlist_path,
string="Add-ons Paths", help="Please choose one of these directories to put "
"your module in", )
password = fields.Char(string='Password', required=False)
user = fields.Char(string='User', required=False)
log = fields.Char(string='Log', required=False)
def log_(self, mensaje):
now = datetime.now()
self.write({'log': '\n' + str(now.strftime("%m/%d/%Y, %H:%M:%S")) + " " + str(mensaje) + " " + str(self.log)})
requiremet = fields.Char(
string='Requiremet',
required=False)
def _compute_apps(self):
module = self.env['ir.module.module']
curr_addons_path = set(config['addons_path'].split(','))
if self.path in curr_addons_path:
self.state = 'enabled'
if self.state == 'enabled':
module_names = find_modules(self.path)
self.module_ids = module.search([('name', 'in', module_names)])
self.module_count = len(self.module_ids)
else:
self.module_ids = False
self.module_count = 0
def copy(self, default=None):
raise exceptions.Warning(_("The repository cannot be cloned."))
def unlink(self):
if self.env.context.get('remove_repository'):
for rec in self:
if rec.state == 'enabled':
raise exceptions.Warning(_('Unable to remove an enabled repository.'))
res = Git(self.path)
res.load()
res.remove()
return super(RepositoryRepository, self).unlink()
def action_open_modules(self):
self.ensure_one()
return {
'name': self.source,
'type': 'ir.actions.act_window',
'res_model': 'ir.module.module',
'view_type': 'form',
'view_mode': 'kanban,tree,form',
'target': 'current',
'domain': [('id', 'in', self.module_ids.ids)]
}
def install_requirements(self):
try:
requirement_file = self.path + '/requirements.txt'
if os.path.exists(requirement_file):
subprocess.check_call(["pip3", "install", "-r", requirement_file])
except Exception as e:
log_("Exception exception occured: {}".format(e))
def action_enabled(self):
self.ensure_one()
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
addons_path = config['addons_path'].split(',')
if config._is_addons_path(self.path) and self.path not in addons_path:
addons_path.insert(0, self.path)
config['addons_path'] = ','.join(addons_path)
config.save()
self.state = 'enabled'
requirement_file = self.path + '/requiremet.txt'
if os.path.exists(requirement_file):
f = open(requirement_file, "r")
self.requiremet = f.read()
self.install_requirements()
self._compute_apps()
return self.env.ref(
'base.action_view_base_module_update').read()[0]
def action_remove(self):
self.ensure_one()
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
try:
self.with_context(remove_repository=True).unlink()
except Exception as e:
raise exceptions.Warning(_(" '%s':\n%s") % (self.source, e))
return {'type': 'ir.actions.act_window_close'}
def restart(self):
service.server.restart()
def pull_all(self):
repo_ids = self.env['repository.repository'].search([])
for r in repo_ids:
r.update()
service.server.restart()
def action_update(self):
self.ensure_one()
self.update()
service.server.restart()
def update(self):
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
try:
now_time = datetime.now() + timedelta(seconds=60)
cron_obj = self.env['ir.cron']
res = Git(self.path, self.user, self.password)
res.load()
res.update(self.source)
for l in res.log():
self.log_(l)
# self.install_requirements()
self._compute_apps()
model_id = self.env['ir.model'].search(
[('model', '=', 'ir.module.module')])
cron_data = {
'name': "Update Modules",
'code': 'model.upgrade_changed_checksum(%s)' % self.id,
'nextcall': now_time,
'numbercall': -1,
'user_id': self._uid,
'model_id': model_id.id,
'state': 'code',
}
cron = cron_obj.sudo().create(cron_data)
except Exception as e:
raise exceptions.Warning(_("'%s':\n%s") % (self.source, e))
def action_disable(self):
self.ensure_one()
self.state = 'disabled'
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
addons_path = config['addons_path'].split(',')
if self.path in addons_path:
if self.module_ids.filtered(lambda r: r.state not in (
'uninstalled', 'uninstallable')):
raise exceptions.Warning(
_('Some modules of this repository are installed.'))
addons_path.remove(self.path)
config['addons_path'] = ','.join(addons_path)
config.save()
def clone(self):
self.state = 'cloned'
self.ensure_one()
self.path = path_join(self.addons_paths, re.compile(r'[^0-9a-zA-Z]+').sub('_', self.source + self.branch))
shutil.rmtree(self.path)
try:
res = Git(self.path)
res.init(self.source, branch=self.branch, user=self.user, password=self.password)
res.load()
self.env.cr.commit()
service.server.restart()
except Exception as e:
raise exceptions.Warning(_(
"An error has occurred while Clone '%s':\n%s") % (self.source, e))
def _default_repository_ids(self):
res = self.env['repository.repository']
for path in config['addons_path'].split(','):
git = Git(path)
if git.load():
data = git.info()
result = res.search([('path', '=', data['path'])])
if not result:
data.update({'state': 'enabled'})
result = res.create(data)
result._compute_apps()
self.env.cr.commit()
def remove_finish_import_crons(self):
model_id = self.env['ir.model'].search(
[('model', '=', 'repository.repository')])
cron_ids = self.env['ir.cron'].search(
[('model_id', '=', model_id.id)])
# Remove completed cron
cron_ids.unlink()
def find_modules(path):
return [module for module in os.listdir(path) if any(map(
lambda f: isfile(path_join(path, module, f)), (
'__manifest__.py', '__openerp__.py')))]
class Git():
_source_http = None
_source_git = None
_repo = None
_user = None
_pass = None
_path = None
_output_list = []
def __init__(self, path=None, user=None, password=None):
self._path = path
self._user = user
self._pass = password
def remove(self):
if self.is_initialized() and not self.is_clean():
raise exceptions.Warning(_("Error, Repository no clean."))
if self._path and is_dir(self._path):
shutil.rmtree(self._path)
def is_initialized(self):
return not not self._repo
def init(self, source, branch=None, user=None, password=None):
self._user = user
self._pass = password
if not self.is_initialized():
if not self._user:
self._repo = Repo.clone_from(source, self._path, **{
'branch': branch, 'single-branch': True, 'depth': 1})
self._source_http = source
else:
source = source.replace('https://', '')
source_git = "https://" + self._user + ":" + self._pass + "@" + source
self._source_git=source_git
self._repo = Repo.clone_from(source_git, self._path, **{
'branch': branch, 'single-branch': True, 'depth': 1})
self._source_http = source_git
def is_clean(self):
return self.is_initialized() and not self._repo.is_dirty(untracked_files=True)
def load(self, **kwargs):
if not self._repo:
if self._path and is_dir(path_join(self._path, '.git')):
|
else:
return False
def info(self):
branch = self._repo.active_branch.name
curr_rev = self._repo.rev_parse(branch)
git = self.info_base()
source = self._repo.remotes.origin.url.split('@')
if len(source) > 1:
source = "https://" + source[1]
else:
source = self._repo.remotes.origin.url
return dict(g | self._repo = Repo(self._path)
return True | conditional_block |
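The find_modules helper in the row above treats any subdirectory that ships an Odoo manifest as a module. Below is a self-contained sketch of the same check, runnable outside Odoo; the addons directory in the comment is hypothetical.

# Standalone sketch of the module-discovery logic used by find_modules above.
import os
from os.path import isfile, join as path_join

def find_modules(path):
    # A directory counts as an Odoo module if it contains __manifest__.py
    # (current releases) or the legacy __openerp__.py.
    return [
        module for module in os.listdir(path)
        if any(isfile(path_join(path, module, manifest))
               for manifest in ('__manifest__.py', '__openerp__.py'))
    ]

# print(find_modules('/opt/odoo/custom_addons'))  # hypothetical addons directory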
repository_repository.py | ('ir.module.module', string='Modules')
module_count = fields.Integer('Modules')
state = fields.Selection(string="Estado",
selection=[('draft', 'Borrador'), ('cloned', 'Clonado'), ('enabled', 'Enabled'),
('disabled', 'Disabled')], default='draft', readonly=True,)
addons_paths = fields.Selection(nlist_path,
string="Add-ons Paths", help="Please choose one of these directories to put "
"your module in", )
password = fields.Char(string='Password', required=False)
user = fields.Char(string='User', required=False)
log = fields.Char(string='Log', required=False)
def log_(self, mensaje):
now = datetime.now()
self.write({'log': '\n' + str(now.strftime("%m/%d/%Y, %H:%M:%S")) + " " + str(mensaje) + " " + str(self.log)})
requiremet = fields.Char(
string='Requiremet',
required=False)
def _compute_apps(self):
module = self.env['ir.module.module']
curr_addons_path = set(config['addons_path'].split(','))
if self.path in curr_addons_path:
self.state = 'enabled'
if self.state == 'enabled':
module_names = find_modules(self.path)
self.module_ids = module.search([('name', 'in', module_names)])
self.module_count = len(self.module_ids)
else:
self.module_ids = False
self.module_count = 0
def copy(self, default=None):
raise exceptions.Warning(_("The repository cannot be cloned."))
def unlink(self):
if self.env.context.get('remove_repository'):
for rec in self:
if rec.state == 'enabled':
raise exceptions.Warning(_('Unable to remove an enabled repository.'))
res = Git(self.path)
res.load()
res.remove()
return super(RepositoryRepository, self).unlink()
def action_open_modules(self):
self.ensure_one()
return {
'name': self.source,
'type': 'ir.actions.act_window', | 'target': 'current',
'domain': [('id', 'in', self.module_ids.ids)]
}
def install_requirements(self):
try:
requirement_file = self.path + '/requirements.txt'
if os.path.exists(requirement_file):
subprocess.check_call(["pip3", "install", "-r", requirement_file])
except Exception as e:
log_("Exception exception occured: {}".format(e))
def action_enabled(self):
self.ensure_one()
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
addons_path = config['addons_path'].split(',')
if config._is_addons_path(self.path) and self.path not in addons_path:
addons_path.insert(0, self.path)
config['addons_path'] = ','.join(addons_path)
config.save()
self.state = 'enabled'
requirement_file = self.path + '/requiremet.txt'
if os.path.exists(requirement_file):
f = open(requirement_file, "r")
self.requiremet = f.read()
self.install_requirements()
self._compute_apps()
return self.env.ref(
'base.action_view_base_module_update').read()[0]
def action_remove(self):
self.ensure_one()
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
try:
self.with_context(remove_repository=True).unlink()
except Exception as e:
raise exceptions.Warning(_(" '%s':\n%s") % (self.source, e))
return {'type': 'ir.actions.act_window_close'}
def restart(self):
service.server.restart()
def pull_all(self):
repo_ids = self.env['repository.repository'].search([])
for r in repo_ids:
r.update()
service.server.restart()
def action_update(self):
self.ensure_one()
self.update()
service.server.restart()
def update(self):
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
try:
now_time = datetime.now() + timedelta(seconds=60)
cron_obj = self.env['ir.cron']
res = Git(self.path, self.user, self.password)
res.load()
res.update(self.source)
for l in res.log():
self.log_(l)
# self.install_requirements()
self._compute_apps()
model_id = self.env['ir.model'].search(
[('model', '=', 'ir.module.module')])
cron_data = {
'name': "Update Modules",
'code': 'model.upgrade_changed_checksum(%s)' % self.id,
'nextcall': now_time,
'numbercall': -1,
'user_id': self._uid,
'model_id': model_id.id,
'state': 'code',
}
cron = cron_obj.sudo().create(cron_data)
except Exception as e:
raise exceptions.Warning(_("'%s':\n%s") % (self.source, e))
def action_disable(self):
self.ensure_one()
self.state = 'disabled'
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
addons_path = config['addons_path'].split(',')
if self.path in addons_path:
if self.module_ids.filtered(lambda r: r.state not in (
'uninstalled', 'uninstallable')):
raise exceptions.Warning(
_('Some modules of this repository are installed.'))
addons_path.remove(self.path)
config['addons_path'] = ','.join(addons_path)
config.save()
def clone(self):
self.state = 'cloned'
self.ensure_one()
self.path = path_join(self.addons_paths, re.compile(r'[^0-9a-zA-Z]+').sub('_', self.source + self.branch))
shutil.rmtree(self.path)
try:
res = Git(self.path)
res.init(self.source, branch=self.branch, user=self.user, password=self.password)
res.load()
self.env.cr.commit()
service.server.restart()
except Exception as e:
raise exceptions.Warning(_(
"An error has occurred while Clone '%s':\n%s") % (self.source, e))
def _default_repository_ids(self):
res = self.env['repository.repository']
for path in config['addons_path'].split(','):
git = Git(path)
if git.load():
data = git.info()
result = res.search([('path', '=', data['path'])])
if not result:
data.update({'state': 'enabled'})
result = res.create(data)
result._compute_apps()
self.env.cr.commit()
def remove_finish_import_crons(self):
model_id = self.env['ir.model'].search(
[('model', '=', 'repository.repository')])
cron_ids = self.env['ir.cron'].search(
[('model_id', '=', model_id.id)])
# Remove completed cron
cron_ids.unlink()
def find_modules(path):
return [module for module in os.listdir(path) if any(map(
lambda f: isfile(path_join(path, module, f)), (
'__manifest__.py', '__openerp__.py')))]
class Git():
_source_http = None
_source_git = None
_repo = None
_user = None
_pass = None
_path = None
_output_list = []
def __init__(self, path=None, user=None, password=None):
self._path = path
self._user = user
self._pass = password
def remove(self):
if self.is_initialized() and not self.is_clean():
raise exceptions.Warning(_("Error, Repository no clean."))
if self._path and is_dir(self._path):
shutil.rmtree(self._path)
def is_initialized(self):
return not not self._repo
def init(self, source, branch=None, user=None, password=None):
self._user = user
self._pass = password
if not self.is_initialized():
if not self._user:
self._repo = Repo.clone_from(source, self._path, **{
'branch': branch, 'single-branch': True, 'depth': 1})
self._source_http = source
else:
source = source.replace('https://', '')
source_git = "https://" + self._user + ":" + self._pass + "@" + source
self._source_git=source_git
self._repo = Repo.clone_from(source_git, self._path, **{
'branch': branch, 'single-branch': True, 'depth': 1})
self._source_http = source_git
def is_clean(self):
return self.is_initialized() and not self._repo.is_dirty(untracked_files=True)
def load(self, **kwargs):
if not self._repo:
if self._path and is_dir(path_join(self._path, '.git')):
self._repo = Repo(self._path)
return True
else:
return False
def info(self):
branch = self._repo.active_branch.name
curr_rev = self._repo.rev_parse(branch)
git = self.info_base()
source = self._repo.remotes.origin.url.split('@')
if len(source) > 1:
source = "https://" + source[1]
else:
source = self._repo.remotes.origin.url
return dict(g | 'res_model': 'ir.module.module',
'view_type': 'form',
'view_mode': 'kanban,tree,form', | random_line_split |
repository_repository.py | ir.module.module', string='Modules')
module_count = fields.Integer('Modules')
state = fields.Selection(string="Estado",
selection=[('draft', 'Borrador'), ('cloned', 'Clonado'), ('enabled', 'Enabled'),
('disabled', 'Disabled')], default='draft', readonly=True,)
addons_paths = fields.Selection(nlist_path,
string="Add-ons Paths", help="Please choose one of these directories to put "
"your module in", )
password = fields.Char(string='Password', required=False)
user = fields.Char(string='User', required=False)
log = fields.Char(string='Log', required=False)
def log_(self, mensaje):
now = datetime.now()
self.write({'log': '\n' + str(now.strftime("%m/%d/%Y, %H:%M:%S")) + " " + str(mensaje) + " " + str(self.log)})
requiremet = fields.Char(
string='Requiremet',
required=False)
def _compute_apps(self):
module = self.env['ir.module.module']
curr_addons_path = set(config['addons_path'].split(','))
if self.path in curr_addons_path:
self.state = 'enabled'
if self.state == 'enabled':
module_names = find_modules(self.path)
self.module_ids = module.search([('name', 'in', module_names)])
self.module_count = len(self.module_ids)
else:
self.module_ids = False
self.module_count = 0
def copy(self, default=None):
raise exceptions.Warning(_("The repository cannot be cloned."))
def unlink(self):
if self.env.context.get('remove_repository'):
for rec in self:
if rec.state == 'enabled':
raise exceptions.Warning(_('Unable to remove an enabled repository.'))
res = Git(self.path)
res.load()
res.remove()
return super(RepositoryRepository, self).unlink()
def action_open_modules(self):
self.ensure_one()
return {
'name': self.source,
'type': 'ir.actions.act_window',
'res_model': 'ir.module.module',
'view_type': 'form',
'view_mode': 'kanban,tree,form',
'target': 'current',
'domain': [('id', 'in', self.module_ids.ids)]
}
def install_requirements(self):
try:
requirement_file = self.path + '/requirements.txt'
if os.path.exists(requirement_file):
subprocess.check_call(["pip3", "install", "-r", requirement_file])
except Exception as e:
log_("Exception exception occured: {}".format(e))
def action_enabled(self):
self.ensure_one()
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
addons_path = config['addons_path'].split(',')
if config._is_addons_path(self.path) and self.path not in addons_path:
addons_path.insert(0, self.path)
config['addons_path'] = ','.join(addons_path)
config.save()
self.state = 'enabled'
requirement_file = self.path + '/requiremet.txt'
if os.path.exists(requirement_file):
f = open(requirement_file, "r")
self.requiremet = f.read()
self.install_requirements()
self._compute_apps()
return self.env.ref(
'base.action_view_base_module_update').read()[0]
def action_remove(self):
self.ensure_one()
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
try:
self.with_context(remove_repository=True).unlink()
except Exception as e:
raise exceptions.Warning(_(" '%s':\n%s") % (self.source, e))
return {'type': 'ir.actions.act_window_close'}
def restart(self):
service.server.restart()
def pull_all(self):
repo_ids = self.env['repository.repository'].search([])
for r in repo_ids:
r.update()
service.server.restart()
def action_update(self):
self.ensure_one()
self.update()
service.server.restart()
def update(self):
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
try:
now_time = datetime.now() + timedelta(seconds=60)
cron_obj = self.env['ir.cron']
res = Git(self.path, self.user, self.password)
res.load()
res.update(self.source)
for l in res.log():
self.log_(l)
# self.install_requirements()
self._compute_apps()
model_id = self.env['ir.model'].search(
[('model', '=', 'ir.module.module')])
cron_data = {
'name': "Update Modules",
'code': 'model.upgrade_changed_checksum(%s)' % self.id,
'nextcall': now_time,
'numbercall': -1,
'user_id': self._uid,
'model_id': model_id.id,
'state': 'code',
}
cron = cron_obj.sudo().create(cron_data)
except Exception as e:
raise exceptions.Warning(_("'%s':\n%s") % (self.source, e))
def action_disable(self):
self.ensure_one()
self.state = 'disabled'
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
addons_path = config['addons_path'].split(',')
if self.path in addons_path:
if self.module_ids.filtered(lambda r: r.state not in (
'uninstalled', 'uninstallable')):
raise exceptions.Warning(
_('Some modules of this repository are installed.'))
addons_path.remove(self.path)
config['addons_path'] = ','.join(addons_path)
config.save()
def clone(self):
self.state = 'cloned'
self.ensure_one()
self.path = path_join(self.addons_paths, re.compile(r'[^0-9a-zA-Z]+').sub('_', self.source + self.branch))
shutil.rmtree(self.path)
try:
res = Git(self.path)
res.init(self.source, branch=self.branch, user=self.user, password=self.password)
res.load()
self.env.cr.commit()
service.server.restart()
except Exception as e:
raise exceptions.Warning(_(
"An error has occurred while Clone '%s':\n%s") % (self.source, e))
def _default_repository_ids(self):
res = self.env['repository.repository']
for path in config['addons_path'].split(','):
git = Git(path)
if git.load():
data = git.info()
result = res.search([('path', '=', data['path'])])
if not result:
data.update({'state': 'enabled'})
result = res.create(data)
result._compute_apps()
self.env.cr.commit()
def remove_finish_import_crons(self):
model_id = self.env['ir.model'].search(
[('model', '=', 'repository.repository')])
cron_ids = self.env['ir.cron'].search(
[('model_id', '=', model_id.id)])
# Remove completed cron
cron_ids.unlink()
def | (path):
return [module for module in os.listdir(path) if any(map(
lambda f: isfile(path_join(path, module, f)), (
'__manifest__.py', '__openerp__.py')))]
class Git():
_source_http = None
_source_git = None
_repo = None
_user = None
_pass = None
_path = None
_output_list = []
def __init__(self, path=None, user=None, password=None):
self._path = path
self._user = user
self._pass = password
def remove(self):
if self.is_initialized() and not self.is_clean():
raise exceptions.Warning(_("Error, Repository no clean."))
if self._path and is_dir(self._path):
shutil.rmtree(self._path)
def is_initialized(self):
return not not self._repo
def init(self, source, branch=None, user=None, password=None):
self._user = user
self._pass = password
if not self.is_initialized():
if not self._user:
self._repo = Repo.clone_from(source, self._path, **{
'branch': branch, 'single-branch': True, 'depth': 1})
self._source_http = source
else:
source = source.replace('https://', '')
source_git = "https://" + self._user + ":" + self._pass + "@" + source
self._source_git=source_git
self._repo = Repo.clone_from(source_git, self._path, **{
'branch': branch, 'single-branch': True, 'depth': 1})
self._source_http = source_git
def is_clean(self):
return self.is_initialized() and not self._repo.is_dirty(untracked_files=True)
def load(self, **kwargs):
if not self._repo:
if self._path and is_dir(path_join(self._path, '.git')):
self._repo = Repo(self._path)
return True
else:
return False
def info(self):
branch = self._repo.active_branch.name
curr_rev = self._repo.rev_parse(branch)
git = self.info_base()
source = self._repo.remotes.origin.url.split('@')
if len(source) > 1:
source = "https://" + source[1]
else:
source = self._repo.remotes.origin.url
return dict(g | find_modules | identifier_name |
repository_repository.py | ('ir.module.module', string='Modules')
module_count = fields.Integer('Modules')
state = fields.Selection(string="Estado",
selection=[('draft', 'Borrador'), ('cloned', 'Clonado'), ('enabled', 'Enabled'),
('disabled', 'Disabled')], default='draft', readonly=True,)
addons_paths = fields.Selection(nlist_path,
string="Add-ons Paths", help="Please choose one of these directories to put "
"your module in", )
password = fields.Char(string='Password', required=False)
user = fields.Char(string='User', required=False)
log = fields.Char(string='Log', required=False)
def log_(self, mensaje):
now = datetime.now()
self.write({'log': '\n' + str(now.strftime("%m/%d/%Y, %H:%M:%S")) + " " + str(mensaje) + " " + str(self.log)})
requiremet = fields.Char(
string='Requiremet',
required=False)
def _compute_apps(self):
module = self.env['ir.module.module']
curr_addons_path = set(config['addons_path'].split(','))
if self.path in curr_addons_path:
self.state = 'enabled'
if self.state == 'enabled':
module_names = find_modules(self.path)
self.module_ids = module.search([('name', 'in', module_names)])
self.module_count = len(self.module_ids)
else:
self.module_ids = False
self.module_count = 0
def copy(self, default=None):
raise exceptions.Warning(_("The repository cannot be cloned."))
def unlink(self):
if self.env.context.get('remove_repository'):
for rec in self:
if rec.state == 'enabled':
raise exceptions.Warning(_('Unable to remove an enabled repository.'))
res = Git(self.path)
res.load()
res.remove()
return super(RepositoryRepository, self).unlink()
def action_open_modules(self):
self.ensure_one()
return {
'name': self.source,
'type': 'ir.actions.act_window',
'res_model': 'ir.module.module',
'view_type': 'form',
'view_mode': 'kanban,tree,form',
'target': 'current',
'domain': [('id', 'in', self.module_ids.ids)]
}
def install_requirements(self):
try:
requirement_file = self.path + '/requirements.txt'
if os.path.exists(requirement_file):
subprocess.check_call(["pip3", "install", "-r", requirement_file])
except Exception as e:
log_("Exception exception occured: {}".format(e))
def action_enabled(self):
self.ensure_one()
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
addons_path = config['addons_path'].split(',')
if config._is_addons_path(self.path) and self.path not in addons_path:
addons_path.insert(0, self.path)
config['addons_path'] = ','.join(addons_path)
config.save()
self.state = 'enabled'
requirement_file = self.path + '/requiremet.txt'
if os.path.exists(requirement_file):
f = open(requirement_file, "r")
self.requiremet = f.read()
self.install_requirements()
self._compute_apps()
return self.env.ref(
'base.action_view_base_module_update').read()[0]
def action_remove(self):
self.ensure_one()
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
try:
self.with_context(remove_repository=True).unlink()
except Exception as e:
raise exceptions.Warning(_(" '%s':\n%s") % (self.source, e))
return {'type': 'ir.actions.act_window_close'}
def restart(self):
service.server.restart()
def pull_all(self):
repo_ids = self.env['repository.repository'].search([])
for r in repo_ids:
r.update()
service.server.restart()
def action_update(self):
self.ensure_one()
self.update()
service.server.restart()
def update(self):
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
try:
now_time = datetime.now() + timedelta(seconds=60)
cron_obj = self.env['ir.cron']
res = Git(self.path, self.user, self.password)
res.load()
res.update(self.source)
for l in res.log():
self.log_(l)
# self.install_requirements()
self._compute_apps()
model_id = self.env['ir.model'].search(
[('model', '=', 'ir.module.module')])
cron_data = {
'name': "Update Modules",
'code': 'model.upgrade_changed_checksum(%s)' % self.id,
'nextcall': now_time,
'numbercall': -1,
'user_id': self._uid,
'model_id': model_id.id,
'state': 'code',
}
cron = cron_obj.sudo().create(cron_data)
except Exception as e:
raise exceptions.Warning(_("'%s':\n%s") % (self.source, e))
def action_disable(self):
self.ensure_one()
self.state = 'disabled'
if not self.env.user.has_group('base.group_system'):
raise exceptions.AccessDenied
addons_path = config['addons_path'].split(',')
if self.path in addons_path:
if self.module_ids.filtered(lambda r: r.state not in (
'uninstalled', 'uninstallable')):
raise exceptions.Warning(
_('Some modules of this repository are installed.'))
addons_path.remove(self.path)
config['addons_path'] = ','.join(addons_path)
config.save()
def clone(self):
self.state = 'cloned'
self.ensure_one()
self.path = path_join(self.addons_paths, re.compile(r'[^0-9a-zA-Z]+').sub('_', self.source + self.branch))
shutil.rmtree(self.path)
try:
res = Git(self.path)
res.init(self.source, branch=self.branch, user=self.user, password=self.password)
res.load()
self.env.cr.commit()
service.server.restart()
except Exception as e:
raise exceptions.Warning(_(
"An error has occurred while Clone '%s':\n%s") % (self.source, e))
def _default_repository_ids(self):
res = self.env['repository.repository']
for path in config['addons_path'].split(','):
git = Git(path)
if git.load():
data = git.info()
result = res.search([('path', '=', data['path'])])
if not result:
data.update({'state': 'enabled'})
result = res.create(data)
result._compute_apps()
self.env.cr.commit()
def remove_finish_import_crons(self):
model_id = self.env['ir.model'].search(
[('model', '=', 'repository.repository')])
cron_ids = self.env['ir.cron'].search(
[('model_id', '=', model_id.id)])
# Remove completed cron
cron_ids.unlink()
def find_modules(path):
|
class Git():
_source_http = None
_source_git = None
_repo = None
_user = None
_pass = None
_path = None
_output_list = []
def __init__(self, path=None, user=None, password=None):
self._path = path
self._user = user
self._pass = password
def remove(self):
if self.is_initialized() and not self.is_clean():
raise exceptions.Warning(_("Error, Repository no clean."))
if self._path and is_dir(self._path):
shutil.rmtree(self._path)
def is_initialized(self):
return not not self._repo
def init(self, source, branch=None, user=None, password=None):
self._user = user
self._pass = password
if not self.is_initialized():
if not self._user:
self._repo = Repo.clone_from(source, self._path, **{
'branch': branch, 'single-branch': True, 'depth': 1})
self._source_http = source
else:
source = source.replace('https://', '')
source_git = "https://" + self._user + ":" + self._pass + "@" + source
self._source_git=source_git
self._repo = Repo.clone_from(source_git, self._path, **{
'branch': branch, 'single-branch': True, 'depth': 1})
self._source_http = source_git
def is_clean(self):
return self.is_initialized() and not self._repo.is_dirty(untracked_files=True)
def load(self, **kwargs):
if not self._repo:
if self._path and is_dir(path_join(self._path, '.git')):
self._repo = Repo(self._path)
return True
else:
return False
def info(self):
branch = self._repo.active_branch.name
curr_rev = self._repo.rev_parse(branch)
git = self.info_base()
source = self._repo.remotes.origin.url.split('@')
if len(source) > 1:
source = "https://" + source[1]
else:
source = self._repo.remotes.origin.url
return dict(g | return [module for module in os.listdir(path) if any(map(
lambda f: isfile(path_join(path, module, f)), (
'__manifest__.py', '__openerp__.py')))] | identifier_body |
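Git.init in the row above authenticates by splicing the username and password into the https clone URL. The sketch below isolates that transformation and adds percent-encoding, which the original code does not do; without it, credentials containing ':', '@' or '/' would corrupt the URL.

# Sketch of the authenticated-URL construction used by Git.init above.
# The quote() calls are an added safeguard, not present in the original.
from urllib.parse import quote

def build_auth_url(source, user, password):
    # 'https://github.com/org/repo.git' -> 'https://bot:p%40ss@github.com/org/repo.git'
    host_and_path = source.replace('https://', '')
    return 'https://{}:{}@{}'.format(quote(user, safe=''), quote(password, safe=''), host_and_path)

# print(build_auth_url('https://github.com/org/repo.git', 'bot', 'p@ss:word'))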
mod.rs | use anyhow::{anyhow, Result};
use bevy_ecs::{
ComponentId, DynamicFetch, DynamicFetchResult, DynamicQuery, DynamicSystem, EntityBuilder,
QueryAccess, StatefulQuery, TypeAccess, TypeInfo, World,
};
use bincode::DefaultOptions;
use fs::OpenOptions;
use io::IoSlice;
use mem::ManuallyDrop;
use quill::ecs::TypeLayout;
use wasmer::{
import_namespace, imports, Array, FromToNativeWasmType, Function, HostEnvInitError, Instance,
LazyInit, Memory, Module, NativeFunc, Store, Type, ValueType, WasmPtr, WasmTypeList, WasmerEnv,
JIT, LLVM,
};
use wasmer_wasi::WasiState;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
#[derive(Default)]
struct PluginEnv<S> {
memory: LazyInit<Memory>,
buffer_reserve: LazyInit<NativeFunc<(WasmPtr<RawBuffer>, u32)>>,
rpcs: Arc<Mutex<HashMap<String, Box<dyn Fn(&mut Buffer, &PluginEnv<S>) -> Result<()> + Send>>>>,
state: Arc<Mutex<S>>,
layouts: Arc<Mutex<Layouts>>,
}
impl<S: Send + Sync + 'static> Clone for PluginEnv<S> {
fn clone(&self) -> Self {
Self {
memory: self.memory.clone(),
buffer_reserve: self.buffer_reserve.clone(),
rpcs: self.rpcs.clone(),
state: self.state.clone(),
layouts: Default::default(),
}
}
}
impl<S: Send + Sync + 'static> WasmerEnv for PluginEnv<S> {
fn init_with_instance(&mut self, instance: &Instance) -> Result<(), HostEnvInitError> {
let memory = instance.exports.get_memory("memory")?;
self.memory.initialize(memory.clone());
self.buffer_reserve.initialize(
instance
.exports
.get_native_function("__quill_buffer_reserve")?,
);
Ok(())
}
}
impl<S: Send + Sync + 'static> PluginEnv<S> {
fn memory(&self) -> &Memory {
// TODO: handle errors.
self.memory.get_ref().unwrap()
}
fn buffer_reserve(&self) -> &NativeFunc<(WasmPtr<RawBuffer>, u32)> {
self.buffer_reserve.get_ref().unwrap()
}
fn buffer(&self, raw: WasmPtr<RawBuffer>) -> Buffer {
Buffer {
memory: self.memory(),
reserve: self.buffer_reserve(),
raw,
}
}
fn add_rpc<
'a,
Args: Serialize + DeserializeOwned + 'static,
R: Serialize + DeserializeOwned + 'static,
>(
&mut self,
name: &str,
callback: fn(&PluginEnv<S>, Args) -> R,
) -> Result<()> {
self.rpcs
.lock()
.map_err(|_| anyhow!("could not lock rpcs"))?
.insert(
name.to_owned(),
Box::new(move |mut buffer: &mut Buffer, env: &PluginEnv<S>| {
let (_, args): (String, Args) =
bincode::deserialize(buffer.as_slice()).unwrap();
let result = callback(env, args);
buffer.clear();
bincode::serialize_into(buffer, &result).unwrap();
Ok(())
}),
);
Ok(())
}
fn call<Args: Serialize, R: DeserializeOwned>(&self, name: &str, args: Args) -> Result<R> {
// TODO: requires access to buffer.
todo!()
}
}
pub struct Plugin {
instance: Instance,
env: PluginEnv<World>,
}
impl Plugin {
pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
let mut env = PluginEnv::default();
let store = Store::new(&JIT::new(LLVM::default()).engine());
let module = Module::from_file(&store, &path)?;
let mut wasi_env = WasiState::new(
path.as_ref()
.file_name()
.and_then(OsStr::to_str)
.unwrap_or("unkown"),
)
.finalize()?;
let mut import_object = wasi_env.import_object(&module)?;
import_object.register(
"env",
import_namespace!({
"__quill_host_call" => Function::new_native_with_env(&store, env.clone(), __quill_host_call),
}),
);
// env.add_rpc("players_push", |state, player: String| state.push(player))?;
// // TODO: Return reference to state?
// env.add_rpc("players", |state, ()| state.clone())?;
env.add_rpc("world_spawn", |env, entity: quill::ecs::Entity| {
let mut world = env.state.lock().unwrap();
let mut layouts = env.layouts.lock().unwrap();
let mut builder = EntityBuilder::new();
for (layout, data) in entity.components {
builder.add_dynamic(
TypeInfo::of_external(
layouts.external_id(&layout),
Layout::new::<Vec<u8>>(),
|_| (),
),
data.as_slice(),
);
}
world.spawn(builder.build());
})?;
env.add_rpc(
"world_query",
// TODO: world should not be the state but union(world, layouts)
|env, access: quill::ecs::QueryAccess| {
let world = env.state.lock().unwrap();
let mut layouts = env.layouts.lock().unwrap();
let query = access.query(&mut layouts).unwrap();
let access = Default::default();
let mut query: StatefulQuery<DynamicQuery, DynamicQuery> =
StatefulQuery::new(&world, &access, query);
for entity in query.iter_mut() {
entity.immutable;
entity.mutable;
}
},
)?;
let instance = Instance::new(&module, &import_object)?;
let start = instance.exports.get_function("_start")?;
start.call(&[])?;
Ok(Plugin { instance, env })
}
}
#[derive(Default)]
pub struct Layouts {
layouts: HashMap<quill::ecs::TypeLayout, u64>,
}
impl Layouts {
pub fn component_id(&mut self, layout: &TypeLayout) -> ComponentId {
ComponentId::ExternalId(self.external_id(layout))
}
pub fn external_id(&mut self, layout: &TypeLayout) -> u64 {
if let Some(component_id) = self.layouts.get(&layout) {
*component_id
} else {
let next = self.layouts.len() as u64;
self.layouts.insert(layout.clone(), next);
next
}
}
}
trait IntoBevyAccess {
fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess>;
fn component_ids(&self) -> Result<Vec<ComponentId>>;
fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery>;
}
impl IntoBevyAccess for quill::ecs::QueryAccess {
fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess> {
use quill::ecs::QueryAccess::*;
Ok(match self {
None => QueryAccess::None,
Read(layout) => QueryAccess::Read(layouts.component_id(layout), "??"),
Write(layout) => QueryAccess::Write(layouts.component_id(layout), "??"),
Optional(access) => {
QueryAccess::optional(IntoBevyAccess::access(access.as_ref(), layouts)?)
}
With(layout, access) => QueryAccess::With(
layouts.component_id(layout),
Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?),
),
Without(layout, access) => QueryAccess::Without(
layouts.component_id(layout),
Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?),
),
Union(accesses) => QueryAccess::Union(
accesses
.into_iter()
.map(|access| IntoBevyAccess::access(access, layouts))
.collect::<Result<Vec<QueryAccess>>>()?,
),
})
}
fn component_ids(&self) -> Result<Vec<ComponentId>> {
todo!()
}
fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery> {
let mut query = DynamicQuery::default();
query.access = self.access(layouts)?;
// TODO: TypeInfo
Ok(query)
}
}
struct Buffer<'a> {
memory: &'a Memory,
// fn reserve(ptr: WasmPtr<u8, Array>, cap: u32, len: u32, additional: u32)
reserve: &'a NativeFunc<(WasmPtr<RawBuffer>, u32)>,
raw: WasmPtr<RawBuffer>,
}
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct RawBuffer {
ptr: WasmPtr<u8, Array>,
cap: u32,
len: u32,
}
unsafe impl ValueType for RawBuffer {}
impl<'a> Buffer<'a> {
fn reserve(&mut self, additional: u32) {
let raw = self.raw.deref(self.memory).unwrap().get();
if raw.cap < raw.len + additional {
self.reserve.call(self.raw, additional).unwrap();
}
}
fn clear(&mut self) {
let raw_cell = self.raw.deref(self.memory).unwrap();
raw_cell.set(RawBuffer {
| todo, u32, vec,
};
| random_line_split |
|
mod.rs | LazyInit<NativeFunc<(WasmPtr<RawBuffer>, u32)>>,
rpcs: Arc<Mutex<HashMap<String, Box<dyn Fn(&mut Buffer, &PluginEnv<S>) -> Result<()> + Send>>>>,
state: Arc<Mutex<S>>,
layouts: Arc<Mutex<Layouts>>,
}
impl<S: Send + Sync + 'static> Clone for PluginEnv<S> {
fn clone(&self) -> Self {
Self {
memory: self.memory.clone(),
buffer_reserve: self.buffer_reserve.clone(),
rpcs: self.rpcs.clone(),
state: self.state.clone(),
layouts: Default::default(),
}
}
}
impl<S: Send + Sync + 'static> WasmerEnv for PluginEnv<S> {
fn init_with_instance(&mut self, instance: &Instance) -> Result<(), HostEnvInitError> {
let memory = instance.exports.get_memory("memory")?;
self.memory.initialize(memory.clone());
self.buffer_reserve.initialize(
instance
.exports
.get_native_function("__quill_buffer_reserve")?,
);
Ok(())
}
}
impl<S: Send + Sync + 'static> PluginEnv<S> {
fn memory(&self) -> &Memory {
// TODO: handle errors.
self.memory.get_ref().unwrap()
}
fn buffer_reserve(&self) -> &NativeFunc<(WasmPtr<RawBuffer>, u32)> {
self.buffer_reserve.get_ref().unwrap()
}
fn buffer(&self, raw: WasmPtr<RawBuffer>) -> Buffer {
Buffer {
memory: self.memory(),
reserve: self.buffer_reserve(),
raw,
}
}
fn add_rpc<
'a,
Args: Serialize + DeserializeOwned + 'static,
R: Serialize + DeserializeOwned + 'static,
>(
&mut self,
name: &str,
callback: fn(&PluginEnv<S>, Args) -> R,
) -> Result<()> {
self.rpcs
.lock()
.map_err(|_| anyhow!("could not lock rpcs"))?
.insert(
name.to_owned(),
Box::new(move |mut buffer: &mut Buffer, env: &PluginEnv<S>| {
let (_, args): (String, Args) =
bincode::deserialize(buffer.as_slice()).unwrap();
let result = callback(env, args);
buffer.clear();
bincode::serialize_into(buffer, &result).unwrap();
Ok(())
}),
);
Ok(())
}
fn call<Args: Serialize, R: DeserializeOwned>(&self, name: &str, args: Args) -> Result<R> {
// TODO: requires access to buffer.
todo!()
}
}
pub struct Plugin {
instance: Instance,
env: PluginEnv<World>,
}
impl Plugin {
pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
let mut env = PluginEnv::default();
let store = Store::new(&JIT::new(LLVM::default()).engine());
let module = Module::from_file(&store, &path)?;
let mut wasi_env = WasiState::new(
path.as_ref()
.file_name()
.and_then(OsStr::to_str)
.unwrap_or("unkown"),
)
.finalize()?;
let mut import_object = wasi_env.import_object(&module)?;
import_object.register(
"env",
import_namespace!({
"__quill_host_call" => Function::new_native_with_env(&store, env.clone(), __quill_host_call),
}),
);
// env.add_rpc("players_push", |state, player: String| state.push(player))?;
// // TODO: Return reference to state?
// env.add_rpc("players", |state, ()| state.clone())?;
env.add_rpc("world_spawn", |env, entity: quill::ecs::Entity| {
let mut world = env.state.lock().unwrap();
let mut layouts = env.layouts.lock().unwrap();
let mut builder = EntityBuilder::new();
for (layout, data) in entity.components {
builder.add_dynamic(
TypeInfo::of_external(
layouts.external_id(&layout),
Layout::new::<Vec<u8>>(),
|_| (),
),
data.as_slice(),
);
}
world.spawn(builder.build());
})?;
env.add_rpc(
"world_query",
// TODO: world should not be the state but union(world, layouts)
|env, access: quill::ecs::QueryAccess| {
let world = env.state.lock().unwrap();
let mut layouts = env.layouts.lock().unwrap();
let query = access.query(&mut layouts).unwrap();
let access = Default::default();
let mut query: StatefulQuery<DynamicQuery, DynamicQuery> =
StatefulQuery::new(&world, &access, query);
for entity in query.iter_mut() {
entity.immutable;
entity.mutable;
}
},
)?;
let instance = Instance::new(&module, &import_object)?;
let start = instance.exports.get_function("_start")?;
start.call(&[])?;
Ok(Plugin { instance, env })
}
}
#[derive(Default)]
pub struct Layouts {
layouts: HashMap<quill::ecs::TypeLayout, u64>,
}
impl Layouts {
pub fn component_id(&mut self, layout: &TypeLayout) -> ComponentId {
ComponentId::ExternalId(self.external_id(layout))
}
pub fn external_id(&mut self, layout: &TypeLayout) -> u64 {
if let Some(component_id) = self.layouts.get(&layout) {
*component_id
} else {
let next = self.layouts.len() as u64;
self.layouts.insert(layout.clone(), next);
next
}
}
}
trait IntoBevyAccess {
fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess>;
fn component_ids(&self) -> Result<Vec<ComponentId>>;
fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery>;
}
impl IntoBevyAccess for quill::ecs::QueryAccess {
fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess> {
use quill::ecs::QueryAccess::*;
Ok(match self {
None => QueryAccess::None,
Read(layout) => QueryAccess::Read(layouts.component_id(layout), "??"),
Write(layout) => QueryAccess::Write(layouts.component_id(layout), "??"),
Optional(access) => {
QueryAccess::optional(IntoBevyAccess::access(access.as_ref(), layouts)?)
}
With(layout, access) => QueryAccess::With(
layouts.component_id(layout),
Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?),
),
Without(layout, access) => QueryAccess::Without(
layouts.component_id(layout),
Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?),
),
Union(accesses) => QueryAccess::Union(
accesses
.into_iter()
.map(|access| IntoBevyAccess::access(access, layouts))
.collect::<Result<Vec<QueryAccess>>>()?,
),
})
}
fn component_ids(&self) -> Result<Vec<ComponentId>> {
todo!()
}
fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery> {
let mut query = DynamicQuery::default();
query.access = self.access(layouts)?;
// TODO: TypeInfo
Ok(query)
}
}
struct Buffer<'a> {
memory: &'a Memory,
// fn reserve(ptr: WasmPtr<u8, Array>, cap: u32, len: u32, additional: u32)
reserve: &'a NativeFunc<(WasmPtr<RawBuffer>, u32)>,
raw: WasmPtr<RawBuffer>,
}
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct RawBuffer {
ptr: WasmPtr<u8, Array>,
cap: u32,
len: u32,
}
unsafe impl ValueType for RawBuffer {}
impl<'a> Buffer<'a> {
fn reserve(&mut self, additional: u32) {
let raw = self.raw.deref(self.memory).unwrap().get();
if raw.cap < raw.len + additional {
self.reserve.call(self.raw, additional).unwrap();
}
}
fn clear(&mut self) {
let raw_cell = self.raw.deref(self.memory).unwrap();
raw_cell.set(RawBuffer {
len: 0,
..raw_cell.get()
})
}
fn push(&mut self, byte: u8) {
self.extend_from_slice(&[byte]);
}
fn extend_from_slice(&mut self, other: &[u8]) {
self.reserve(other.len() as u32);
let raw_cell = self.raw.deref(self.memory).unwrap();
let raw = raw_cell.get();
raw.ptr
.deref(self.memory, raw.len, raw.cap)
.unwrap()
.into_iter()
.zip(other.iter())
.for_each(|(cell, value)| cell.set(*value));
raw_cell.set(RawBuffer {
len: raw.len + other.len() as u32,
..raw
});
}
fn as_slice(&self) -> &[u8] {
self
}
}
impl<'a> Write for Buffer<'a> {
#[inline]
fn | write | identifier_name |
|
mod.rs | , Function, HostEnvInitError, Instance,
LazyInit, Memory, Module, NativeFunc, Store, Type, ValueType, WasmPtr, WasmTypeList, WasmerEnv,
JIT, LLVM,
};
use wasmer_wasi::WasiState;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
#[derive(Default)]
struct PluginEnv<S> {
memory: LazyInit<Memory>,
buffer_reserve: LazyInit<NativeFunc<(WasmPtr<RawBuffer>, u32)>>,
rpcs: Arc<Mutex<HashMap<String, Box<dyn Fn(&mut Buffer, &PluginEnv<S>) -> Result<()> + Send>>>>,
state: Arc<Mutex<S>>,
layouts: Arc<Mutex<Layouts>>,
}
impl<S: Send + Sync + 'static> Clone for PluginEnv<S> {
fn clone(&self) -> Self {
Self {
memory: self.memory.clone(),
buffer_reserve: self.buffer_reserve.clone(),
rpcs: self.rpcs.clone(),
state: self.state.clone(),
layouts: Default::default(),
}
}
}
impl<S: Send + Sync + 'static> WasmerEnv for PluginEnv<S> {
fn init_with_instance(&mut self, instance: &Instance) -> Result<(), HostEnvInitError> {
let memory = instance.exports.get_memory("memory")?;
self.memory.initialize(memory.clone());
self.buffer_reserve.initialize(
instance
.exports
.get_native_function("__quill_buffer_reserve")?,
);
Ok(())
}
}
impl<S: Send + Sync + 'static> PluginEnv<S> {
fn memory(&self) -> &Memory {
// TODO: handle errors.
self.memory.get_ref().unwrap()
}
fn buffer_reserve(&self) -> &NativeFunc<(WasmPtr<RawBuffer>, u32)> {
self.buffer_reserve.get_ref().unwrap()
}
fn buffer(&self, raw: WasmPtr<RawBuffer>) -> Buffer {
Buffer {
memory: self.memory(),
reserve: self.buffer_reserve(),
raw,
}
}
fn add_rpc<
'a,
Args: Serialize + DeserializeOwned + 'static,
R: Serialize + DeserializeOwned + 'static,
>(
&mut self,
name: &str,
callback: fn(&PluginEnv<S>, Args) -> R,
) -> Result<()> {
self.rpcs
.lock()
.map_err(|_| anyhow!("could not lock rpcs"))?
.insert(
name.to_owned(),
Box::new(move |mut buffer: &mut Buffer, env: &PluginEnv<S>| {
let (_, args): (String, Args) =
bincode::deserialize(buffer.as_slice()).unwrap();
let result = callback(env, args);
buffer.clear();
bincode::serialize_into(buffer, &result).unwrap();
Ok(())
}),
);
Ok(())
}
fn call<Args: Serialize, R: DeserializeOwned>(&self, name: &str, args: Args) -> Result<R> {
// TODO: requires access to buffer.
todo!()
}
}
pub struct Plugin {
instance: Instance,
env: PluginEnv<World>,
}
impl Plugin {
pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
let mut env = PluginEnv::default();
let store = Store::new(&JIT::new(LLVM::default()).engine());
let module = Module::from_file(&store, &path)?;
let mut wasi_env = WasiState::new(
path.as_ref()
.file_name()
.and_then(OsStr::to_str)
.unwrap_or("unkown"),
)
.finalize()?;
let mut import_object = wasi_env.import_object(&module)?;
import_object.register(
"env",
import_namespace!({
"__quill_host_call" => Function::new_native_with_env(&store, env.clone(), __quill_host_call),
}),
);
// env.add_rpc("players_push", |state, player: String| state.push(player))?;
// // TODO: Return reference to state?
// env.add_rpc("players", |state, ()| state.clone())?;
env.add_rpc("world_spawn", |env, entity: quill::ecs::Entity| {
let mut world = env.state.lock().unwrap();
let mut layouts = env.layouts.lock().unwrap();
let mut builder = EntityBuilder::new();
for (layout, data) in entity.components {
builder.add_dynamic(
TypeInfo::of_external(
layouts.external_id(&layout),
Layout::new::<Vec<u8>>(),
|_| (),
),
data.as_slice(),
);
}
world.spawn(builder.build());
})?;
env.add_rpc(
"world_query",
// TODO: world should not be the state but union(world, layouts)
|env, access: quill::ecs::QueryAccess| {
let world = env.state.lock().unwrap();
let mut layouts = env.layouts.lock().unwrap();
let query = access.query(&mut layouts).unwrap();
let access = Default::default();
let mut query: StatefulQuery<DynamicQuery, DynamicQuery> =
StatefulQuery::new(&world, &access, query);
for entity in query.iter_mut() {
entity.immutable;
entity.mutable;
}
},
)?;
let instance = Instance::new(&module, &import_object)?;
let start = instance.exports.get_function("_start")?;
start.call(&[])?;
Ok(Plugin { instance, env })
}
}
#[derive(Default)]
pub struct Layouts {
layouts: HashMap<quill::ecs::TypeLayout, u64>,
}
impl Layouts {
pub fn component_id(&mut self, layout: &TypeLayout) -> ComponentId {
ComponentId::ExternalId(self.external_id(layout))
}
pub fn external_id(&mut self, layout: &TypeLayout) -> u64 {
if let Some(component_id) = self.layouts.get(&layout) {
*component_id
} else {
let next = self.layouts.len() as u64;
self.layouts.insert(layout.clone(), next);
next
}
}
}
trait IntoBevyAccess {
fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess>;
fn component_ids(&self) -> Result<Vec<ComponentId>>;
fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery>;
}
impl IntoBevyAccess for quill::ecs::QueryAccess {
fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess> {
use quill::ecs::QueryAccess::*;
Ok(match self {
None => QueryAccess::None,
Read(layout) => QueryAccess::Read(layouts.component_id(layout), "??"),
Write(layout) => QueryAccess::Write(layouts.component_id(layout), "??"),
Optional(access) => {
QueryAccess::optional(IntoBevyAccess::access(access.as_ref(), layouts)?)
}
With(layout, access) => QueryAccess::With(
layouts.component_id(layout),
Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?),
),
Without(layout, access) => QueryAccess::Without(
layouts.component_id(layout),
Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?),
),
Union(accesses) => QueryAccess::Union(
accesses
.into_iter()
.map(|access| IntoBevyAccess::access(access, layouts))
.collect::<Result<Vec<QueryAccess>>>()?,
),
})
}
fn component_ids(&self) -> Result<Vec<ComponentId>> {
todo!()
}
fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery> |
}
struct Buffer<'a> {
memory: &'a Memory,
// fn reserve(ptr: WasmPtr<u8, Array>, cap: u32, len: u32, additional: u32)
reserve: &'a NativeFunc<(WasmPtr<RawBuffer>, u32)>,
raw: WasmPtr<RawBuffer>,
}
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct RawBuffer {
ptr: WasmPtr<u8, Array>,
cap: u32,
len: u32,
}
unsafe impl ValueType for RawBuffer {}
impl<'a> Buffer<'a> {
fn reserve(&mut self, additional: u32) {
let raw = self.raw.deref(self.memory).unwrap().get();
if raw.cap < raw.len + additional {
self.reserve.call(self.raw, additional).unwrap();
}
}
fn clear(&mut self) {
let raw_cell = self.raw.deref(self.memory).unwrap();
raw_cell.set(RawBuffer {
len: 0,
..raw_cell.get()
})
}
fn push(&mut self, byte: u8) {
self.extend_from_slice(&[byte]);
}
fn extend_from_slice(&mut self, other: &[u8]) {
self.reserve(other.len() as u32);
let raw_cell = self.raw.deref(self.memory).unwrap();
let raw = raw_cell.get();
raw.ptr
.deref(self.memory, raw.len, raw.cap)
.unwrap()
. | {
let mut query = DynamicQuery::default();
query.access = self.access(layouts)?;
// TODO: TypeInfo
Ok(query)
} | identifier_body |
scheduler.rs | : usize) -> Result<Worker> {
let client = reqwest::Client::builder().timeout(None).build()?;
let mut url = format!("{}/analyze_address", url);
if timeout > 0 {
url.push_str("_timeout");
}
let timeout = Duration::from_secs((timeout * 60) as u64);
Ok(Worker {
client,
url: url,
timeout,
})
}
fn analyze(&self, address: Address) -> Result<AnalysisSuccess> {
info!("Analyzing {:x}", address.0);
let mut res = if self.timeout > Duration::from_secs(0) {
self
.client
.post(&self.url)
.json(&TimeoutAnalysis { address, timeout: self.timeout})
.send()?
} else {
self
.client
.post(&self.url)
.json(&address)
.send()?
};
Ok(res.json()?)
}
fn check_alive(&self) -> Result<()> {
self.client
.get(&format!("{}/alive", &self.url))
.send()
.map_err(|e| e.into())
.map(|_| ())
}
}
struct WorkerHandle<'a> {
worker: Option<Worker>,
scheduler: &'a Scheduler,
kill: bool,
}
impl<'a> WorkerHandle<'a> {
// specifically consume the handle to force readding the worker
fn analyze(mut self, addr: Address) -> Result<AnalysisSuccess> {
let res = self.worker.as_ref().unwrap().analyze(addr);
if let Err(ref error) = res {
error!("Error analyzing {:x?}, checking worker!", error);
if let Err(_) = self.worker.as_ref().unwrap().check_alive() {
error!("Worker died analyzing {:x?}, shuting down worker!", error);
self.kill = true;
} else {
return Err(Error::retry());
}
}
res
}
}
impl<'a> Drop for WorkerHandle<'a> {
fn drop(&mut self) {
if !self.kill {
let worker = self
.worker
.take()
.expect("Worker replaced before adding back");
self.scheduler.add_worker(worker)
} else {
self.worker
.take()
.expect("Worker replaced before adding back");
}
}
}
#[derive(Debug)]
struct Scheduler {
queue: Arc<Mutex<Vec<Worker>>>,
}
impl Scheduler {
fn new() -> Self {
let queue = Arc::new(Mutex::new(Vec::new()));
Self { queue }
}
fn with_worker_count(urls: Vec<String>, timeout: usize) -> Self |
fn add_worker(&self, worker: Worker) {
self.queue.lock().unwrap().push(worker);
}
fn get_worker(&self) -> WorkerHandle {
let worker;
loop {
if let Some(w) = self.queue.lock().unwrap().pop() {
worker = Some(w);
break;
}
}
WorkerHandle {
worker,
scheduler: self,
kill: false,
}
}
}
type Result<T> = ::std::result::Result<T, Error>;
#[derive(Debug)]
struct Error {
kind: Kind,
}
impl Error {
fn from_str(s: String) -> Self {
Self {
kind: Kind::Execution(s),
}
}
fn retry() -> Self {
Self {
kind: Kind::Retry,
}
}
fn kind(&self) -> &Kind {
&self.kind
}
}
macro_rules! impl_error_kind {
(
$(#[$struct_attr:meta])*
enum Kind {
$( $enum_variant_name:ident($error_type:path), )+ ;
$( $single_variant_name:ident, )+
}
) => {
// meta attributes
$(#[$struct_attr])*
// enum definition
enum Kind {
$( $enum_variant_name($error_type), )+
$( $single_variant_name, )+
}
// impl error conversion for each type
$(
impl ::std::convert::From<$error_type> for Error {
fn from(error: $error_type) -> Self {
Self {
kind: Kind::$enum_variant_name(error),
}
}
}
)+
};
}
impl_error_kind!(#[derive(Debug)]
enum Kind {
Reqwest(reqwest::Error),
SerdeJson(serde_json::Error),
Log(log::SetLoggerError),
IO(std::io::Error),
Execution(String), ;
Retry,
});
fn parse_args<'a>() -> ArgMatches<'a> {
App::new("EthAEG scheduler for analyzing a large list of contracts")
.arg(
Arg::with_name("INPUT")
.help("Set the list of accounts to scan")
.required(true)
.index(1),
)
.arg(
Arg::with_name("SERVER_LIST")
.help("Set the list of backend servers")
.required(true)
.index(2),
)
.arg(Arg::with_name("timeout").long("timeout").takes_value(true).help("Specify a timeout for the analysis, none used by default"))
.arg(Arg::with_name("json").long("json").help("Dump the analysis result in json format."))
.get_matches()
}
fn parse_account_list(path: &str) -> (Arc<Mutex<Vec<(usize, String)>>>, usize) {
let mut acc_list = String::new();
File::open(path)
.expect("Could not open account list")
.read_to_string(&mut acc_list)
.expect("Could not read account list");
let acc_vec: Vec<(usize, String)> = acc_list
.lines()
.filter_map(|line| match ACC_RE.captures(line) {
Some(cap) => {
let capture = cap.get(0).unwrap().as_str();
Some((0, capture.to_string()))
}
None => {
warn!("Could not process: {}", line);
None
}
})
.collect();
let len = acc_vec.len();
(Arc::new(Mutex::new(acc_vec)), len)
}
fn parse_server_list(path: &str) -> Vec<String> {
let mut server_list = String::new();
File::open(path)
.expect("Could not open server list")
.read_to_string(&mut server_list)
.expect("Could not read server list");
server_list
.lines()
.map(|line| {
let line = line.trim();
if line.starts_with("http") || line.starts_with("https") {
line.to_string()
} else {
format!("http://{}", line)
}
})
.collect()
}
lazy_static! {
static ref ACC_RE: Regex = Regex::new(r"0x[A-za-z0-9]{40}").unwrap();
}
fn execute(
work_stack: Arc<Mutex<Vec<(usize, String)>>>,
scheduler: Arc<Scheduler>,
counter: Arc<AtomicUsize>,
acc_len: usize,
root_path: Arc<String>,
csv: &Mutex<BufWriter<File>>,
json: bool,
) -> Result<()> {
loop {
let (c, acc) = match work_stack.lock().unwrap().pop() {
Some(work) => work,
None => {
info!("Could not fetch new work, exiting loop!");
return Ok(());
}
};
if c >= 5 {
info!("Account {} seed {} times, discarding!", acc, c);
continue;
}
let a = Address(hexdecode::decode(&acc.as_bytes()).unwrap().as_slice().into());
let worker = scheduler.get_worker();
let res = worker.analyze(a);
match res {
Ok(r) => {
let file_path = if json {
format!("{}/{}.json", root_path, acc)
} else {
format!("{}/{}", root_path, acc)
};
let mut f = match File::create(file_path) {
Ok(f) => f,
Err(e) => {
error!("Could not create file for {}: {:?}", acc, e);
return Err(Error::from_str(format!(
"Could not create file for {}: {:?}",
acc, e
)));
}
};
if json {
if let AnalysisSuccess::Success(ref analysis) = r {
let mut res = (false, false, false);
if let Some(ref attacks) = analysis.attacks {
for attack in attacks {
if attack.attack_type == AttackType::StealMoney {
res.0 = true;
}
if attack.attack_type == AttackType::DeleteContract {
res.1 = true;
}
if attack.attack_type == AttackType::HijackControlFlow {
res.2 = true;
}
}
}
csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!");
}
let _write_res = f.write_all(json!(r).to_string().as_bytes());
} else {
let content = | {
let s = Scheduler::new();
for url in &urls {
s.queue.lock().unwrap().push(Worker::new(url, timeout).unwrap()); // if the workers can not connect initially fail
}
s
} | identifier_body |
scheduler.rs | : usize) -> Result<Worker> {
let client = reqwest::Client::builder().timeout(None).build()?;
let mut url = format!("{}/analyze_address", url);
if timeout > 0 {
url.push_str("_timeout");
}
let timeout = Duration::from_secs((timeout * 60) as u64);
Ok(Worker {
client,
url: url,
timeout,
})
}
fn analyze(&self, address: Address) -> Result<AnalysisSuccess> {
info!("Analyzing {:x}", address.0);
let mut res = if self.timeout > Duration::from_secs(0) {
self
.client
.post(&self.url)
.json(&TimeoutAnalysis { address, timeout: self.timeout})
.send()?
} else {
self
.client
.post(&self.url)
.json(&address)
.send()?
};
Ok(res.json()?)
}
fn check_alive(&self) -> Result<()> {
self.client
.get(&format!("{}/alive", &self.url))
.send()
.map_err(|e| e.into())
.map(|_| ())
}
}
struct WorkerHandle<'a> {
worker: Option<Worker>,
scheduler: &'a Scheduler,
kill: bool,
}
impl<'a> WorkerHandle<'a> {
// specifically consume the handle to force readding the worker
fn analyze(mut self, addr: Address) -> Result<AnalysisSuccess> {
let res = self.worker.as_ref().unwrap().analyze(addr);
if let Err(ref error) = res {
error!("Error analyzing {:x?}, checking worker!", error);
if let Err(_) = self.worker.as_ref().unwrap().check_alive() {
error!("Worker died analyzing {:x?}, shuting down worker!", error);
self.kill = true;
} else {
return Err(Error::retry());
}
}
res
}
}
impl<'a> Drop for WorkerHandle<'a> {
fn drop(&mut self) {
if !self.kill {
let worker = self
.worker
.take()
.expect("Worker replaced before adding back");
self.scheduler.add_worker(worker)
} else {
self.worker
.take()
.expect("Worker replaced before adding back");
}
}
}
#[derive(Debug)]
struct Scheduler {
queue: Arc<Mutex<Vec<Worker>>>,
}
impl Scheduler {
fn new() -> Self {
let queue = Arc::new(Mutex::new(Vec::new()));
Self { queue }
}
fn with_worker_count(urls: Vec<String>, timeout: usize) -> Self {
let s = Scheduler::new();
for url in &urls {
s.queue.lock().unwrap().push(Worker::new(url, timeout).unwrap()); // if the workers can not connect initially fail
}
s
}
fn add_worker(&self, worker: Worker) {
self.queue.lock().unwrap().push(worker);
}
fn get_worker(&self) -> WorkerHandle {
let worker;
loop {
if let Some(w) = self.queue.lock().unwrap().pop() {
worker = Some(w);
break;
}
}
WorkerHandle {
worker,
scheduler: self,
kill: false,
}
}
}
type Result<T> = ::std::result::Result<T, Error>;
#[derive(Debug)]
struct Error {
kind: Kind,
}
impl Error {
fn from_str(s: String) -> Self {
Self {
kind: Kind::Execution(s),
}
}
fn retry() -> Self {
Self {
kind: Kind::Retry,
}
}
fn kind(&self) -> &Kind {
&self.kind
}
}
macro_rules! impl_error_kind {
(
$(#[$struct_attr:meta])*
enum Kind {
$( $enum_variant_name:ident($error_type:path), )+ ;
$( $single_variant_name:ident, )+
}
) => {
// meta attributes
$(#[$struct_attr])*
// enum definition
enum Kind {
$( $enum_variant_name($error_type), )+
$( $single_variant_name, )+
}
// impl error conversion for each type
$(
impl ::std::convert::From<$error_type> for Error {
fn from(error: $error_type) -> Self {
Self {
kind: Kind::$enum_variant_name(error),
}
}
}
)+
};
}
impl_error_kind!(#[derive(Debug)]
enum Kind {
Reqwest(reqwest::Error),
SerdeJson(serde_json::Error),
Log(log::SetLoggerError),
IO(std::io::Error),
Execution(String), ;
Retry,
});
fn parse_args<'a>() -> ArgMatches<'a> {
App::new("EthAEG scheduler for analyzing a large list of contracts")
.arg(
Arg::with_name("INPUT")
.help("Set the list of accounts to scan")
.required(true)
.index(1),
)
.arg(
Arg::with_name("SERVER_LIST")
.help("Set the list of backend servers")
.required(true)
.index(2),
)
.arg(Arg::with_name("timeout").long("timeout").takes_value(true).help("Specify a timeout for the analysis, none used by default"))
.arg(Arg::with_name("json").long("json").help("Dump the analysis result in json format."))
.get_matches()
}
fn | (path: &str) -> (Arc<Mutex<Vec<(usize, String)>>>, usize) {
let mut acc_list = String::new();
File::open(path)
.expect("Could not open account list")
.read_to_string(&mut acc_list)
.expect("Could not read account list");
let acc_vec: Vec<(usize, String)> = acc_list
.lines()
.filter_map(|line| match ACC_RE.captures(line) {
Some(cap) => {
let capture = cap.get(0).unwrap().as_str();
Some((0, capture.to_string()))
}
None => {
warn!("Could not process: {}", line);
None
}
})
.collect();
let len = acc_vec.len();
(Arc::new(Mutex::new(acc_vec)), len)
}
fn parse_server_list(path: &str) -> Vec<String> {
let mut server_list = String::new();
File::open(path)
.expect("Could not open server list")
.read_to_string(&mut server_list)
.expect("Could not read server list");
server_list
.lines()
.map(|line| {
let line = line.trim();
if line.starts_with("http") || line.starts_with("https") {
line.to_string()
} else {
format!("http://{}", line)
}
})
.collect()
}
lazy_static! {
static ref ACC_RE: Regex = Regex::new(r"0x[A-za-z0-9]{40}").unwrap();
}
fn execute(
work_stack: Arc<Mutex<Vec<(usize, String)>>>,
scheduler: Arc<Scheduler>,
counter: Arc<AtomicUsize>,
acc_len: usize,
root_path: Arc<String>,
csv: &Mutex<BufWriter<File>>,
json: bool,
) -> Result<()> {
loop {
let (c, acc) = match work_stack.lock().unwrap().pop() {
Some(work) => work,
None => {
info!("Could not fetch new work, exiting loop!");
return Ok(());
}
};
if c >= 5 {
info!("Account {} seed {} times, discarding!", acc, c);
continue;
}
let a = Address(hexdecode::decode(&acc.as_bytes()).unwrap().as_slice().into());
let worker = scheduler.get_worker();
let res = worker.analyze(a);
match res {
Ok(r) => {
let file_path = if json {
format!("{}/{}.json", root_path, acc)
} else {
format!("{}/{}", root_path, acc)
};
let mut f = match File::create(file_path) {
Ok(f) => f,
Err(e) => {
error!("Could not create file for {}: {:?}", acc, e);
return Err(Error::from_str(format!(
"Could not create file for {}: {:?}",
acc, e
)));
}
};
if json {
if let AnalysisSuccess::Success(ref analysis) = r {
let mut res = (false, false, false);
if let Some(ref attacks) = analysis.attacks {
for attack in attacks {
if attack.attack_type == AttackType::StealMoney {
res.0 = true;
}
if attack.attack_type == AttackType::DeleteContract {
res.1 = true;
}
if attack.attack_type == AttackType::HijackControlFlow {
res.2 = true;
}
}
}
csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!");
}
let _write_res = f.write_all(json!(r).to_string().as_bytes());
} else {
let content = match | parse_account_list | identifier_name |
scheduler.rs | timeout: usize) -> Result<Worker> {
let client = reqwest::Client::builder().timeout(None).build()?;
let mut url = format!("{}/analyze_address", url);
if timeout > 0 {
url.push_str("_timeout");
}
let timeout = Duration::from_secs((timeout * 60) as u64);
Ok(Worker {
client,
url: url,
timeout,
})
}
fn analyze(&self, address: Address) -> Result<AnalysisSuccess> {
info!("Analyzing {:x}", address.0);
let mut res = if self.timeout > Duration::from_secs(0) {
self
.client
.post(&self.url)
.json(&TimeoutAnalysis { address, timeout: self.timeout})
.send()?
} else {
self
.client
.post(&self.url)
.json(&address)
.send()?
};
Ok(res.json()?)
}
fn check_alive(&self) -> Result<()> {
self.client
.get(&format!("{}/alive", &self.url))
.send()
.map_err(|e| e.into())
.map(|_| ())
}
}
struct WorkerHandle<'a> {
worker: Option<Worker>,
scheduler: &'a Scheduler,
kill: bool,
}
impl<'a> WorkerHandle<'a> {
// specifically consume the handle to force readding the worker
fn analyze(mut self, addr: Address) -> Result<AnalysisSuccess> {
let res = self.worker.as_ref().unwrap().analyze(addr);
if let Err(ref error) = res {
error!("Error analyzing {:x?}, checking worker!", error);
if let Err(_) = self.worker.as_ref().unwrap().check_alive() {
error!("Worker died analyzing {:x?}, shuting down worker!", error);
self.kill = true;
} else {
return Err(Error::retry()); | }
res
}
}
impl<'a> Drop for WorkerHandle<'a> {
fn drop(&mut self) {
if !self.kill {
let worker = self
.worker
.take()
.expect("Worker replaced before adding back");
self.scheduler.add_worker(worker)
} else {
self.worker
.take()
.expect("Worker replaced before adding back");
}
}
}
#[derive(Debug)]
struct Scheduler {
queue: Arc<Mutex<Vec<Worker>>>,
}
impl Scheduler {
fn new() -> Self {
let queue = Arc::new(Mutex::new(Vec::new()));
Self { queue }
}
fn with_worker_count(urls: Vec<String>, timeout: usize) -> Self {
let s = Scheduler::new();
for url in &urls {
s.queue.lock().unwrap().push(Worker::new(url, timeout).unwrap()); // if the workers can not connect initially fail
}
s
}
fn add_worker(&self, worker: Worker) {
self.queue.lock().unwrap().push(worker);
}
fn get_worker(&self) -> WorkerHandle {
let worker;
loop {
if let Some(w) = self.queue.lock().unwrap().pop() {
worker = Some(w);
break;
}
}
WorkerHandle {
worker,
scheduler: self,
kill: false,
}
}
}
type Result<T> = ::std::result::Result<T, Error>;
#[derive(Debug)]
struct Error {
kind: Kind,
}
impl Error {
fn from_str(s: String) -> Self {
Self {
kind: Kind::Execution(s),
}
}
fn retry() -> Self {
Self {
kind: Kind::Retry,
}
}
fn kind(&self) -> &Kind {
&self.kind
}
}
macro_rules! impl_error_kind {
(
$(#[$struct_attr:meta])*
enum Kind {
$( $enum_variant_name:ident($error_type:path), )+ ;
$( $single_variant_name:ident, )+
}
) => {
// meta attributes
$(#[$struct_attr])*
// enum definition
enum Kind {
$( $enum_variant_name($error_type), )+
$( $single_variant_name, )+
}
// impl error conversion for each type
$(
impl ::std::convert::From<$error_type> for Error {
fn from(error: $error_type) -> Self {
Self {
kind: Kind::$enum_variant_name(error),
}
}
}
)+
};
}
impl_error_kind!(#[derive(Debug)]
enum Kind {
Reqwest(reqwest::Error),
SerdeJson(serde_json::Error),
Log(log::SetLoggerError),
IO(std::io::Error),
Execution(String), ;
Retry,
});
fn parse_args<'a>() -> ArgMatches<'a> {
App::new("EthAEG scheduler for analyzing a large list of contracts")
.arg(
Arg::with_name("INPUT")
.help("Set the list of accounts to scan")
.required(true)
.index(1),
)
.arg(
Arg::with_name("SERVER_LIST")
.help("Set the list of backend servers")
.required(true)
.index(2),
)
.arg(Arg::with_name("timeout").long("timeout").takes_value(true).help("Specify a timeout for the analysis, none used by default"))
.arg(Arg::with_name("json").long("json").help("Dump the analysis result in json format."))
.get_matches()
}
fn parse_account_list(path: &str) -> (Arc<Mutex<Vec<(usize, String)>>>, usize) {
let mut acc_list = String::new();
File::open(path)
.expect("Could not open account list")
.read_to_string(&mut acc_list)
.expect("Could not read account list");
let acc_vec: Vec<(usize, String)> = acc_list
.lines()
.filter_map(|line| match ACC_RE.captures(line) {
Some(cap) => {
let capture = cap.get(0).unwrap().as_str();
Some((0, capture.to_string()))
}
None => {
warn!("Could not process: {}", line);
None
}
})
.collect();
let len = acc_vec.len();
(Arc::new(Mutex::new(acc_vec)), len)
}
fn parse_server_list(path: &str) -> Vec<String> {
let mut server_list = String::new();
File::open(path)
.expect("Could not open server list")
.read_to_string(&mut server_list)
.expect("Could not read server list");
server_list
.lines()
.map(|line| {
let line = line.trim();
if line.starts_with("http") || line.starts_with("https") {
line.to_string()
} else {
format!("http://{}", line)
}
})
.collect()
}
lazy_static! {
static ref ACC_RE: Regex = Regex::new(r"0x[A-za-z0-9]{40}").unwrap();
}
fn execute(
work_stack: Arc<Mutex<Vec<(usize, String)>>>,
scheduler: Arc<Scheduler>,
counter: Arc<AtomicUsize>,
acc_len: usize,
root_path: Arc<String>,
csv: &Mutex<BufWriter<File>>,
json: bool,
) -> Result<()> {
loop {
let (c, acc) = match work_stack.lock().unwrap().pop() {
Some(work) => work,
None => {
info!("Could not fetch new work, exiting loop!");
return Ok(());
}
};
if c >= 5 {
info!("Account {} seed {} times, discarding!", acc, c);
continue;
}
let a = Address(hexdecode::decode(&acc.as_bytes()).unwrap().as_slice().into());
let worker = scheduler.get_worker();
let res = worker.analyze(a);
match res {
Ok(r) => {
let file_path = if json {
format!("{}/{}.json", root_path, acc)
} else {
format!("{}/{}", root_path, acc)
};
let mut f = match File::create(file_path) {
Ok(f) => f,
Err(e) => {
error!("Could not create file for {}: {:?}", acc, e);
return Err(Error::from_str(format!(
"Could not create file for {}: {:?}",
acc, e
)));
}
};
if json {
if let AnalysisSuccess::Success(ref analysis) = r {
let mut res = (false, false, false);
if let Some(ref attacks) = analysis.attacks {
for attack in attacks {
if attack.attack_type == AttackType::StealMoney {
res.0 = true;
}
if attack.attack_type == AttackType::DeleteContract {
res.1 = true;
}
if attack.attack_type == AttackType::HijackControlFlow {
res.2 = true;
}
}
}
csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!");
}
let _write_res = f.write_all(json!(r).to_string().as_bytes());
} else {
let content = match | } | random_line_split |
scheduler.rs | * 60) as u64);
Ok(Worker {
client,
url: url,
timeout,
})
}
fn analyze(&self, address: Address) -> Result<AnalysisSuccess> {
info!("Analyzing {:x}", address.0);
let mut res = if self.timeout > Duration::from_secs(0) {
self
.client
.post(&self.url)
.json(&TimeoutAnalysis { address, timeout: self.timeout})
.send()?
} else {
self
.client
.post(&self.url)
.json(&address)
.send()?
};
Ok(res.json()?)
}
fn check_alive(&self) -> Result<()> {
self.client
.get(&format!("{}/alive", &self.url))
.send()
.map_err(|e| e.into())
.map(|_| ())
}
}
struct WorkerHandle<'a> {
worker: Option<Worker>,
scheduler: &'a Scheduler,
kill: bool,
}
impl<'a> WorkerHandle<'a> {
// specifically consume the handle to force readding the worker
fn analyze(mut self, addr: Address) -> Result<AnalysisSuccess> {
let res = self.worker.as_ref().unwrap().analyze(addr);
if let Err(ref error) = res {
error!("Error analyzing {:x?}, checking worker!", error);
if let Err(_) = self.worker.as_ref().unwrap().check_alive() {
error!("Worker died analyzing {:x?}, shuting down worker!", error);
self.kill = true;
} else {
return Err(Error::retry());
}
}
res
}
}
impl<'a> Drop for WorkerHandle<'a> {
fn drop(&mut self) {
if !self.kill {
let worker = self
.worker
.take()
.expect("Worker replaced before adding back");
self.scheduler.add_worker(worker)
} else {
self.worker
.take()
.expect("Worker replaced before adding back");
}
}
}
#[derive(Debug)]
struct Scheduler {
queue: Arc<Mutex<Vec<Worker>>>,
}
impl Scheduler {
fn new() -> Self {
let queue = Arc::new(Mutex::new(Vec::new()));
Self { queue }
}
fn with_worker_count(urls: Vec<String>, timeout: usize) -> Self {
let s = Scheduler::new();
for url in &urls {
s.queue.lock().unwrap().push(Worker::new(url, timeout).unwrap()); // if the workers can not connect initially fail
}
s
}
fn add_worker(&self, worker: Worker) {
self.queue.lock().unwrap().push(worker);
}
fn get_worker(&self) -> WorkerHandle {
let worker;
loop {
if let Some(w) = self.queue.lock().unwrap().pop() {
worker = Some(w);
break;
}
}
WorkerHandle {
worker,
scheduler: self,
kill: false,
}
}
}
type Result<T> = ::std::result::Result<T, Error>;
#[derive(Debug)]
struct Error {
kind: Kind,
}
impl Error {
fn from_str(s: String) -> Self {
Self {
kind: Kind::Execution(s),
}
}
fn retry() -> Self {
Self {
kind: Kind::Retry,
}
}
fn kind(&self) -> &Kind {
&self.kind
}
}
macro_rules! impl_error_kind {
(
$(#[$struct_attr:meta])*
enum Kind {
$( $enum_variant_name:ident($error_type:path), )+ ;
$( $single_variant_name:ident, )+
}
) => {
// meta attributes
$(#[$struct_attr])*
// enum definition
enum Kind {
$( $enum_variant_name($error_type), )+
$( $single_variant_name, )+
}
// impl error conversion for each type
$(
impl ::std::convert::From<$error_type> for Error {
fn from(error: $error_type) -> Self {
Self {
kind: Kind::$enum_variant_name(error),
}
}
}
)+
};
}
impl_error_kind!(#[derive(Debug)]
enum Kind {
Reqwest(reqwest::Error),
SerdeJson(serde_json::Error),
Log(log::SetLoggerError),
IO(std::io::Error),
Execution(String), ;
Retry,
});
fn parse_args<'a>() -> ArgMatches<'a> {
App::new("EthAEG scheduler for analyzing a large list of contracts")
.arg(
Arg::with_name("INPUT")
.help("Set the list of accounts to scan")
.required(true)
.index(1),
)
.arg(
Arg::with_name("SERVER_LIST")
.help("Set the list of backend servers")
.required(true)
.index(2),
)
.arg(Arg::with_name("timeout").long("timeout").takes_value(true).help("Specify a timeout for the analysis, none used by default"))
.arg(Arg::with_name("json").long("json").help("Dump the analysis result in json format."))
.get_matches()
}
fn parse_account_list(path: &str) -> (Arc<Mutex<Vec<(usize, String)>>>, usize) {
let mut acc_list = String::new();
File::open(path)
.expect("Could not open account list")
.read_to_string(&mut acc_list)
.expect("Could not read account list");
let acc_vec: Vec<(usize, String)> = acc_list
.lines()
.filter_map(|line| match ACC_RE.captures(line) {
Some(cap) => {
let capture = cap.get(0).unwrap().as_str();
Some((0, capture.to_string()))
}
None => {
warn!("Could not process: {}", line);
None
}
})
.collect();
let len = acc_vec.len();
(Arc::new(Mutex::new(acc_vec)), len)
}
fn parse_server_list(path: &str) -> Vec<String> {
let mut server_list = String::new();
File::open(path)
.expect("Could not open server list")
.read_to_string(&mut server_list)
.expect("Could not read server list");
server_list
.lines()
.map(|line| {
let line = line.trim();
if line.starts_with("http") || line.starts_with("https") {
line.to_string()
} else {
format!("http://{}", line)
}
})
.collect()
}
lazy_static! {
static ref ACC_RE: Regex = Regex::new(r"0x[A-za-z0-9]{40}").unwrap();
}
fn execute(
work_stack: Arc<Mutex<Vec<(usize, String)>>>,
scheduler: Arc<Scheduler>,
counter: Arc<AtomicUsize>,
acc_len: usize,
root_path: Arc<String>,
csv: &Mutex<BufWriter<File>>,
json: bool,
) -> Result<()> {
loop {
let (c, acc) = match work_stack.lock().unwrap().pop() {
Some(work) => work,
None => {
info!("Could not fetch new work, exiting loop!");
return Ok(());
}
};
if c >= 5 {
info!("Account {} seed {} times, discarding!", acc, c);
continue;
}
let a = Address(hexdecode::decode(&acc.as_bytes()).unwrap().as_slice().into());
let worker = scheduler.get_worker();
let res = worker.analyze(a);
match res {
Ok(r) => {
let file_path = if json {
format!("{}/{}.json", root_path, acc)
} else {
format!("{}/{}", root_path, acc)
};
let mut f = match File::create(file_path) {
Ok(f) => f,
Err(e) => {
error!("Could not create file for {}: {:?}", acc, e);
return Err(Error::from_str(format!(
"Could not create file for {}: {:?}",
acc, e
)));
}
};
if json {
if let AnalysisSuccess::Success(ref analysis) = r {
let mut res = (false, false, false);
if let Some(ref attacks) = analysis.attacks {
for attack in attacks {
if attack.attack_type == AttackType::StealMoney {
res.0 = true;
}
if attack.attack_type == AttackType::DeleteContract {
res.1 = true;
}
if attack.attack_type == AttackType::HijackControlFlow {
res.2 = true;
}
}
}
csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!");
}
let _write_res = f.write_all(json!(r).to_string().as_bytes());
} else {
let content = match r {
AnalysisSuccess::Success(analysis) => {
let mut res = (false, false, false);
if let Some(ref attacks) = analysis.attacks {
for attack in attacks {
if attack.attack_type == AttackType::StealMoney | {
res.0 = true;
} | conditional_block |
|
lib.rs | <Body>) -> Option<BoxFuture<'_, Response<Body>>>;
}
impl Handler for Arc<dyn Handler> {
fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>> {
(**self).handles(request)
}
}
impl TestServer {
/// return the scheme of the TestServer
fn scheme(&self) -> &'static str {
if self.use_https {
"https"
} else {
"http"
}
}
/// Returns the URL that can be used to connect to this repository from this device.
pub fn local_url(&self) -> String {
format!("{}://localhost:{}", self.scheme(), self.addr.port())
}
/// Returns the URL for the given path that can be used to connect to this repository from this
/// device.
pub fn local_url_for_path(&self, path: &str) -> String {
let path = path.trim_start_matches('/');
format!("{}://localhost:{}/{}", self.scheme(), self.addr.port(), path)
}
/// Gracefully signal the server to stop and returns a future that resolves when it terminates.
pub fn stop(self) -> impl Future<Output = ()> {
self.stop.send(()).expect("remote end to still be open");
self.task
}
/// Internal helper which iterates over all Handlers until it finds one that will respond to the
/// request. It then returns that response. If not response is found, it returns 404 NOT_FOUND.
async fn handle_request(
handlers: Arc<Vec<Arc<dyn Handler>>>,
req: Request<Body>,
) -> Response<Body> {
let response = handlers.iter().find_map(|h| h.handles(&req));
match response {
Some(response) => response.await,
None => Response::builder().status(StatusCode::NOT_FOUND).body(Body::empty()).unwrap(),
}
}
/// Create a Builder
pub fn builder() -> TestServerBuilder {
TestServerBuilder::new()
}
}
/// A builder to construct a `TestServer`.
#[derive(Default)]
pub struct TestServerBuilder {
handlers: Vec<Arc<dyn Handler>>,
https_certs: Option<(Vec<rustls::Certificate>, rustls::PrivateKey)>,
}
impl TestServerBuilder {
/// Create a new TestServerBuilder
pub fn new() -> Self {
Self::default()
}
/// Serve over TLS, using a server certificate rooted the provided certs
pub fn use_https(mut self, cert_chain: &[u8], private_key: &[u8]) -> Self {
let cert_chain = parse_cert_chain(cert_chain);
let private_key = parse_private_key(private_key);
self.https_certs = Some((cert_chain, private_key));
self
}
/// Add a Handler which implements the server's behavior. These are given the ability to
/// handle a request in the order in which they are added to the `TestServerBuilder`.
pub fn handler(mut self, handler: impl Handler + 'static) -> Self {
self.handlers.push(Arc::new(handler));
self
}
/// Spawn the server on the current executor, returning a handle to manage the server.
pub async fn start(self) -> TestServer {
let (mut listener, addr) = {
let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0);
let listener = bind_listener(&addr).await;
let local_addr = listener.local_addr().unwrap();
(listener, local_addr)
};
let (stop, rx_stop) = futures::channel::oneshot::channel();
let (tls_acceptor, use_https) = if let Some((cert_chain, private_key)) = self.https_certs {
// build a server configuration using a test CA and cert chain
let mut tls_config = rustls::ServerConfig::new(rustls::NoClientAuth::new());
tls_config.set_single_cert(cert_chain, private_key).unwrap();
let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::new(tls_config));
(Some(tls_acceptor), true)
} else {
(None, false)
};
let task = fasync::Task::spawn(async move {
let listener = accept_stream(&mut listener);
let listener = listener
.map_err(Error::from)
.map_ok(|conn| fuchsia_hyper::TcpStream { stream: conn });
let connections = if let Some(tls_acceptor) = tls_acceptor {
// wrap incoming tcp streams
listener
.and_then(move |conn| {
tls_acceptor.accept(conn).map(|res| match res {
Ok(conn) => {
Ok(Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>)
}
Err(e) => Err(Error::from(e)),
})
})
.boxed() // connections
} else {
listener
.map_ok(|conn| Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>)
.boxed() // connections
};
// This is the root Arc<Vec<Arc<dyn Handler>>>.
let handlers = Arc::new(self.handlers);
let make_svc = make_service_fn(move |_socket| {
// Each connection to the server receives a separate service_fn instance, and so
// needs it's own copy of the handlers, this is a factory of sorts.
let handlers = Arc::clone(&handlers);
async move {
Ok::<_, Infallible>(service_fn(move |req| {
// Each request made by a connection is serviced by the service_fn created from
// this scope, which is why there is another cloning of the Arc of Handlers.
let method = req.method().to_owned();
let path = req.uri().path().to_owned();
TestServer::handle_request(Arc::clone(&handlers), req)
.inspect(move |x| {
println!(
"{} [test http] {} {} => {}",
Utc::now().format("%T.%6f"),
method,
path,
x.status()
)
})
.map(Ok::<_, Infallible>)
}))
}
});
Server::builder(from_stream(connections))
.executor(fuchsia_hyper::Executor)
.serve(make_svc)
.with_graceful_shutdown(
rx_stop.map(|res| res.unwrap_or_else(|futures::channel::oneshot::Canceled| ())),
)
.unwrap_or_else(|e| panic!("error serving repo over http: {}", e))
.await;
});
TestServer { stop, addr, use_https, task }
}
}
#[cfg(target_os = "fuchsia")]
async fn bind_listener(addr: &SocketAddr) -> fuchsia_async::net::TcpListener {
fuchsia_async::net::TcpListener::bind(addr).unwrap()
}
#[cfg(not(target_os = "fuchsia"))]
async fn bind_listener(&addr: &SocketAddr) -> async_net::TcpListener {
async_net::TcpListener::bind(addr).await.unwrap()
}
#[cfg(target_os = "fuchsia")]
fn | <'a>(
listener: &'a mut fuchsia_async::net::TcpListener,
) -> impl Stream<Item = std::io::Result<fuchsia_async::net::TcpStream>> + 'a {
use std::task::{Context, Poll};
#[pin_project::pin_project]
struct AcceptStream<'a> {
#[pin]
listener: &'a mut fuchsia_async::net::TcpListener,
}
impl<'a> Stream for AcceptStream<'a> {
type Item = std::io::Result<fuchsia_async::net::TcpStream>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.project();
match this.listener.async_accept(cx) {
Poll::Ready(Ok((conn, _addr))) => Poll::Ready(Some(Ok(conn))),
Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))),
Poll::Pending => Poll::Pending,
}
}
}
AcceptStream { listener }
}
#[cfg(not(target_os = "fuchsia"))]
fn accept_stream<'a>(
listener: &'a mut async_net::TcpListener,
) -> impl Stream<Item = std::io::Result<async_net::TcpStream>> + 'a {
listener.incoming()
}
fn parse_cert_chain(mut bytes: &[u8]) -> Vec<rustls::Certificate> {
rustls::internal::pemfile::certs(&mut bytes).expect("certs to parse")
}
fn parse_private_key(mut bytes: &[u8]) -> rustls::PrivateKey {
let keys =
rustls::internal::pemfile::rsa_private_keys(&mut bytes).expect("private keys to parse");
assert_eq!(keys.len(), 1, "expecting a single private key");
keys.into_iter().next().unwrap()
}
trait AsyncReadWrite: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {}
impl<T> AsyncReadWrite for T where T: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {}
// These are a set of useful functions when writing tests.
/// Create a GET request for a given url, which can be used with any hyper client.
pub fn make_get(url: impl AsRef<str>) -> Result<Request<Body>, Error> {
Request::get(url.as_ref | accept_stream | identifier_name |
lib.rs | <Body>) -> Option<BoxFuture<'_, Response<Body>>>;
}
impl Handler for Arc<dyn Handler> {
fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>> {
(**self).handles(request)
}
}
impl TestServer {
/// return the scheme of the TestServer
fn scheme(&self) -> &'static str {
if self.use_https {
"https"
} else {
"http"
}
}
/// Returns the URL that can be used to connect to this repository from this device.
pub fn local_url(&self) -> String {
format!("{}://localhost:{}", self.scheme(), self.addr.port())
}
/// Returns the URL for the given path that can be used to connect to this repository from this
/// device.
pub fn local_url_for_path(&self, path: &str) -> String {
let path = path.trim_start_matches('/');
format!("{}://localhost:{}/{}", self.scheme(), self.addr.port(), path)
}
/// Gracefully signal the server to stop and returns a future that resolves when it terminates.
pub fn stop(self) -> impl Future<Output = ()> {
self.stop.send(()).expect("remote end to still be open");
self.task
}
/// Internal helper which iterates over all Handlers until it finds one that will respond to the
/// request. It then returns that response. If not response is found, it returns 404 NOT_FOUND.
async fn handle_request(
handlers: Arc<Vec<Arc<dyn Handler>>>,
req: Request<Body>,
) -> Response<Body> {
let response = handlers.iter().find_map(|h| h.handles(&req));
match response {
Some(response) => response.await,
None => Response::builder().status(StatusCode::NOT_FOUND).body(Body::empty()).unwrap(),
}
}
/// Create a Builder
pub fn builder() -> TestServerBuilder {
TestServerBuilder::new()
}
}
/// A builder to construct a `TestServer`.
#[derive(Default)]
pub struct TestServerBuilder {
handlers: Vec<Arc<dyn Handler>>,
https_certs: Option<(Vec<rustls::Certificate>, rustls::PrivateKey)>,
}
impl TestServerBuilder {
/// Create a new TestServerBuilder
pub fn new() -> Self {
Self::default()
}
/// Serve over TLS, using a server certificate rooted the provided certs
pub fn use_https(mut self, cert_chain: &[u8], private_key: &[u8]) -> Self {
let cert_chain = parse_cert_chain(cert_chain);
let private_key = parse_private_key(private_key);
self.https_certs = Some((cert_chain, private_key));
self
}
/// Add a Handler which implements the server's behavior. These are given the ability to
/// handle a request in the order in which they are added to the `TestServerBuilder`.
pub fn handler(mut self, handler: impl Handler + 'static) -> Self {
self.handlers.push(Arc::new(handler));
self
}
/// Spawn the server on the current executor, returning a handle to manage the server.
pub async fn start(self) -> TestServer {
let (mut listener, addr) = {
let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0);
let listener = bind_listener(&addr).await;
let local_addr = listener.local_addr().unwrap();
(listener, local_addr)
};
let (stop, rx_stop) = futures::channel::oneshot::channel();
let (tls_acceptor, use_https) = if let Some((cert_chain, private_key)) = self.https_certs | else {
(None, false)
};
let task = fasync::Task::spawn(async move {
let listener = accept_stream(&mut listener);
let listener = listener
.map_err(Error::from)
.map_ok(|conn| fuchsia_hyper::TcpStream { stream: conn });
let connections = if let Some(tls_acceptor) = tls_acceptor {
// wrap incoming tcp streams
listener
.and_then(move |conn| {
tls_acceptor.accept(conn).map(|res| match res {
Ok(conn) => {
Ok(Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>)
}
Err(e) => Err(Error::from(e)),
})
})
.boxed() // connections
} else {
listener
.map_ok(|conn| Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>)
.boxed() // connections
};
// This is the root Arc<Vec<Arc<dyn Handler>>>.
let handlers = Arc::new(self.handlers);
let make_svc = make_service_fn(move |_socket| {
// Each connection to the server receives a separate service_fn instance, and so
// needs it's own copy of the handlers, this is a factory of sorts.
let handlers = Arc::clone(&handlers);
async move {
Ok::<_, Infallible>(service_fn(move |req| {
// Each request made by a connection is serviced by the service_fn created from
// this scope, which is why there is another cloning of the Arc of Handlers.
let method = req.method().to_owned();
let path = req.uri().path().to_owned();
TestServer::handle_request(Arc::clone(&handlers), req)
.inspect(move |x| {
println!(
"{} [test http] {} {} => {}",
Utc::now().format("%T.%6f"),
method,
path,
x.status()
)
})
.map(Ok::<_, Infallible>)
}))
}
});
Server::builder(from_stream(connections))
.executor(fuchsia_hyper::Executor)
.serve(make_svc)
.with_graceful_shutdown(
rx_stop.map(|res| res.unwrap_or_else(|futures::channel::oneshot::Canceled| ())),
)
.unwrap_or_else(|e| panic!("error serving repo over http: {}", e))
.await;
});
TestServer { stop, addr, use_https, task }
}
}
#[cfg(target_os = "fuchsia")]
async fn bind_listener(addr: &SocketAddr) -> fuchsia_async::net::TcpListener {
fuchsia_async::net::TcpListener::bind(addr).unwrap()
}
#[cfg(not(target_os = "fuchsia"))]
async fn bind_listener(&addr: &SocketAddr) -> async_net::TcpListener {
async_net::TcpListener::bind(addr).await.unwrap()
}
#[cfg(target_os = "fuchsia")]
fn accept_stream<'a>(
listener: &'a mut fuchsia_async::net::TcpListener,
) -> impl Stream<Item = std::io::Result<fuchsia_async::net::TcpStream>> + 'a {
use std::task::{Context, Poll};
#[pin_project::pin_project]
struct AcceptStream<'a> {
#[pin]
listener: &'a mut fuchsia_async::net::TcpListener,
}
impl<'a> Stream for AcceptStream<'a> {
type Item = std::io::Result<fuchsia_async::net::TcpStream>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.project();
match this.listener.async_accept(cx) {
Poll::Ready(Ok((conn, _addr))) => Poll::Ready(Some(Ok(conn))),
Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))),
Poll::Pending => Poll::Pending,
}
}
}
AcceptStream { listener }
}
#[cfg(not(target_os = "fuchsia"))]
fn accept_stream<'a>(
listener: &'a mut async_net::TcpListener,
) -> impl Stream<Item = std::io::Result<async_net::TcpStream>> + 'a {
listener.incoming()
}
fn parse_cert_chain(mut bytes: &[u8]) -> Vec<rustls::Certificate> {
rustls::internal::pemfile::certs(&mut bytes).expect("certs to parse")
}
fn parse_private_key(mut bytes: &[u8]) -> rustls::PrivateKey {
let keys =
rustls::internal::pemfile::rsa_private_keys(&mut bytes).expect("private keys to parse");
assert_eq!(keys.len(), 1, "expecting a single private key");
keys.into_iter().next().unwrap()
}
trait AsyncReadWrite: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {}
impl<T> AsyncReadWrite for T where T: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {}
// These are a set of useful functions when writing tests.
/// Create a GET request for a given url, which can be used with any hyper client.
pub fn make_get(url: impl AsRef<str>) -> Result<Request<Body>, Error> {
Request::get(url.as_ref | {
// build a server configuration using a test CA and cert chain
let mut tls_config = rustls::ServerConfig::new(rustls::NoClientAuth::new());
tls_config.set_single_cert(cert_chain, private_key).unwrap();
let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::new(tls_config));
(Some(tls_acceptor), true)
} | conditional_block |
lib.rs | <Body>) -> Option<BoxFuture<'_, Response<Body>>>;
}
impl Handler for Arc<dyn Handler> {
fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>> {
(**self).handles(request)
}
}
impl TestServer {
/// return the scheme of the TestServer
fn scheme(&self) -> &'static str {
if self.use_https {
"https"
} else {
"http"
}
}
/// Returns the URL that can be used to connect to this repository from this device.
pub fn local_url(&self) -> String {
format!("{}://localhost:{}", self.scheme(), self.addr.port())
}
/// Returns the URL for the given path that can be used to connect to this repository from this
/// device.
pub fn local_url_for_path(&self, path: &str) -> String {
let path = path.trim_start_matches('/');
format!("{}://localhost:{}/{}", self.scheme(), self.addr.port(), path)
}
/// Gracefully signal the server to stop and returns a future that resolves when it terminates.
pub fn stop(self) -> impl Future<Output = ()> {
self.stop.send(()).expect("remote end to still be open");
self.task
}
/// Internal helper which iterates over all Handlers until it finds one that will respond to the
/// request. It then returns that response. If not response is found, it returns 404 NOT_FOUND.
async fn handle_request(
handlers: Arc<Vec<Arc<dyn Handler>>>,
req: Request<Body>,
) -> Response<Body> {
let response = handlers.iter().find_map(|h| h.handles(&req));
match response {
Some(response) => response.await,
None => Response::builder().status(StatusCode::NOT_FOUND).body(Body::empty()).unwrap(),
}
}
/// Create a Builder
pub fn builder() -> TestServerBuilder {
TestServerBuilder::new()
}
}
/// A builder to construct a `TestServer`.
#[derive(Default)]
pub struct TestServerBuilder {
handlers: Vec<Arc<dyn Handler>>,
https_certs: Option<(Vec<rustls::Certificate>, rustls::PrivateKey)>,
}
impl TestServerBuilder {
/// Create a new TestServerBuilder
pub fn new() -> Self {
Self::default()
}
/// Serve over TLS, using a server certificate rooted the provided certs
pub fn use_https(mut self, cert_chain: &[u8], private_key: &[u8]) -> Self {
let cert_chain = parse_cert_chain(cert_chain);
let private_key = parse_private_key(private_key);
self.https_certs = Some((cert_chain, private_key));
self
}
/// Add a Handler which implements the server's behavior. These are given the ability to
/// handle a request in the order in which they are added to the `TestServerBuilder`.
pub fn handler(mut self, handler: impl Handler + 'static) -> Self {
self.handlers.push(Arc::new(handler));
self
}
/// Spawn the server on the current executor, returning a handle to manage the server.
pub async fn start(self) -> TestServer {
let (mut listener, addr) = {
let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0);
let listener = bind_listener(&addr).await;
let local_addr = listener.local_addr().unwrap();
(listener, local_addr)
};
let (stop, rx_stop) = futures::channel::oneshot::channel();
let (tls_acceptor, use_https) = if let Some((cert_chain, private_key)) = self.https_certs {
// build a server configuration using a test CA and cert chain
let mut tls_config = rustls::ServerConfig::new(rustls::NoClientAuth::new());
tls_config.set_single_cert(cert_chain, private_key).unwrap();
let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::new(tls_config));
(Some(tls_acceptor), true)
} else {
(None, false)
};
let task = fasync::Task::spawn(async move {
let listener = accept_stream(&mut listener);
let listener = listener
.map_err(Error::from)
.map_ok(|conn| fuchsia_hyper::TcpStream { stream: conn });
let connections = if let Some(tls_acceptor) = tls_acceptor {
// wrap incoming tcp streams
listener
.and_then(move |conn| {
tls_acceptor.accept(conn).map(|res| match res {
Ok(conn) => {
Ok(Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>)
}
Err(e) => Err(Error::from(e)),
})
})
.boxed() // connections
} else {
listener
.map_ok(|conn| Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>)
.boxed() // connections
};
// This is the root Arc<Vec<Arc<dyn Handler>>>.
let handlers = Arc::new(self.handlers);
let make_svc = make_service_fn(move |_socket| {
// Each connection to the server receives a separate service_fn instance, and so
// needs it's own copy of the handlers, this is a factory of sorts.
let handlers = Arc::clone(&handlers);
async move {
Ok::<_, Infallible>(service_fn(move |req| {
// Each request made by a connection is serviced by the service_fn created from
// this scope, which is why there is another cloning of the Arc of Handlers.
let method = req.method().to_owned();
let path = req.uri().path().to_owned();
TestServer::handle_request(Arc::clone(&handlers), req)
.inspect(move |x| {
println!(
"{} [test http] {} {} => {}",
Utc::now().format("%T.%6f"),
method,
path,
x.status()
)
})
.map(Ok::<_, Infallible>)
}))
}
});
Server::builder(from_stream(connections))
.executor(fuchsia_hyper::Executor)
.serve(make_svc)
.with_graceful_shutdown(
rx_stop.map(|res| res.unwrap_or_else(|futures::channel::oneshot::Canceled| ())),
)
.unwrap_or_else(|e| panic!("error serving repo over http: {}", e))
.await;
});
TestServer { stop, addr, use_https, task }
}
}
#[cfg(target_os = "fuchsia")]
async fn bind_listener(addr: &SocketAddr) -> fuchsia_async::net::TcpListener {
fuchsia_async::net::TcpListener::bind(addr).unwrap()
}
#[cfg(not(target_os = "fuchsia"))]
async fn bind_listener(&addr: &SocketAddr) -> async_net::TcpListener {
async_net::TcpListener::bind(addr).await.unwrap()
}
#[cfg(target_os = "fuchsia")]
fn accept_stream<'a>(
listener: &'a mut fuchsia_async::net::TcpListener,
) -> impl Stream<Item = std::io::Result<fuchsia_async::net::TcpStream>> + 'a {
use std::task::{Context, Poll};
#[pin_project::pin_project]
struct AcceptStream<'a> {
#[pin]
listener: &'a mut fuchsia_async::net::TcpListener,
}
impl<'a> Stream for AcceptStream<'a> {
type Item = std::io::Result<fuchsia_async::net::TcpStream>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> |
}
AcceptStream { listener }
}
#[cfg(not(target_os = "fuchsia"))]
fn accept_stream<'a>(
listener: &'a mut async_net::TcpListener,
) -> impl Stream<Item = std::io::Result<async_net::TcpStream>> + 'a {
listener.incoming()
}
fn parse_cert_chain(mut bytes: &[u8]) -> Vec<rustls::Certificate> {
rustls::internal::pemfile::certs(&mut bytes).expect("certs to parse")
}
fn parse_private_key(mut bytes: &[u8]) -> rustls::PrivateKey {
let keys =
rustls::internal::pemfile::rsa_private_keys(&mut bytes).expect("private keys to parse");
assert_eq!(keys.len(), 1, "expecting a single private key");
keys.into_iter().next().unwrap()
}
trait AsyncReadWrite: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {}
impl<T> AsyncReadWrite for T where T: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {}
// These are a set of useful functions when writing tests.
/// Create a GET request for a given url, which can be used with any hyper client.
pub fn make_get(url: impl AsRef<str>) -> Result<Request<Body>, Error> {
Request::get(url.as | {
let mut this = self.project();
match this.listener.async_accept(cx) {
Poll::Ready(Ok((conn, _addr))) => Poll::Ready(Some(Ok(conn))),
Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))),
Poll::Pending => Poll::Pending,
}
} | identifier_body |
lib.rs | <Body>) -> Option<BoxFuture<'_, Response<Body>>>;
}
impl Handler for Arc<dyn Handler> {
fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>> {
(**self).handles(request)
}
}
impl TestServer {
/// return the scheme of the TestServer
fn scheme(&self) -> &'static str {
if self.use_https {
"https"
} else {
"http"
}
}
/// Returns the URL that can be used to connect to this repository from this device.
pub fn local_url(&self) -> String {
format!("{}://localhost:{}", self.scheme(), self.addr.port())
}
/// Returns the URL for the given path that can be used to connect to this repository from this
/// device.
pub fn local_url_for_path(&self, path: &str) -> String {
let path = path.trim_start_matches('/');
format!("{}://localhost:{}/{}", self.scheme(), self.addr.port(), path)
}
/// Gracefully signal the server to stop and returns a future that resolves when it terminates.
pub fn stop(self) -> impl Future<Output = ()> {
self.stop.send(()).expect("remote end to still be open");
self.task
}
/// Internal helper which iterates over all Handlers until it finds one that will respond to the
/// request. It then returns that response. If not response is found, it returns 404 NOT_FOUND.
async fn handle_request(
handlers: Arc<Vec<Arc<dyn Handler>>>,
req: Request<Body>,
) -> Response<Body> {
let response = handlers.iter().find_map(|h| h.handles(&req));
match response {
Some(response) => response.await,
None => Response::builder().status(StatusCode::NOT_FOUND).body(Body::empty()).unwrap(),
}
}
|
/// A builder to construct a `TestServer`.
#[derive(Default)]
pub struct TestServerBuilder {
handlers: Vec<Arc<dyn Handler>>,
https_certs: Option<(Vec<rustls::Certificate>, rustls::PrivateKey)>,
}
impl TestServerBuilder {
/// Create a new TestServerBuilder
pub fn new() -> Self {
Self::default()
}
/// Serve over TLS, using a server certificate rooted the provided certs
pub fn use_https(mut self, cert_chain: &[u8], private_key: &[u8]) -> Self {
let cert_chain = parse_cert_chain(cert_chain);
let private_key = parse_private_key(private_key);
self.https_certs = Some((cert_chain, private_key));
self
}
/// Add a Handler which implements the server's behavior. These are given the ability to
/// handle a request in the order in which they are added to the `TestServerBuilder`.
pub fn handler(mut self, handler: impl Handler + 'static) -> Self {
self.handlers.push(Arc::new(handler));
self
}
/// Spawn the server on the current executor, returning a handle to manage the server.
pub async fn start(self) -> TestServer {
let (mut listener, addr) = {
let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0);
let listener = bind_listener(&addr).await;
let local_addr = listener.local_addr().unwrap();
(listener, local_addr)
};
let (stop, rx_stop) = futures::channel::oneshot::channel();
let (tls_acceptor, use_https) = if let Some((cert_chain, private_key)) = self.https_certs {
// build a server configuration using a test CA and cert chain
let mut tls_config = rustls::ServerConfig::new(rustls::NoClientAuth::new());
tls_config.set_single_cert(cert_chain, private_key).unwrap();
let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::new(tls_config));
(Some(tls_acceptor), true)
} else {
(None, false)
};
let task = fasync::Task::spawn(async move {
let listener = accept_stream(&mut listener);
let listener = listener
.map_err(Error::from)
.map_ok(|conn| fuchsia_hyper::TcpStream { stream: conn });
let connections = if let Some(tls_acceptor) = tls_acceptor {
// wrap incoming tcp streams
listener
.and_then(move |conn| {
tls_acceptor.accept(conn).map(|res| match res {
Ok(conn) => {
Ok(Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>)
}
Err(e) => Err(Error::from(e)),
})
})
.boxed() // connections
} else {
listener
.map_ok(|conn| Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>)
.boxed() // connections
};
// This is the root Arc<Vec<Arc<dyn Handler>>>.
let handlers = Arc::new(self.handlers);
let make_svc = make_service_fn(move |_socket| {
// Each connection to the server receives a separate service_fn instance, and so
// needs it's own copy of the handlers, this is a factory of sorts.
let handlers = Arc::clone(&handlers);
async move {
Ok::<_, Infallible>(service_fn(move |req| {
// Each request made by a connection is serviced by the service_fn created from
// this scope, which is why there is another cloning of the Arc of Handlers.
let method = req.method().to_owned();
let path = req.uri().path().to_owned();
TestServer::handle_request(Arc::clone(&handlers), req)
.inspect(move |x| {
println!(
"{} [test http] {} {} => {}",
Utc::now().format("%T.%6f"),
method,
path,
x.status()
)
})
.map(Ok::<_, Infallible>)
}))
}
});
Server::builder(from_stream(connections))
.executor(fuchsia_hyper::Executor)
.serve(make_svc)
.with_graceful_shutdown(
rx_stop.map(|res| res.unwrap_or_else(|futures::channel::oneshot::Canceled| ())),
)
.unwrap_or_else(|e| panic!("error serving repo over http: {}", e))
.await;
});
TestServer { stop, addr, use_https, task }
}
}
#[cfg(target_os = "fuchsia")]
async fn bind_listener(addr: &SocketAddr) -> fuchsia_async::net::TcpListener {
fuchsia_async::net::TcpListener::bind(addr).unwrap()
}
#[cfg(not(target_os = "fuchsia"))]
async fn bind_listener(&addr: &SocketAddr) -> async_net::TcpListener {
async_net::TcpListener::bind(addr).await.unwrap()
}
#[cfg(target_os = "fuchsia")]
fn accept_stream<'a>(
listener: &'a mut fuchsia_async::net::TcpListener,
) -> impl Stream<Item = std::io::Result<fuchsia_async::net::TcpStream>> + 'a {
use std::task::{Context, Poll};
#[pin_project::pin_project]
struct AcceptStream<'a> {
#[pin]
listener: &'a mut fuchsia_async::net::TcpListener,
}
impl<'a> Stream for AcceptStream<'a> {
type Item = std::io::Result<fuchsia_async::net::TcpStream>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.project();
match this.listener.async_accept(cx) {
Poll::Ready(Ok((conn, _addr))) => Poll::Ready(Some(Ok(conn))),
Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))),
Poll::Pending => Poll::Pending,
}
}
}
AcceptStream { listener }
}
#[cfg(not(target_os = "fuchsia"))]
fn accept_stream<'a>(
listener: &'a mut async_net::TcpListener,
) -> impl Stream<Item = std::io::Result<async_net::TcpStream>> + 'a {
listener.incoming()
}
fn parse_cert_chain(mut bytes: &[u8]) -> Vec<rustls::Certificate> {
rustls::internal::pemfile::certs(&mut bytes).expect("certs to parse")
}
fn parse_private_key(mut bytes: &[u8]) -> rustls::PrivateKey {
let keys =
rustls::internal::pemfile::rsa_private_keys(&mut bytes).expect("private keys to parse");
assert_eq!(keys.len(), 1, "expecting a single private key");
keys.into_iter().next().unwrap()
}
trait AsyncReadWrite: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {}
impl<T> AsyncReadWrite for T where T: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {}
// These are a set of useful functions when writing tests.
/// Create a GET request for a given url, which can be used with any hyper client.
pub fn make_get(url: impl AsRef<str>) -> Result<Request<Body>, Error> {
Request::get(url.as_ref | /// Create a Builder
pub fn builder() -> TestServerBuilder {
TestServerBuilder::new()
}
} | random_line_split |
concurrent_ntlm_auth_requests.py | if num> len(sequences):
num = len(sequences)
choices = random.sample(sequences,num)
return choices
def popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer'):
curl_cmd = 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'.format(
cert,eth,user,proxy,url)
subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding="utf-8")
try:
        subp.wait(2)  # wait for completion with a 2-second timeout
except Exception as e:
print('curl_request_timeout, error: ',e)
return
    if subp.poll() == 0:
        print(subp.communicate()[1])  # curl writes its progress output to stderr
    else:
        print("curl_request failed: ", curl_cmd)
return
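# Minimal usage sketch for popen_curl_request (the values below are illustrative assumptions,
# matching the defaults used elsewhere in this script, not fixed requirements):
# popen_curl_request('https://www.baidu.com', 'userg305', 'eth0:5',
#                    proxy='172.17.33.23:8080', cert='rootCA.cer')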
def system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False):
"""
-I: header request
-k: skip ssl
--no-keepalive, keepalive=close
"""
curl_cmd = ''
    debug = False  # NOTE: forces debug off, so curl output is always redirected to /dev/null below
if is_http:
basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'
if debug:
pass
else:
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
curl_cmd = basic_cmd.format(eth,user,proxy,url)
else:
basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'
if debug:
pass
else:
basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
curl_cmd = basic_cmd.format(cert,eth,user,proxy,url)
try:
os_p = os.system(curl_cmd)
print('curl_cmd=',curl_cmd)
except Exception as e:
print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user))
return
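# Illustrative sketch (not part of the original script; user/interface values are assumed):
# with user='userg305', eth='eth0:5' and the defaults above, the http branch builds a
# background curl call roughly like:
# curl -I --no-keepalive --interface eth0:5 --proxy-user userg305:Firewall1 --proxy-ntlm -x 172.17.33.23:8080 http://172.16.0.1 > /dev/null 2>&1 &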
def get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'):
"""
็จไบurlๅ็ฑปๆต่ฏ๏ผๆต่ฏๆไปถไธญๅญๆพๅคง้็urlๅฐๅ
:param from_file: str
:return: list๏ผ URL_list๏ผGenerator๏ผ
"""
txtfile = open(from_file, 'r',encoding='utf-8')
url_list = txtfile.readlines()
for i in range(0,len(url_list)):
url_list[i] = url_list[i].replace('\n','')
# print(url_list[i])
if url_index>=0:
url_var = url_list[i].split(spliter)[url_index].replace(' ','')
#print('url_var=',url_var)
protocol_header = url_var[:9].lower()
if pre_www not in url_var and not ("http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header):
url_var = pre_www + url_var
url_list[i] = url_var
protocol_header = url_list[i][:9].lower()
#print('protocol_header=',protocol_header)
if "http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header:
pass
else: # no protocol prefix: prepend a default scheme
url_list[i] = "https://" + url_list[i]
return url_list
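# Hypothetical usage sketch (file layout assumed, not from the original): given a
# CSV-like file whose second column holds bare hostnames, e.g. "1,example.com",
# urls = get_urls_from_file(from_file='hwurls_top10w.txt', url_index=1)
# returns entries normalized to "https://www.example.com".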
def get_eth_user_index(sequence=0,user_start=30,user_num=10,eth_start=0,eth_num=254):
"""
inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0
inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253
sequence: start with 0
eth_num: eth sequence start with 0
"""
user_index = sequence % user_num + user_start
eth_index = sequence % eth_num + eth_start
"""
user_index = sequence
if sequence>user_num: # wrap around and reuse the index
user_index = sequence % user_num + user_start
eth_index = sequence
if eth_index>eth_num: # wrap around and reuse the index
eth_index = eth_index % eth_num + eth_start
"""
return user_index,eth_index
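# Worked example (added for clarity, not in the original): with the defaults above,
# get_eth_user_index(sequence=12) returns (12 % 10 + 30, 12 % 254 + 0) == (32, 12),
# i.e. user index 32 on sub-interface eth0:12.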
def callback():
return
def urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253,
ip_prefix = '172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False):
"""
one ip/eth<--> one user
"""
i = 0
#count = max(len(urls),user_num,eth_num)
#for url in urls:
for i in range(max(user_num,eth_num)):
url = ''
if is_same_url:
if is_http:
url = 'http://172.16.0.1' #use the same url for request test
else:
url = 'https://www.baidu.com'
user_index = i % user_num + user_start
eth_index = i % eth_num + sub_eth_start
#ip = get_random_ip_or_user(start=2,end=254)
#ip = ip_prefix + str(eth_index + 1)
#user = get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user')
user = 'userg'+str(user_index)
#eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user')
eth = 'eth0:'+str(eth_index)
""" For debug
print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))
print('ip_{0}={1}'.format(i,ip))
print('eth=',eth)
print('user=',user)
print("-" * 50)
"""
#thread_pool.put(system_curl_request, (url,user,eth,), callback)
#popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
#system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug)
#i = i + 1
return
#"""
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='This Python 3 script runs concurrent NTLM authentication tests against an ASWG proxy.\n 1. Usage example:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080')
parser.add_argument('-r','--round', type=int, default=1,help='Number of concurrent-auth test rounds; defaults to 1 round and then stops')
parser.add_argument('-s','--starttime', type=str, default='',help='Start time of the first concurrent-auth test round, e.g. 16:20:60')
parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='Authentication cache expiry time; default 600 seconds')
parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy')
parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='Client IP prefix; only a class-C (/24) range is supported by default, adapt other layouts yourself')
parser.add_argument('-u','--is-same-url', type=bool, default=True,help='Whether every request uses the same URL')
parser.add_argument('-u1','--is-http', type=bool, default=True,help='When the same URL is used, choose http or https requests')
parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='Source file for the URLs')
parser.add_argument('-f1','--url-index', type=int | quences.append(prefix+str(i))
| conditional_block |
|
concurrent_ntlm_auth_requests.py | subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding="utf-8")
try:
subp.wait(2) # wait for the subprocess to finish; time out after 2 seconds
except Exception as e:
print('curl_request_timeout, error: ',e)
return
if subp.poll() == 0:
print(subp.communicate()[1])
else:
print("curl_request-ๅคฑ่ดฅ: ",curl_cmd)
return
def system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False):
"""
-I: header request
-k: skip ssl
--no-keepalive, keepalive=close
"""
curl_cmd = ''
debug = False
if is_http:
basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'
if debug:
pass
else:
basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
curl_cmd = basic_cmd.format(eth,user,proxy,url)
else:
basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'
if debug:
pass
else:
basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
curl_cmd = basic_cmd.format(cert,eth,user,proxy,url)
try:
os_p = os.system(curl_cmd)
print('curl_cmd=',curl_cmd)
except Exception as e:
print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user))
return
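# Illustrative sketch (assumed values, not in the original script): the https branch
# builds a command of the form
# curl -I --cacert rootCA.cer --interface eth0:5 --proxy-user userg305:Firewall1 --proxy-ntlm -x 172.17.33.23:8080 https://www.baidu.com > /dev/null 2>&1 &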
def get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'):
"""
็จไบurlๅ็ฑปๆต่ฏ๏ผๆต่ฏๆไปถไธญๅญๆพๅคง้็urlๅฐๅ
:param from_file: str
:return: list๏ผ URL_list๏ผGenerator๏ผ
"""
txtfile = open(from_file, 'r',encoding='utf-8')
url_list = txtfile.readlines()
for i in range(0,len(url_list)):
url_list[i] = url_list[i].replace('\n','')
# print(url_list[i])
if url_index>=0:
url_var = url_list[i].split(spliter)[url_index].replace(' ','')
#print('url_var=',url_var)
protocol_header = url_var[:9].lower()
if pre_www not in url_var and not ("http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header):
url_var = pre_www + url_var
url_list[i] = url_var
protocol_header = url_list[i][:9].lower()
#print('protocol_header=',protocol_header)
if "http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header:
pass
else: # no protocol prefix: prepend a default scheme
url_list[i] = "https://" + url_list[i]
return url_list
def get_eth_user_index(sequence=0,user_start=30,user_num=10,eth_start=0,eth_num=254):
"""
inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0
inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253
sequence: start with 0
eth_num: eth sequence start with 0
"""
user_index = sequence % user_num + user_start
eth_index = sequence % eth_num + eth_start
"""
user_index = sequence
if sequence>user_num: # wrap around and reuse the index
user_index = sequence % user_num + user_start
eth_index = sequence
if eth_index>eth_num: # wrap around and reuse the index
eth_index = eth_index % eth_num + eth_start
"""
return user_index,eth_index
def callback():
return
def urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253,
ip_prefix = '172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False):
"""
one ip/eth<--> one user
"""
i = 0
#count = max(len(urls),user_num,eth_num)
#for url in urls:
for i in range(max(user_num,eth_num)):
url = ''
if is_same_url:
if is_http:
url = 'http://172.16.0.1' #use the same url for request test
else:
url = 'https://www.baidu.com'
user_index = i % user_num + user_start
eth_index = i % eth_num + sub_eth_start
#ip = get_random_ip_or_user(start=2,end=254)
#ip = ip_prefix + str(eth_index + 1)
#user = get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user')
user = 'userg'+str(user_index)
#eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user')
eth = 'eth0:'+str(eth_index)
""" For debug
print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))
print('ip_{0}={1}'.format(i,ip))
print('eth=',eth)
print('user=',user)
print("-" * 50)
"""
#thread_pool.put(system_curl_request, (url,user,eth,), callback)
#popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
#system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug)
#i = i + 1
return
#"""
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='This Python 3 script runs concurrent NTLM authentication tests against an ASWG proxy.\n 1. Usage example:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080')
parser.add_argument('-r','--round', type=int, default=1,help='Number of concurrent-auth test rounds; defaults to 1 round and then stops')
parser.add_argument('-s','--starttime', type=str, default='',help='Start time of the first concurrent-auth test round, e.g. 16:20:60')
parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='Authentication cache expiry time; default 600 seconds')
parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy')
parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='Client IP prefix; only a class-C (/24) range is supported by default, adapt other layouts yourself')
parser.add_argument('-u','--is-same-url', type=bool, default=True,help='Whether every request uses the same URL')
parser.add_argument('-u1','--is-http', type=bool, default=True,help='When the same URL is used, choose http or https requests')
parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='Source file for the URLs')
parser.add_argument('-f1','--url-index', type=int, default=0,help='Field index within the URL source file; defaults to 0')
parser.add_argument('-a0','--start-user-index', type=int | def popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer'):
curl_cmd = 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'.format(
cert,eth,user,proxy,url)
| random_line_split |
|
concurrent_ntlm_auth_requests.py | {3} {4} &'.format(
cert,eth,user,proxy,url)
subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding="utf-8")
try:
subp.wait(2) # wait for the subprocess to finish; time out after 2 seconds
except Exception as e:
print('curl_request_timeout, error: ',e)
return
if subp.poll() == 0:
print(subp.communicate()[1])
else:
print("curl_request-ๅคฑ่ดฅ: ",curl_cmd)
return
def system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False):
"""
-I: header request
-k: skip ssl
--no-keepalive, keepalive=close
"""
curl_cmd = ''
debug = False
if is_http:
basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'
if debug:
pass
else:
basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
curl_cmd = basic_cmd.format(eth,user,proxy,url)
else:
basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'
if debug:
pass
else:
basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
curl_cmd = basic_cmd.format(cert,eth,user,proxy,url)
try:
os_p = os.system(curl_cmd)
print('curl_cmd=',curl_cmd)
except Exception as e:
print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user))
return
def get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'):
"""
็จไบurlๅ็ฑปๆต่ฏ๏ผๆต่ฏๆไปถไธญๅญๆพๅคง้็urlๅฐๅ
:param from_file: str
:return: list๏ผ URL_list๏ผGenerator๏ผ
"""
txtfile = open(from_file, 'r',encoding='utf-8')
url_list = txtfile.readlines()
for i in range(0,len(url_list)):
url_list[i] = url_list[i].replace('\n','')
# print(url_list[i])
if url_index>=0:
url_var = url_list[i].split(spliter)[url_index].replace(' ','')
#print('url_var=',url_var)
protocol_header = url_var[:9].lower()
if pre_www not in url_var and not ("http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header):
url_var = pre_www + url_var
url_list[i] = url_var
protocol_header = url_list[i][:9].lower()
#print('protocol_header=',protocol_header)
if "http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header:
pass
else: # no protocol prefix: prepend a default scheme
url_list[i] = "https://" + url_list[i]
return url_list
def get_eth_user_index(sequence=0,user_start=30,user_num=10,eth_start=0,eth_num=254):
"""
inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0
inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253
sequence: start with 0
eth_num: eth sequence start with 0
"""
user_index = sequence % user_num + user_start
eth_index = sequence % eth_num + eth_start
"""
user_index = sequence
if sequence>user_num: # wrap around and reuse the index
user_index = sequence % user_num + user_start
eth_index = sequence
if eth_index>eth_num: # wrap around and reuse the index
eth_index = eth_index % eth_num + eth_start
"""
return user_index,eth_index
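# Worked example (added, not in the original): get_eth_user_index(5, user_start=300,
# user_num=253, eth_start=0, eth_num=253) returns (5 % 253 + 300, 5 % 253 + 0) == (305, 5).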
def callback():
return
def urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253,
| 172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False):
"""
one ip/eth<--> one user
"""
i = 0
#count = max(len(urls),user_num,eth_num)
#for url in urls:
for i in range(max(user_num,eth_num)):
url = ''
if is_same_url:
if is_http:
url = 'http://172.16.0.1' #use the same url for request test
else:
url = 'https://www.baidu.com'
user_index = i % user_num + user_start
eth_index = i % eth_num + sub_eth_start
#ip = get_random_ip_or_user(start=2,end=254)
#ip = ip_prefix + str(eth_index + 1)
#user = get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user')
user = 'userg'+str(user_index)
#eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user')
eth = 'eth0:'+str(eth_index)
""" For debug
print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))
print('ip_{0}={1}'.format(i,ip))
print('eth=',eth)
print('user=',user)
print("-" * 50)
"""
#thread_pool.put(system_curl_request, (url,user,eth,), callback)
#popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
#system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug)
#i = i + 1
return
#"""
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='This Python 3 script runs concurrent NTLM authentication tests against an ASWG proxy.\n 1. Usage example:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080')
parser.add_argument('-r','--round', type=int, default=1,help='Number of concurrent-auth test rounds; defaults to 1 round and then stops')
parser.add_argument('-s','--starttime', type=str, default='',help='Start time of the first concurrent-auth test round, e.g. 16:20:60')
parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='Authentication cache expiry time; default 600 seconds')
parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy')
parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='Client IP prefix; only a class-C (/24) range is supported by default, adapt other layouts yourself')
parser.add_argument('-u','--is-same-url', type=bool, default=True,help='Whether every request uses the same URL')
parser.add_argument('-u1','--is-http', type=bool, default=True,help='When the same URL is used, choose http or https requests')
parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='Source file for the URLs')
parser.add_argument('-f1','--url-index', type=int, default=0,help='Field index within the URL source file; defaults to 0')
parser.add_argument('-a0','--start-user-index', type=int, default=0,help='Starting index of the auth users; defaults to 0')
parser.add_argument('-a1','--user-num', type=int, default=1275,help='Number of auth users')
parser.add_argument('-e0','--start-eth0-index', type=int, default=0,help='ๅผๅง็ๅญ็ฝๅก | ip_prefix = ' | identifier_name |
concurrent_ntlm_auth_requests.py | 3} {4} &'.format(
cert,eth,user,proxy,url)
subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding="utf-8")
try:
subp.wait(2) # wait for the subprocess to finish; time out after 2 seconds
except Exception as e:
print('curl_request_timeout, error: ',e)
return
if subp.poll() == 0:
print(subp.communicate()[1])
else:
print("curl_request-ๅคฑ่ดฅ: ",curl_cmd)
return
def system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False):
"""
-I: header request
-k: skip ssl
--no-keepalive, keepalive=close
"""
curl_cmd = ''
debug = False
if is_http:
basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'
if debug:
pass
else:
basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
curl_cmd = basic_cmd.format(eth,user,proxy,url)
else:
basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'
if debug:
pass
else:
basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
curl_cmd = basic_cmd.format(cert,eth,user,proxy,url)
try:
os_p = os.system(curl_cmd)
print('curl_cmd=',curl_cmd)
except Exception as e:
print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user))
return
def get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'):
"""
็จไบurl | else: #ๆ ๅ่ฎฎๅคด้จ๏ผ้ป่ฎคๅ httpๅ่ฎฎ
url_list[i] = "https://" + url_list[i]
return url_list
def get_eth_user_index(sequence=0,user_start=30,user_num=10,eth_start=0,eth_num=254):
"""
inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0
inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253
sequence: start with 0
eth_num: eth sequence start with 0
"""
user_index = sequence % user_num + user_start
eth_index = sequence % eth_num + eth_start
"""
user_index = sequence
if sequence>user_num: # wrap around and reuse the index
user_index = sequence % user_num + user_start
eth_index = sequence
if eth_index>eth_num: # wrap around and reuse the index
eth_index = eth_index % eth_num + eth_start
"""
return user_index,eth_index
def callback():
return
def urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253,
ip_prefix = '172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False):
"""
one ip/eth<--> one user
"""
i = 0
#count = max(len(urls),user_num,eth_num)
#for url in urls:
for i in range(max(user_num,eth_num)):
url = ''
if is_same_url:
if is_http:
url = 'http://172.16.0.1' #use the same url for request test
else:
url = 'https://www.baidu.com'
user_index = i % user_num + user_start
eth_index = i % eth_num + sub_eth_start
#ip = get_random_ip_or_user(start=2,end=254)
#ip = ip_prefix + str(eth_index + 1)
#user = get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user')
user = 'userg'+str(user_index)
#eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user')
eth = 'eth0:'+str(eth_index)
""" For debug
print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))
print('ip_{0}={1}'.format(i,ip))
print('eth=',eth)
print('user=',user)
print("-" * 50)
"""
#thread_pool.put(system_curl_request, (url,user,eth,), callback)
#popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
#system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug)
#i = i + 1
return
#"""
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='This Python 3 script runs concurrent NTLM authentication tests against an ASWG proxy.\n 1. Usage example:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080')
parser.add_argument('-r','--round', type=int, default=1,help='Number of concurrent-auth test rounds; defaults to 1 round and then stops')
parser.add_argument('-s','--starttime', type=str, default='',help='Start time of the first concurrent-auth test round, e.g. 16:20:60')
parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='Authentication cache expiry time; default 600 seconds')
parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy')
parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='Client IP prefix; only a class-C (/24) range is supported by default, adapt other layouts yourself')
parser.add_argument('-u','--is-same-url', type=bool, default=True,help='Whether every request uses the same URL')
parser.add_argument('-u1','--is-http', type=bool, default=True,help='When the same URL is used, choose http or https requests')
parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='Source file for the URLs')
parser.add_argument('-f1','--url-index', type=int, default=0,help='Field index within the URL source file; defaults to 0')
parser.add_argument('-a0','--start-user-index', type=int, default=0,help='Starting index of the auth users; defaults to 0')
parser.add_argument('-a1','--user-num', type=int, default=1275,help='Number of auth users')
parser.add_argument('-e0','--start-eth0-index', type=int, default=0,help='ๅผๅง็ๅญ็ฝ | ๅ็ฑปๆต่ฏ๏ผๆต่ฏๆไปถไธญๅญๆพๅคง้็urlๅฐๅ
:param from_file: str
:return: list (URL_list / Generator)
"""
txtfile = open(from_file, 'r',encoding='utf-8')
url_list = txtfile.readlines()
for i in range(0,len(url_list)):
url_list[i] = url_list[i].replace('\n','')
# print(url_list[i])
if url_index>=0:
url_var = url_list[i].split(spliter)[url_index].replace(' ','')
#print('url_var=',url_var)
protocol_header = url_var[:9].lower()
if pre_www not in url_var and not ("http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header):
url_var = pre_www + url_var
url_list[i] = url_var
protocol_header = url_list[i][:9].lower()
#print('protocol_header=',protocol_header)
if "http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header:
pass
| identifier_body |
spline.rs | _vec();
if control_points.len() == 2 {
kind = SliderSplineKind::Linear;
}
if control_points.len() == 3
&& Math::is_line(
control_points[0].to_float::<f64>().unwrap(),
control_points[1].to_float::<f64>().unwrap(),
control_points[2].to_float::<f64>().unwrap(),
)
{
kind = SliderSplineKind::Linear;
control_points.remove(1);
}
let points = control_points
.iter()
.map(|p| Point::new(p.x as f64, p.y as f64))
.collect::<Vec<_>>();
let spline_points = match kind {
SliderSplineKind::Linear => {
let start = points[0];
let end = if let Some(pixel_length) = pixel_length {
Math::point_on_line(points[0], points[1], pixel_length)
} else {
points[1]
};
vec![start, end]
}
SliderSplineKind::Perfect => {
let (p1, p2, p3) = (points[0], points[1], points[2]);
let (center, radius) = Math::circumcircle(p1, p2, p3);
// find the t-values of the start and end of the slider
let t0 = (center.y - p1.y).atan2(p1.x - center.x);
let mut mid = (center.y - p2.y).atan2(p2.x - center.x);
let mut t1 = (center.y - p3.y).atan2(p3.x - center.x);
// make sure t0 is less than t1
while mid < t0 {
mid += std::f64::consts::TAU;
}
while t1 < t0 {
t1 += std::f64::consts::TAU;
}
if mid > t1 {
t1 -= std::f64::consts::TAU;
}
let diff = (t1 - t0).abs();
let pixel_length = pixel_length.unwrap_or(radius * diff);
// circumference is 2 * pi * r, slider length over circumference is length/(2 * pi * r)
let direction_unit = (t1 - t0) / (t1 - t0).abs();
let new_t1 = t0 + direction_unit * (pixel_length / radius);
let mut t = t0;
let mut c = Vec::new();
loop {
if !((new_t1 >= t0 && t < new_t1) || (new_t1 < t0 && t > new_t1)) {
break;
}
let rel = Point::new(t.cos() * radius, -t.sin() * radius);
c.push(center + rel);
t += (new_t1 - t0) / pixel_length;
}
c
}
SliderSplineKind::Bezier => {
let mut output = Vec::new();
let mut last_index = 0;
let mut i = 0;
while i < points.len() {
let multipart_segment = i < points.len() - 2 && (points[i] == points[i + 1]);
if multipart_segment || i == points.len() - 1 {
let sub = &points[last_index..i + 1];
if sub.len() == 2 {
output.push(points[0]);
output.push(points[1]);
} else {
create_singlebezier(&mut output, sub);
}
if multipart_segment {
i += 1;
}
last_index = i;
}
i += 1;
}
output
}
_ => todo!(),
};
let mut cumulative_lengths = Vec::with_capacity(spline_points.len());
let mut curr = 0.0;
// using NotNan here because these need to be binary-searched over
// and f64 isn't Ord
cumulative_lengths.push(NotNan::new(curr).unwrap());
for points in spline_points.windows(2) {
let dist = points[0].distance(points[1]);
curr += dist;
cumulative_lengths.push(NotNan::new(curr).unwrap());
}
Spline {
spline_points,
cumulative_lengths,
}
}
/// Truncate the length of the spline irreversibly
pub fn truncate(&mut self, to_length: f64) {
debug!("truncating to {} pixels", to_length);
let mut limit_idx = None;
for (i, cumul_length) in self.cumulative_lengths.iter().enumerate() {
if cumul_length.into_inner() > to_length {
limit_idx = Some(i);
break;
}
}
let limit_idx = match limit_idx {
Some(v) if v > 0 => v,
_ => return,
};
let prev_idx = limit_idx - 1;
let a = self.spline_points[prev_idx];
let b = self.spline_points[limit_idx];
let a_len = self.cumulative_lengths[prev_idx];
debug!("a={:?} (a_len={}) b={:?}", a, b, a_len);
let remain = to_length - a_len.into_inner();
let mid = Math::point_on_line(a, b, remain);
debug!("remain={:?} mid={:?}", remain, mid);
self.spline_points[limit_idx] = mid;
self.cumulative_lengths[limit_idx] = NotNan::new(to_length).unwrap();
debug!("spline_points[{}] = {:?}", limit_idx, mid);
debug!("cumulative_lengths[{}] = {:?}", limit_idx, to_length);
self.spline_points.truncate(limit_idx + 1);
self.cumulative_lengths.truncate(limit_idx + 1);
debug!("truncated to len {}", limit_idx + 1);
}
/// Return the pixel length of this spline
pub fn pixel_length(&self) -> f64 {
self.cumulative_lengths.last().unwrap().into_inner()
}
/// Return the endpoint of this spline
pub fn end_point(&self) -> P {
self.spline_points.last().cloned().unwrap()
}
/// Calculate the angle at the given length on the slider
fn angle_at_length(&self, length: f64) -> P {
let _length_notnan = NotNan::new(length).unwrap();
// match self.cumulative_lengths.binary_search(&length_notnan) {
// Ok(_) => {}
// Err(_) => {}
// }
todo!()
}
/// Calculate the point at which the slider ball would be after it has traveled a distance of
/// `length` into the slider.
pub fn point_at_length(&self, length: f64) -> P {
let length_notnan = NotNan::new(length).unwrap();
match self.cumulative_lengths.binary_search(&length_notnan) {
Ok(idx) => self.spline_points[idx],
Err(idx) => {
let n = self.spline_points.len();
if idx == 0 && self.spline_points.len() > 2 {
return self.spline_points[0];
} else if idx == n {
return self.spline_points[n - 1];
} | self.cumulative_lengths[idx].into_inner(),
);
let proportion = (length - len1) / (len2 - len1);
let (p1, p2) = (self.spline_points[idx - 1], self.spline_points[idx]);
(p2 - p1) * P::new(proportion, proportion) + p1
}
}
}
}
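// Hypothetical usage sketch (not taken from this file; how the spline is constructed
// is assumed): once a `Spline` has been built from a slider's control points, the
// slider-ball position partway along the slider can be sampled with the API above:
// let half = spline.pixel_length() / 2.0;
// let ball: Point<f64> = spline.point_at_length(half);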
type P = Point<f64>;
fn subdivide(control_points: &[P], l: &mut [P], r: &mut [P], midpoints_buf: &mut [P]) {
let count = control_points.len();
midpoints_buf.copy_from_slice(control_points);
for i in 0..count {
l[i] = midpoints_buf[0];
r[count - i - 1] = midpoints_buf[count - i - 1];
for j in 0..count - i - 1 {
midpoints_buf[j] = (midpoints_buf[j] + midpoints_buf[j + 1]) / P::new(2.0, 2.0);
}
}
}
fn approximate(
control_points: &[P],
output: &mut Vec<P>,
l_buf: &mut [P],
r_buf: &mut [P],
midpoints_buf: &mut [P],
) {
let count = control_points.len();
subdivide(&control_points, l_buf, r_buf, midpoints_buf);
l_buf[count..(count * 2) - 1].clone_from_slice(&r_buf[1..count]);
output.push(control_points[0]);
for i in 1..count - 1 {
let index = 2 * i;
let p = (l_buf[index] * P::new(2.0, 2.0) + l_buf[index - 1] + l_buf[index + 1])
* P::new(0.25, 0.25);
output.push(p |
let (len1, len2) = (
self.cumulative_lengths[idx - 1].into_inner(), | random_line_split |
spline.rs | _vec();
if control_points.len() == 2 {
kind = SliderSplineKind::Linear;
}
if control_points.len() == 3
&& Math::is_line(
control_points[0].to_float::<f64>().unwrap(),
control_points[1].to_float::<f64>().unwrap(),
control_points[2].to_float::<f64>().unwrap(),
)
{
kind = SliderSplineKind::Linear;
control_points.remove(1);
}
let points = control_points
.iter()
.map(|p| Point::new(p.x as f64, p.y as f64))
.collect::<Vec<_>>();
let spline_points = match kind {
SliderSplineKind::Linear => {
let start = points[0];
let end = if let Some(pixel_length) = pixel_length {
Math::point_on_line(points[0], points[1], pixel_length)
} else {
points[1]
};
vec![start, end]
}
SliderSplineKind::Perfect => {
let (p1, p2, p3) = (points[0], points[1], points[2]);
let (center, radius) = Math::circumcircle(p1, p2, p3);
// find the t-values of the start and end of the slider
let t0 = (center.y - p1.y).atan2(p1.x - center.x);
let mut mid = (center.y - p2.y).atan2(p2.x - center.x);
let mut t1 = (center.y - p3.y).atan2(p3.x - center.x);
// make sure t0 is less than t1
while mid < t0 {
mid += std::f64::consts::TAU;
}
while t1 < t0 {
t1 += std::f64::consts::TAU;
}
if mid > t1 {
t1 -= std::f64::consts::TAU;
}
let diff = (t1 - t0).abs();
let pixel_length = pixel_length.unwrap_or(radius * diff);
// circumference is 2 * pi * r, slider length over circumference is length/(2 * pi * r)
let direction_unit = (t1 - t0) / (t1 - t0).abs();
let new_t1 = t0 + direction_unit * (pixel_length / radius);
let mut t = t0;
let mut c = Vec::new();
loop {
if !((new_t1 >= t0 && t < new_t1) || (new_t1 < t0 && t > new_t1)) {
break;
}
let rel = Point::new(t.cos() * radius, -t.sin() * radius);
c.push(center + rel);
t += (new_t1 - t0) / pixel_length;
}
c
}
SliderSplineKind::Bezier => {
let mut output = Vec::new();
let mut last_index = 0;
let mut i = 0;
while i < points.len() {
let multipart_segment = i < points.len() - 2 && (points[i] == points[i + 1]);
if multipart_segment || i == points.len() - 1 {
let sub = &points[last_index..i + 1];
if sub.len() == 2 {
output.push(points[0]);
output.push(points[1]);
} else {
create_singlebezier(&mut output, sub);
}
if multipart_segment {
i += 1;
}
last_index = i;
}
i += 1;
}
output
}
_ => todo!(),
};
let mut cumulative_lengths = Vec::with_capacity(spline_points.len());
let mut curr = 0.0;
// using NotNan here because these need to be binary-searched over
// and f64 isn't Ord
cumulative_lengths.push(NotNan::new(curr).unwrap());
for points in spline_points.windows(2) {
let dist = points[0].distance(points[1]);
curr += dist;
cumulative_lengths.push(NotNan::new(curr).unwrap());
}
Spline {
spline_points,
cumulative_lengths,
}
}
/// Truncate the length of the spline irreversibly
pub fn truncate(&mut self, to_length: f64) {
debug!("truncating to {} pixels", to_length);
let mut limit_idx = None;
for (i, cumul_length) in self.cumulative_lengths.iter().enumerate() {
if cumul_length.into_inner() > to_length {
limit_idx = Some(i);
break;
}
}
let limit_idx = match limit_idx {
Some(v) if v > 0 => v,
_ => return,
};
let prev_idx = limit_idx - 1;
let a = self.spline_points[prev_idx];
let b = self.spline_points[limit_idx];
let a_len = self.cumulative_lengths[prev_idx];
debug!("a={:?} (a_len={}) b={:?}", a, b, a_len);
let remain = to_length - a_len.into_inner();
let mid = Math::point_on_line(a, b, remain);
debug!("remain={:?} mid={:?}", remain, mid);
self.spline_points[limit_idx] = mid;
self.cumulative_lengths[limit_idx] = NotNan::new(to_length).unwrap();
debug!("spline_points[{}] = {:?}", limit_idx, mid);
debug!("cumulative_lengths[{}] = {:?}", limit_idx, to_length);
self.spline_points.truncate(limit_idx + 1);
self.cumulative_lengths.truncate(limit_idx + 1);
debug!("truncated to len {}", limit_idx + 1);
}
/// Return the pixel length of this spline
pub fn pixel_length(&self) -> f64 {
self.cumulative_lengths.last().unwrap().into_inner()
}
/// Return the endpoint of this spline
pub fn end_point(&self) -> P {
self.spline_points.last().cloned().unwrap()
}
/// Calculate the angle at the given length on the slider
fn angle_at_length(&self, length: f64) -> P {
let _length_notnan = NotNan::new(length).unwrap();
// match self.cumulative_lengths.binary_search(&length_notnan) {
// Ok(_) => {}
// Err(_) => {}
// }
todo!()
}
/// Calculate the point at which the slider ball would be after it has traveled a distance of
/// `length` into the slider.
pub fn point_at_length(&self, length: f64) -> P {
let length_notnan = NotNan::new(length).unwrap();
match self.cumulative_lengths.binary_search(&length_notnan) {
Ok(idx) => self.spline_points[idx],
Err(idx) => {
let n = self.spline_points.len();
if idx == 0 && self.spline_points.len() > 2 {
return self.spline_points[0];
} else if idx == n {
return self.spline_points[n - 1];
}
let (len1, len2) = (
self.cumulative_lengths[idx - 1].into_inner(),
self.cumulative_lengths[idx].into_inner(),
);
let proportion = (length - len1) / (len2 - len1);
let (p1, p2) = (self.spline_points[idx - 1], self.spline_points[idx]);
(p2 - p1) * P::new(proportion, proportion) + p1
}
}
}
}
type P = Point<f64>;
fn | (control_points: &[P], l: &mut [P], r: &mut [P], midpoints_buf: &mut [P]) {
let count = control_points.len();
midpoints_buf.copy_from_slice(control_points);
for i in 0..count {
l[i] = midpoints_buf[0];
r[count - i - 1] = midpoints_buf[count - i - 1];
for j in 0..count - i - 1 {
midpoints_buf[j] = (midpoints_buf[j] + midpoints_buf[j + 1]) / P::new(2.0, 2.0);
}
}
}
fn approximate(
control_points: &[P],
output: &mut Vec<P>,
l_buf: &mut [P],
r_buf: &mut [P],
midpoints_buf: &mut [P],
) {
let count = control_points.len();
subdivide(&control_points, l_buf, r_buf, midpoints_buf);
l_buf[count..(count * 2) - 1].clone_from_slice(&r_buf[1..count]);
output.push(control_points[0]);
for i in 1..count - 1 {
let index = 2 * i;
let p = (l_buf[index] * P::new(2.0, 2.0) + l_buf[index - 1] + l_buf[index + 1])
* P::new(0.25, 0.25);
output.push(p | subdivide | identifier_name |
spline.rs | _vec();
if control_points.len() == 2 {
kind = SliderSplineKind::Linear;
}
if control_points.len() == 3
&& Math::is_line(
control_points[0].to_float::<f64>().unwrap(),
control_points[1].to_float::<f64>().unwrap(),
control_points[2].to_float::<f64>().unwrap(),
)
{
kind = SliderSplineKind::Linear;
control_points.remove(1);
}
let points = control_points
.iter()
.map(|p| Point::new(p.x as f64, p.y as f64))
.collect::<Vec<_>>();
let spline_points = match kind {
SliderSplineKind::Linear => {
let start = points[0];
let end = if let Some(pixel_length) = pixel_length {
Math::point_on_line(points[0], points[1], pixel_length)
} else {
points[1]
};
vec![start, end]
}
SliderSplineKind::Perfect => {
let (p1, p2, p3) = (points[0], points[1], points[2]);
let (center, radius) = Math::circumcircle(p1, p2, p3);
// find the t-values of the start and end of the slider
let t0 = (center.y - p1.y).atan2(p1.x - center.x);
let mut mid = (center.y - p2.y).atan2(p2.x - center.x);
let mut t1 = (center.y - p3.y).atan2(p3.x - center.x);
// make sure t0 is less than t1
while mid < t0 {
mid += std::f64::consts::TAU;
}
while t1 < t0 {
t1 += std::f64::consts::TAU;
}
if mid > t1 {
t1 -= std::f64::consts::TAU;
}
let diff = (t1 - t0).abs();
let pixel_length = pixel_length.unwrap_or(radius * diff);
// circumference is 2 * pi * r, slider length over circumference is length/(2 * pi * r)
let direction_unit = (t1 - t0) / (t1 - t0).abs();
let new_t1 = t0 + direction_unit * (pixel_length / radius);
let mut t = t0;
let mut c = Vec::new();
loop {
if !((new_t1 >= t0 && t < new_t1) || (new_t1 < t0 && t > new_t1)) {
break;
}
let rel = Point::new(t.cos() * radius, -t.sin() * radius);
c.push(center + rel);
t += (new_t1 - t0) / pixel_length;
}
c
}
SliderSplineKind::Bezier => {
let mut output = Vec::new();
let mut last_index = 0;
let mut i = 0;
while i < points.len() {
let multipart_segment = i < points.len() - 2 && (points[i] == points[i + 1]);
if multipart_segment || i == points.len() - 1 {
let sub = &points[last_index..i + 1];
if sub.len() == 2 {
output.push(points[0]);
output.push(points[1]);
} else {
create_singlebezier(&mut output, sub);
}
if multipart_segment {
i += 1;
}
last_index = i;
}
i += 1;
}
output
}
_ => todo!(),
};
let mut cumulative_lengths = Vec::with_capacity(spline_points.len());
let mut curr = 0.0;
// using NotNan here because these need to be binary-searched over
// and f64 isn't Ord
cumulative_lengths.push(NotNan::new(curr).unwrap());
for points in spline_points.windows(2) {
let dist = points[0].distance(points[1]);
curr += dist;
cumulative_lengths.push(NotNan::new(curr).unwrap());
}
Spline {
spline_points,
cumulative_lengths,
}
}
/// Truncate the length of the spline irreversibly
pub fn truncate(&mut self, to_length: f64) {
debug!("truncating to {} pixels", to_length);
let mut limit_idx = None;
for (i, cumul_length) in self.cumulative_lengths.iter().enumerate() {
if cumul_length.into_inner() > to_length {
limit_idx = Some(i);
break;
}
}
let limit_idx = match limit_idx {
Some(v) if v > 0 => v,
_ => return,
};
let prev_idx = limit_idx - 1;
let a = self.spline_points[prev_idx];
let b = self.spline_points[limit_idx];
let a_len = self.cumulative_lengths[prev_idx];
debug!("a={:?} (a_len={}) b={:?}", a, b, a_len);
let remain = to_length - a_len.into_inner();
let mid = Math::point_on_line(a, b, remain);
debug!("remain={:?} mid={:?}", remain, mid);
self.spline_points[limit_idx] = mid;
self.cumulative_lengths[limit_idx] = NotNan::new(to_length).unwrap();
debug!("spline_points[{}] = {:?}", limit_idx, mid);
debug!("cumulative_lengths[{}] = {:?}", limit_idx, to_length);
self.spline_points.truncate(limit_idx + 1);
self.cumulative_lengths.truncate(limit_idx + 1);
debug!("truncated to len {}", limit_idx + 1);
}
/// Return the pixel length of this spline
pub fn pixel_length(&self) -> f64 {
self.cumulative_lengths.last().unwrap().into_inner()
}
/// Return the endpoint of this spline
pub fn end_point(&self) -> P |
/// Calculate the angle at the given length on the slider
fn angle_at_length(&self, length: f64) -> P {
let _length_notnan = NotNan::new(length).unwrap();
// match self.cumulative_lengths.binary_search(&length_notnan) {
// Ok(_) => {}
// Err(_) => {}
// }
todo!()
}
/// Calculate the point at which the slider ball would be after it has traveled a distance of
/// `length` into the slider.
pub fn point_at_length(&self, length: f64) -> P {
let length_notnan = NotNan::new(length).unwrap();
match self.cumulative_lengths.binary_search(&length_notnan) {
Ok(idx) => self.spline_points[idx],
Err(idx) => {
let n = self.spline_points.len();
if idx == 0 && self.spline_points.len() > 2 {
return self.spline_points[0];
} else if idx == n {
return self.spline_points[n - 1];
}
let (len1, len2) = (
self.cumulative_lengths[idx - 1].into_inner(),
self.cumulative_lengths[idx].into_inner(),
);
let proportion = (length - len1) / (len2 - len1);
let (p1, p2) = (self.spline_points[idx - 1], self.spline_points[idx]);
(p2 - p1) * P::new(proportion, proportion) + p1
}
}
}
}
type P = Point<f64>;
fn subdivide(control_points: &[P], l: &mut [P], r: &mut [P], midpoints_buf: &mut [P]) {
let count = control_points.len();
midpoints_buf.copy_from_slice(control_points);
for i in 0..count {
l[i] = midpoints_buf[0];
r[count - i - 1] = midpoints_buf[count - i - 1];
for j in 0..count - i - 1 {
midpoints_buf[j] = (midpoints_buf[j] + midpoints_buf[j + 1]) / P::new(2.0, 2.0);
}
}
}
fn approximate(
control_points: &[P],
output: &mut Vec<P>,
l_buf: &mut [P],
r_buf: &mut [P],
midpoints_buf: &mut [P],
) {
let count = control_points.len();
subdivide(&control_points, l_buf, r_buf, midpoints_buf);
l_buf[count..(count * 2) - 1].clone_from_slice(&r_buf[1..count]);
output.push(control_points[0]);
for i in 1..count - 1 {
let index = 2 * i;
let p = (l_buf[index] * P::new(2.0, 2.0) + l_buf[index - 1] + l_buf[index + 1])
* P::new(0.25, 0.25);
output.push | {
self.spline_points.last().cloned().unwrap()
} | identifier_body |
spline.rs | _vec();
if control_points.len() == 2 {
kind = SliderSplineKind::Linear;
}
if control_points.len() == 3
&& Math::is_line(
control_points[0].to_float::<f64>().unwrap(),
control_points[1].to_float::<f64>().unwrap(),
control_points[2].to_float::<f64>().unwrap(),
)
{
kind = SliderSplineKind::Linear;
control_points.remove(1);
}
let points = control_points
.iter()
.map(|p| Point::new(p.x as f64, p.y as f64))
.collect::<Vec<_>>();
let spline_points = match kind {
SliderSplineKind::Linear => {
let start = points[0];
let end = if let Some(pixel_length) = pixel_length {
Math::point_on_line(points[0], points[1], pixel_length)
} else {
points[1]
};
vec![start, end]
}
SliderSplineKind::Perfect => | let diff = (t1 - t0).abs();
let pixel_length = pixel_length.unwrap_or(radius * diff);
// circumference is 2 * pi * r, slider length over circumference is length/(2 * pi * r)
let direction_unit = (t1 - t0) / (t1 - t0).abs();
let new_t1 = t0 + direction_unit * (pixel_length / radius);
let mut t = t0;
let mut c = Vec::new();
loop {
if !((new_t1 >= t0 && t < new_t1) || (new_t1 < t0 && t > new_t1)) {
break;
}
let rel = Point::new(t.cos() * radius, -t.sin() * radius);
c.push(center + rel);
t += (new_t1 - t0) / pixel_length;
}
c
}
SliderSplineKind::Bezier => {
let mut output = Vec::new();
let mut last_index = 0;
let mut i = 0;
while i < points.len() {
let multipart_segment = i < points.len() - 2 && (points[i] == points[i + 1]);
if multipart_segment || i == points.len() - 1 {
let sub = &points[last_index..i + 1];
if sub.len() == 2 {
output.push(points[0]);
output.push(points[1]);
} else {
create_singlebezier(&mut output, sub);
}
if multipart_segment {
i += 1;
}
last_index = i;
}
i += 1;
}
output
}
_ => todo!(),
};
let mut cumulative_lengths = Vec::with_capacity(spline_points.len());
let mut curr = 0.0;
// using NotNan here because these need to be binary-searched over
// and f64 isn't Ord
cumulative_lengths.push(NotNan::new(curr).unwrap());
for points in spline_points.windows(2) {
let dist = points[0].distance(points[1]);
curr += dist;
cumulative_lengths.push(NotNan::new(curr).unwrap());
}
Spline {
spline_points,
cumulative_lengths,
}
}
/// Truncate the length of the spline irreversibly
pub fn truncate(&mut self, to_length: f64) {
debug!("truncating to {} pixels", to_length);
let mut limit_idx = None;
for (i, cumul_length) in self.cumulative_lengths.iter().enumerate() {
if cumul_length.into_inner() > to_length {
limit_idx = Some(i);
break;
}
}
let limit_idx = match limit_idx {
Some(v) if v > 0 => v,
_ => return,
};
let prev_idx = limit_idx - 1;
let a = self.spline_points[prev_idx];
let b = self.spline_points[limit_idx];
let a_len = self.cumulative_lengths[prev_idx];
debug!("a={:?} (a_len={}) b={:?}", a, b, a_len);
let remain = to_length - a_len.into_inner();
let mid = Math::point_on_line(a, b, remain);
debug!("remain={:?} mid={:?}", remain, mid);
self.spline_points[limit_idx] = mid;
self.cumulative_lengths[limit_idx] = NotNan::new(to_length).unwrap();
debug!("spline_points[{}] = {:?}", limit_idx, mid);
debug!("cumulative_lengths[{}] = {:?}", limit_idx, to_length);
self.spline_points.truncate(limit_idx + 1);
self.cumulative_lengths.truncate(limit_idx + 1);
debug!("truncated to len {}", limit_idx + 1);
}
/// Return the pixel length of this spline
pub fn pixel_length(&self) -> f64 {
self.cumulative_lengths.last().unwrap().into_inner()
}
/// Return the endpoint of this spline
pub fn end_point(&self) -> P {
self.spline_points.last().cloned().unwrap()
}
/// Calculate the angle at the given length on the slider
fn angle_at_length(&self, length: f64) -> P {
let _length_notnan = NotNan::new(length).unwrap();
// match self.cumulative_lengths.binary_search(&length_notnan) {
// Ok(_) => {}
// Err(_) => {}
// }
todo!()
}
/// Calculate the point at which the slider ball would be after it has traveled a distance of
/// `length` into the slider.
pub fn point_at_length(&self, length: f64) -> P {
let length_notnan = NotNan::new(length).unwrap();
match self.cumulative_lengths.binary_search(&length_notnan) {
Ok(idx) => self.spline_points[idx],
Err(idx) => {
let n = self.spline_points.len();
if idx == 0 && self.spline_points.len() > 2 {
return self.spline_points[0];
} else if idx == n {
return self.spline_points[n - 1];
}
let (len1, len2) = (
self.cumulative_lengths[idx - 1].into_inner(),
self.cumulative_lengths[idx].into_inner(),
);
let proportion = (length - len1) / (len2 - len1);
let (p1, p2) = (self.spline_points[idx - 1], self.spline_points[idx]);
(p2 - p1) * P::new(proportion, proportion) + p1
}
}
}
}
type P = Point<f64>;
fn subdivide(control_points: &[P], l: &mut [P], r: &mut [P], midpoints_buf: &mut [P]) {
let count = control_points.len();
midpoints_buf.copy_from_slice(control_points);
for i in 0..count {
l[i] = midpoints_buf[0];
r[count - i - 1] = midpoints_buf[count - i - 1];
for j in 0..count - i - 1 {
midpoints_buf[j] = (midpoints_buf[j] + midpoints_buf[j + 1]) / P::new(2.0, 2.0);
}
}
}
fn approximate(
control_points: &[P],
output: &mut Vec<P>,
l_buf: &mut [P],
r_buf: &mut [P],
midpoints_buf: &mut [P],
) {
let count = control_points.len();
subdivide(&control_points, l_buf, r_buf, midpoints_buf);
l_buf[count..(count * 2) - 1].clone_from_slice(&r_buf[1..count]);
output.push(control_points[0]);
for i in 1..count - 1 {
let index = 2 * i;
let p = (l_buf[index] * P::new(2.0, 2.0) + l_buf[index - 1] + l_buf[index + 1])
* P::new(0.25, 0.25);
output.push(p | {
let (p1, p2, p3) = (points[0], points[1], points[2]);
let (center, radius) = Math::circumcircle(p1, p2, p3);
// find the t-values of the start and end of the slider
let t0 = (center.y - p1.y).atan2(p1.x - center.x);
let mut mid = (center.y - p2.y).atan2(p2.x - center.x);
let mut t1 = (center.y - p3.y).atan2(p3.x - center.x);
// make sure t0 is less than t1
while mid < t0 {
mid += std::f64::consts::TAU;
}
while t1 < t0 {
t1 += std::f64::consts::TAU;
}
if mid > t1 {
t1 -= std::f64::consts::TAU;
}
| conditional_block |
pattern.rs | // This is the search method from the Searcher trait
/// let matches = same_add.search(&egraph);
/// let matched_eclasses: Vec<Id> = matches.iter().map(|m| m.eclass).collect();
/// assert_eq!(matched_eclasses, vec![a11, a22]);
/// ```
///
/// [`FromStr`]: std::str::FromStr
#[derive(Debug, PartialEq, Clone)]
pub struct Pattern<L> {
/// The actual pattern as a [`RecExpr`]
pub ast: PatternAst<L>,
program: machine::Program<L>,
}
/// A [`RecExpr`] that represents a
/// [`Pattern`].
pub type PatternAst<L> = RecExpr<ENodeOrVar<L>>;
impl<L: Language> PatternAst<L> {
/// Returns a new `PatternAst` with the variables renames canonically
pub fn alpha_rename(&self) -> Self {
let mut vars = HashMap::<Var, Var>::default();
let mut new = PatternAst::default();
fn mkvar(i: usize) -> Var {
let vs = &["?x", "?y", "?z", "?w"];
match vs.get(i) {
Some(v) => v.parse().unwrap(),
None => format!("?v{}", i - vs.len()).parse().unwrap(),
}
}
for n in self.as_ref() {
new.add(match n {
ENodeOrVar::ENode(_) => n.clone(),
ENodeOrVar::Var(v) => {
let i = vars.len();
ENodeOrVar::Var(*vars.entry(*v).or_insert_with(|| mkvar(i)))
}
});
}
new
}
}
impl<L: Language> Pattern<L> {
/// Creates a new pattern from the given pattern ast.
pub fn new(ast: PatternAst<L>) -> Self {
let ast = ast.compact();
let program = machine::Program::compile_from_pat(&ast);
Pattern { ast, program }
}
/// Returns a list of the [`Var`]s in this pattern.
pub fn vars(&self) -> Vec<Var> {
let mut vars = vec![];
for n in self.ast.as_ref() {
if let ENodeOrVar::Var(v) = n {
if !vars.contains(v) {
vars.push(*v)
}
}
}
vars
}
}
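// Hypothetical usage sketch (assumes egg's SymbolLang; not part of this file): the
// FromStr impl below lets a pattern be parsed and its variables listed:
// let pat: Pattern<SymbolLang> = "(+ ?x ?y)".parse().unwrap();
// assert_eq!(pat.vars().len(), 2);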
impl<L: Language + Display> Pattern<L> {
/// Pretty print this pattern as a sexp with the given width
pub fn pretty(&self, width: usize) -> String {
self.ast.pretty(width)
}
}
/// The language of [`Pattern`]s.
///
#[derive(Debug, Hash, PartialEq, Eq, Clone, PartialOrd, Ord)]
pub enum ENodeOrVar<L> {
/// An enode from the underlying [`Language`]
ENode(L),
/// A pattern variable
Var(Var),
}
impl<L: Language> Language for ENodeOrVar<L> {
fn matches(&self, _other: &Self) -> bool {
panic!("Should never call this")
}
fn children(&self) -> &[Id] {
match self {
ENodeOrVar::ENode(n) => n.children(),
ENodeOrVar::Var(_) => &[],
}
}
fn children_mut(&mut self) -> &mut [Id] {
match self {
ENodeOrVar::ENode(n) => n.children_mut(),
ENodeOrVar::Var(_) => &mut [],
}
}
}
impl<L: Language + Display> Display for ENodeOrVar<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Self::ENode(node) => Display::fmt(node, f),
Self::Var(var) => Display::fmt(var, f),
}
}
}
#[derive(Debug, Error)]
pub enum ENodeOrVarParseError<E> {
#[error(transparent)]
BadVar(<Var as FromStr>::Err),
#[error("tried to parse pattern variable {0:?} as an operator")]
UnexpectedVar(String),
#[error(transparent)]
BadOp(E),
}
impl<L: FromOp> FromOp for ENodeOrVar<L> {
type Error = ENodeOrVarParseError<L::Error>;
fn from_op(op: &str, children: Vec<Id>) -> Result<Self, Self::Error> {
use ENodeOrVarParseError::*;
if op.starts_with('?') && op.len() > 1 {
if children.is_empty() {
op.parse().map(Self::Var).map_err(BadVar)
} else {
Err(UnexpectedVar(op.to_owned()))
}
} else {
L::from_op(op, children).map(Self::ENode).map_err(BadOp)
}
}
}
impl<L: FromOp> std::str::FromStr for Pattern<L> {
type Err = RecExprParseError<ENodeOrVarParseError<L::Error>>;
fn from_str(s: &str) -> Result<Self, Self::Err> {
PatternAst::from_str(s).map(Self::from)
}
}
impl<'a, L: Language> From<&'a [L]> for Pattern<L> {
fn from(expr: &'a [L]) -> Self {
let nodes: Vec<_> = expr.iter().cloned().map(ENodeOrVar::ENode).collect();
let ast = RecExpr::from(nodes);
Self::new(ast)
}
}
impl<L: Language> From<PatternAst<L>> for Pattern<L> {
fn from(ast: PatternAst<L>) -> Self {
Self::new(ast)
}
}
impl<L: Language> TryFrom<Pattern<L>> for RecExpr<L> {
type Error = Var;
fn try_from(pat: Pattern<L>) -> Result<Self, Self::Error> {
let nodes = pat.ast.as_ref().iter().cloned();
let ns: Result<Vec<_>, _> = nodes
.map(|n| match n {
ENodeOrVar::ENode(n) => Ok(n),
ENodeOrVar::Var(v) => Err(v),
})
.collect();
ns.map(RecExpr::from)
}
}
impl<L: Language + Display> Display for Pattern<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Display::fmt(&self.ast, f)
}
}
/// The result of searching a [`Searcher`] over one eclass.
///
/// Note that one [`SearchMatches`] can contain many found
/// substititions. So taking the length of a list of [`SearchMatches`]
/// tells you how many eclasses something was matched in, _not_ how
/// many matches were found total.
///
#[derive(Debug)]
pub struct SearchMatches<'a, L: Language> {
/// The eclass id that these matches were found in.
pub eclass: Id,
/// The substitutions for each match.
pub substs: Vec<Subst>,
/// Optionally, an ast for the matches used in proof production.
pub ast: Option<Cow<'a, PatternAst<L>>>,
}
impl<L: Language, A: Analysis<L>> Searcher<L, A> for Pattern<L> {
fn get_pattern_ast(&self) -> Option<&PatternAst<L>> {
Some(&self.ast)
}
fn search(&self, egraph: &EGraph<L, A>) -> Vec<SearchMatches<L>> {
match self.ast.as_ref().last().unwrap() {
ENodeOrVar::ENode(e) => {
#[allow(clippy::mem_discriminant_non_enum)]
let key = std::mem::discriminant(e);
match egraph.classes_by_op.get(&key) {
None => vec![],
Some(ids) => ids
.iter()
.filter_map(|&id| self.search_eclass(egraph, id))
.collect(),
}
}
ENodeOrVar::Var(_) => egraph
.classes()
.filter_map(|e| self.search_eclass(egraph, e.id))
.collect(),
}
}
fn search_eclass(&self, egraph: &EGraph<L, A>, eclass: Id) -> Option<SearchMatches<L>> {
let substs = self.program.run(egraph, eclass);
if substs.is_empty() {
None
} else {
let ast = Some(Cow::Borrowed(&self.ast));
Some(SearchMatches {
eclass,
substs,
ast,
})
}
}
fn vars | lf) -> Vec<Var> {
Pattern::vars(self)
}
}
impl<L, A> Applier<L, A> for Pattern<L>
where
L: Language,
A: Analysis<L>,
{
fn get_pattern_ast(&self) -> Option<&PatternAst<L>> {
Some(&self.ast)
}
fn apply_matches(
&self,
egraph: &mut EGraph<L, A>,
matches: &[SearchMatches<L>],
rule_name: Symbol,
) -> Vec<Id> {
let mut added = vec![];
let ast = self.ast.as_ref();
let mut id_buf = vec![0.into(); ast.len()];
for mat in matches {
let sast = mat.ast.as_ref().map(|cow| cow.as_ref());
for | (&se | identifier_name |
pattern.rs | Display> Pattern<L> {
/// Pretty print this pattern as a sexp with the given width
pub fn pretty(&self, width: usize) -> String {
self.ast.pretty(width)
}
}
/// The language of [`Pattern`]s.
///
#[derive(Debug, Hash, PartialEq, Eq, Clone, PartialOrd, Ord)]
pub enum ENodeOrVar<L> {
/// An enode from the underlying [`Language`]
ENode(L),
/// A pattern variable
Var(Var),
}
impl<L: Language> Language for ENodeOrVar<L> {
fn matches(&self, _other: &Self) -> bool {
panic!("Should never call this")
}
fn children(&self) -> &[Id] {
match self {
ENodeOrVar::ENode(n) => n.children(),
ENodeOrVar::Var(_) => &[],
}
}
fn children_mut(&mut self) -> &mut [Id] {
match self {
ENodeOrVar::ENode(n) => n.children_mut(),
ENodeOrVar::Var(_) => &mut [],
}
}
}
impl<L: Language + Display> Display for ENodeOrVar<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Self::ENode(node) => Display::fmt(node, f),
Self::Var(var) => Display::fmt(var, f),
}
}
}
#[derive(Debug, Error)]
pub enum ENodeOrVarParseError<E> {
#[error(transparent)]
BadVar(<Var as FromStr>::Err),
#[error("tried to parse pattern variable {0:?} as an operator")]
UnexpectedVar(String),
#[error(transparent)]
BadOp(E),
}
impl<L: FromOp> FromOp for ENodeOrVar<L> {
type Error = ENodeOrVarParseError<L::Error>;
fn from_op(op: &str, children: Vec<Id>) -> Result<Self, Self::Error> {
use ENodeOrVarParseError::*;
if op.starts_with('?') && op.len() > 1 {
if children.is_empty() {
op.parse().map(Self::Var).map_err(BadVar)
} else {
Err(UnexpectedVar(op.to_owned()))
}
} else {
L::from_op(op, children).map(Self::ENode).map_err(BadOp)
}
}
}
impl<L: FromOp> std::str::FromStr for Pattern<L> {
type Err = RecExprParseError<ENodeOrVarParseError<L::Error>>;
fn from_str(s: &str) -> Result<Self, Self::Err> {
PatternAst::from_str(s).map(Self::from)
}
}
impl<'a, L: Language> From<&'a [L]> for Pattern<L> {
fn from(expr: &'a [L]) -> Self {
let nodes: Vec<_> = expr.iter().cloned().map(ENodeOrVar::ENode).collect();
let ast = RecExpr::from(nodes);
Self::new(ast)
}
}
impl<L: Language> From<PatternAst<L>> for Pattern<L> {
fn from(ast: PatternAst<L>) -> Self {
Self::new(ast)
}
}
impl<L: Language> TryFrom<Pattern<L>> for RecExpr<L> {
type Error = Var;
fn try_from(pat: Pattern<L>) -> Result<Self, Self::Error> {
let nodes = pat.ast.as_ref().iter().cloned();
let ns: Result<Vec<_>, _> = nodes
.map(|n| match n {
ENodeOrVar::ENode(n) => Ok(n),
ENodeOrVar::Var(v) => Err(v),
})
.collect();
ns.map(RecExpr::from)
}
}
impl<L: Language + Display> Display for Pattern<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Display::fmt(&self.ast, f)
}
}
/// The result of searching a [`Searcher`] over one eclass.
///
/// Note that one [`SearchMatches`] can contain many found
/// substititions. So taking the length of a list of [`SearchMatches`]
/// tells you how many eclasses something was matched in, _not_ how
/// many matches were found total.
///
#[derive(Debug)]
pub struct SearchMatches<'a, L: Language> {
/// The eclass id that these matches were found in.
pub eclass: Id,
/// The substitutions for each match.
pub substs: Vec<Subst>,
/// Optionally, an ast for the matches used in proof production.
pub ast: Option<Cow<'a, PatternAst<L>>>,
}
impl<L: Language, A: Analysis<L>> Searcher<L, A> for Pattern<L> {
fn get_pattern_ast(&self) -> Option<&PatternAst<L>> {
Some(&self.ast)
}
fn search(&self, egraph: &EGraph<L, A>) -> Vec<SearchMatches<L>> {
match self.ast.as_ref().last().unwrap() {
ENodeOrVar::ENode(e) => {
#[allow(clippy::mem_discriminant_non_enum)]
let key = std::mem::discriminant(e);
match egraph.classes_by_op.get(&key) {
None => vec![],
Some(ids) => ids
.iter()
.filter_map(|&id| self.search_eclass(egraph, id))
.collect(),
}
}
ENodeOrVar::Var(_) => egraph
.classes()
.filter_map(|e| self.search_eclass(egraph, e.id))
.collect(),
}
}
fn search_eclass(&self, egraph: &EGraph<L, A>, eclass: Id) -> Option<SearchMatches<L>> {
let substs = self.program.run(egraph, eclass);
if substs.is_empty() {
None
} else {
let ast = Some(Cow::Borrowed(&self.ast));
Some(SearchMatches {
eclass,
substs,
ast,
})
}
}
fn vars(&self) -> Vec<Var> {
Pattern::vars(self)
}
}
impl<L, A> Applier<L, A> for Pattern<L>
where
L: Language,
A: Analysis<L>,
{
fn get_pattern_ast(&self) -> Option<&PatternAst<L>> {
Some(&self.ast)
}
fn apply_matches(
&self,
egraph: &mut EGraph<L, A>,
matches: &[SearchMatches<L>],
rule_name: Symbol,
) -> Vec<Id> {
let mut added = vec![];
let ast = self.ast.as_ref();
let mut id_buf = vec![0.into(); ast.len()];
for mat in matches {
let sast = mat.ast.as_ref().map(|cow| cow.as_ref());
for subst in &mat.substs {
let did_something;
let id;
if egraph.are_explanations_enabled() {
let (id_temp, did_something_temp) =
egraph.union_instantiations(sast.unwrap(), &self.ast, subst, rule_name);
did_something = did_something_temp;
id = id_temp;
} else {
id = apply_pat(&mut id_buf, ast, egraph, subst);
did_something = egraph.union(id, mat.eclass);
}
if did_something {
added.push(id)
}
}
}
added
}
fn apply_one(
&self,
egraph: &mut EGraph<L, A>,
eclass: Id,
subst: &Subst,
searcher_ast: Option<&PatternAst<L>>,
rule_name: Symbol,
) -> Vec<Id> {
let ast = self.ast.as_ref();
let mut id_buf = vec![0.into(); ast.len()];
let id = apply_pat(&mut id_buf, ast, egraph, subst);
if let Some(ast) = searcher_ast {
let (from, did_something) =
egraph.union_instantiations(ast, &self.ast, subst, rule_name);
if did_something {
vec![from]
} else {
vec![]
}
} else if egraph.union(eclass, id) {
vec![eclass]
} else {
vec![]
}
}
fn vars(&self) -> Vec<Var> {
Pattern::vars(self)
}
}
pub(crate) fn apply_pat<L: Language, A: Analysis<L>>(
ids: &mut [Id],
pat: &[ENodeOrVar<L>],
egraph: &mut EGraph<L, A>,
subst: &Subst,
) -> Id {
debug_assert_eq!(pat.len(), ids.len());
trace!("apply_rec {:2?} {:?}", pat, subst);
for (i, pat_node) in pat.iter().enumerate() {
let id = match pat_node {
ENodeOrVar::Var(w) => subst[*w],
ENodeOrVar::ENode(e) => {
let n = e.clone().map_children(|child| ids[usize::from(child)]);
trace!("adding: {:?}", n);
egraph.add(n)
} | };
ids[i] = id; | random_line_split |
|
pattern.rs | // This is the search method from the Searcher trait
/// let matches = same_add.search(&egraph);
/// let matched_eclasses: Vec<Id> = matches.iter().map(|m| m.eclass).collect();
/// assert_eq!(matched_eclasses, vec![a11, a22]);
/// ```
///
/// [`FromStr`]: std::str::FromStr
#[derive(Debug, PartialEq, Clone)]
pub struct Pattern<L> {
/// The actual pattern as a [`RecExpr`]
pub ast: PatternAst<L>,
program: machine::Program<L>,
}
/// A [`RecExpr`] that represents a
/// [`Pattern`].
pub type PatternAst<L> = RecExpr<ENodeOrVar<L>>;
impl<L: Language> PatternAst<L> {
/// Returns a new `PatternAst` with the variables renamed canonically
pub fn alpha_rename(&self) -> Self {
let mut vars = HashMap::<Var, Var>::default();
let mut new = PatternAst::default();
fn mkvar(i: usize) -> Var {
let vs = &["?x", "?y", "?z", "?w"];
match vs.get(i) {
Some(v) => v.parse().unwrap(),
None => format!("?v{}", i - vs.len()).parse().unwrap(),
}
}
for n in self.as_ref() {
new.add(match n {
ENodeOrVar::ENode(_) => n.clone(),
ENodeOrVar::Var(v) => {
let i = vars.len();
ENodeOrVar::Var(*vars.entry(*v).or_insert_with(|| mkvar(i)))
}
});
}
new
}
}
impl<L: Language> Pattern<L> {
/// Creates a new pattern from the given pattern ast.
pub fn new(ast: PatternAst<L>) -> Self {
let ast = ast.compact();
let program = machine::Program::compile_from_pat(&ast);
Pattern { ast, program }
}
/// Returns a list of the [`Var`]s in this pattern.
pub fn vars(&self) -> Vec<Var> {
let mut vars = vec![];
for n in self.ast.as_ref() {
if let ENodeOrVar::Var(v) = n {
if !vars.contains(v) {
vars.push(*v)
}
}
}
vars
}
}
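// Illustrative sketch, not part of the original file: each variable is
// reported once, in first-seen order. Assumes egg's `SymbolLang`.
#[cfg(test)]
mod pattern_vars_sketch {
    use crate::{Pattern, SymbolLang, Var};

    #[test]
    fn lists_each_var_once() {
        let pat: Pattern<SymbolLang> = "(+ ?x (* ?y ?x))".parse().unwrap();
        let vars: Vec<Var> = pat.vars();
        // ?x appears twice in the pattern but only once in the result.
        assert_eq!(vars.len(), 2);
    }
}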
impl<L: Language + Display> Pattern<L> {
/// Pretty print this pattern as a sexp with the given width
pub fn pretty(&self, width: usize) -> String {
self.ast.pretty(width)
}
}
/// The language of [`Pattern`]s.
///
#[derive(Debug, Hash, PartialEq, Eq, Clone, PartialOrd, Ord)]
pub enum ENodeOrVar<L> {
/// An enode from the underlying [`Language`]
ENode(L),
/// A pattern variable
Var(Var),
}
impl<L: Language> Language for ENodeOrVar<L> {
fn matches(&self, _other: &Self) -> bool {
panic!("Should never call this")
}
fn children(&self) -> &[Id] {
match self {
ENodeOrVar::ENode(n) => n.children(),
ENodeOrVar::Var(_) => &[],
}
}
fn children_mut(&mut self) -> &mut [Id] {
match self {
ENodeOrVar::ENode(n) => n.children_mut(),
ENodeOrVar::Var(_) => &mut [],
}
}
}
impl<L: Language + Display> Display for ENodeOrVar<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Self::ENode(node) => Display::fmt(node, f),
Self::Var(var) => Display::fmt(var, f),
}
}
}
#[derive(Debug, Error)]
pub enum ENodeOrVarParseError<E> {
#[error(transparent)]
BadVar(<Var as FromStr>::Err),
#[error("tried to parse pattern variable {0:?} as an operator")]
UnexpectedVar(String),
#[error(transparent)]
BadOp(E),
}
impl<L: FromOp> FromOp for ENodeOrVar<L> {
type Error = ENodeOrVarParseError<L::Error>;
fn from_op(op: &str, children: Vec<Id>) -> Result<Self, Self::Error> {
use ENodeOrVarParseError::*;
if op.starts_with('?') && op.len() > 1 {
if children.is_empty() {
op.parse().map(Self::Var).map_err(BadVar)
} else {
Err(UnexpectedVar(op.to_owned()))
}
} else {
L::from_op(op, children).map(Self::ENode).map_err(BadOp)
}
}
}
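// Illustrative sketch, not part of the original file: operators beginning with
// '?' and taking no children parse as pattern variables; everything else is
// handed to the underlying language. Assumes egg's `SymbolLang`.
#[cfg(test)]
mod enode_or_var_parse_sketch {
    use crate::{ENodeOrVar, FromOp, SymbolLang};

    #[test]
    fn question_mark_ops_become_vars() {
        let var = ENodeOrVar::<SymbolLang>::from_op("?x", vec![]).unwrap();
        assert!(matches!(var, ENodeOrVar::Var(_)));

        let node = ENodeOrVar::<SymbolLang>::from_op("+", vec![]).unwrap();
        assert!(matches!(node, ENodeOrVar::ENode(_)));
    }
}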
impl<L: FromOp> std::str::FromStr for Pattern<L> {
type Err = RecExprParseError<ENodeOrVarParseError<L::Error>>;
fn from_str(s: &str) -> Result<Self, Self::Err> {
PatternAst::from_str(s).map(Self::from)
}
}
impl<'a, L: Language> From<&'a [L]> for Pattern<L> {
fn from(expr: &'a [L]) -> Self {
let nodes: Vec<_> = expr.iter().cloned().map(ENodeOrVar::ENode).collect();
let ast = RecExpr::from(nodes);
Self::new(ast)
}
}
impl<L: Language> From<PatternAst<L>> for Pattern<L> {
fn from(ast: PatternAst<L>) -> Self {
Self::new(ast)
}
}
impl<L: Language> TryFrom<Pattern<L>> for RecExpr<L> {
type Error = Var;
fn try_from(pat: Pattern<L>) -> Result<Self, Self::Error> {
let nodes = pat.ast.as_ref().iter().cloned();
let ns: Result<Vec<_>, _> = nodes
.map(|n| match n {
ENodeOrVar::ENode(n) => Ok(n),
ENodeOrVar::Var(v) => Err(v),
})
.collect();
ns.map(RecExpr::from)
}
}
impl<L: Language + Display> Display for Pattern<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Display::fmt(&self.ast, f)
}
}
/// The result of searching a [`Searcher`] over one eclass.
///
/// Note that one [`SearchMatches`] can contain many found
/// substitutions. So taking the length of a list of [`SearchMatches`]
/// tells you how many eclasses something was matched in, _not_ how
/// many matches were found total.
///
#[derive(Debug)]
pub struct SearchMatches<'a, L: Language> {
/// The eclass id that these matches were found in.
pub eclass: Id,
/// The substitutions for each match.
pub substs: Vec<Subst>,
/// Optionally, an ast for the matches used in proof production.
pub ast: Option<Cow<'a, PatternAst<L>>>,
}
impl<L: Language, A: Analysis<L>> Searcher<L, A> for Pattern<L> {
fn get_pattern_ast(&self) -> Option<&PatternAst<L>> {
Some(&self.ast)
}
fn search(&self, egraph: &EGraph<L, A>) -> Vec<SearchMatches<L>> {
match self.ast.as_ref().last().unwrap() {
ENodeOrVar::ENode(e) => {
#[allow(clippy::mem_discriminant_non_enum)]
let key = std::mem::discriminant(e);
match egraph.classes_by_op.get(&key) {
None => vec![],
Some(ids) => ids
.iter()
.filter_map(|&id| self.search_eclass(egraph, id))
.collect(),
}
}
ENodeOrVar::Var(_) => egraph
.classes()
.filter_map(|e| self.search_eclass(egraph, e.id))
.collect(),
}
}
fn search_eclass(&self, egraph: &EGraph<L, A>, eclass: Id) -> Option<SearchMatches<L>> {
| fn vars(&self) -> Vec<Var> {
Pattern::vars(self)
}
}
impl<L, A> Applier<L, A> for Pattern<L>
where
L: Language,
A: Analysis<L>,
{
fn get_pattern_ast(&self) -> Option<&PatternAst<L>> {
Some(&self.ast)
}
fn apply_matches(
&self,
egraph: &mut EGraph<L, A>,
matches: &[SearchMatches<L>],
rule_name: Symbol,
) -> Vec<Id> {
let mut added = vec![];
let ast = self.ast.as_ref();
let mut id_buf = vec![0.into(); ast.len()];
for mat in matches {
let sast = mat.ast.as_ref().map(|cow| cow.as_ref());
| let substs = self.program.run(egraph, eclass);
if substs.is_empty() {
None
} else {
let ast = Some(Cow::Borrowed(&self.ast));
Some(SearchMatches {
eclass,
substs,
ast,
})
}
}
| identifier_body |
pattern.rs | }
}
impl<L: Language + Display> Pattern<L> {
/// Pretty print this pattern as a sexp with the given width
pub fn pretty(&self, width: usize) -> String {
self.ast.pretty(width)
}
}
/// The language of [`Pattern`]s.
///
#[derive(Debug, Hash, PartialEq, Eq, Clone, PartialOrd, Ord)]
pub enum ENodeOrVar<L> {
/// An enode from the underlying [`Language`]
ENode(L),
/// A pattern variable
Var(Var),
}
impl<L: Language> Language for ENodeOrVar<L> {
fn matches(&self, _other: &Self) -> bool {
panic!("Should never call this")
}
fn children(&self) -> &[Id] {
match self {
ENodeOrVar::ENode(n) => n.children(),
ENodeOrVar::Var(_) => &[],
}
}
fn children_mut(&mut self) -> &mut [Id] {
match self {
ENodeOrVar::ENode(n) => n.children_mut(),
ENodeOrVar::Var(_) => &mut [],
}
}
}
impl<L: Language + Display> Display for ENodeOrVar<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Self::ENode(node) => Display::fmt(node, f),
Self::Var(var) => Display::fmt(var, f),
}
}
}
#[derive(Debug, Error)]
pub enum ENodeOrVarParseError<E> {
#[error(transparent)]
BadVar(<Var as FromStr>::Err),
#[error("tried to parse pattern variable {0:?} as an operator")]
UnexpectedVar(String),
#[error(transparent)]
BadOp(E),
}
impl<L: FromOp> FromOp for ENodeOrVar<L> {
type Error = ENodeOrVarParseError<L::Error>;
fn from_op(op: &str, children: Vec<Id>) -> Result<Self, Self::Error> {
use ENodeOrVarParseError::*;
if op.starts_with('?') && op.len() > 1 {
if children.is_empty() {
op.parse().map(Self::Var).map_err(BadVar)
} else {
Err(UnexpectedVar(op.to_owned()))
}
} else {
L::from_op(op, children).map(Self::ENode).map_err(BadOp)
}
}
}
impl<L: FromOp> std::str::FromStr for Pattern<L> {
type Err = RecExprParseError<ENodeOrVarParseError<L::Error>>;
fn from_str(s: &str) -> Result<Self, Self::Err> {
PatternAst::from_str(s).map(Self::from)
}
}
impl<'a, L: Language> From<&'a [L]> for Pattern<L> {
fn from(expr: &'a [L]) -> Self {
let nodes: Vec<_> = expr.iter().cloned().map(ENodeOrVar::ENode).collect();
let ast = RecExpr::from(nodes);
Self::new(ast)
}
}
impl<L: Language> From<PatternAst<L>> for Pattern<L> {
fn from(ast: PatternAst<L>) -> Self {
Self::new(ast)
}
}
impl<L: Language> TryFrom<Pattern<L>> for RecExpr<L> {
type Error = Var;
fn try_from(pat: Pattern<L>) -> Result<Self, Self::Error> {
let nodes = pat.ast.as_ref().iter().cloned();
let ns: Result<Vec<_>, _> = nodes
.map(|n| match n {
ENodeOrVar::ENode(n) => Ok(n),
ENodeOrVar::Var(v) => Err(v),
})
.collect();
ns.map(RecExpr::from)
}
}
impl<L: Language + Display> Display for Pattern<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Display::fmt(&self.ast, f)
}
}
/// The result of searching a [`Searcher`] over one eclass.
///
/// Note that one [`SearchMatches`] can contain many found
/// substitutions. So taking the length of a list of [`SearchMatches`]
/// tells you how many eclasses something was matched in, _not_ how
/// many matches were found total.
///
#[derive(Debug)]
pub struct SearchMatches<'a, L: Language> {
/// The eclass id that these matches were found in.
pub eclass: Id,
/// The substitutions for each match.
pub substs: Vec<Subst>,
/// Optionally, an ast for the matches used in proof production.
pub ast: Option<Cow<'a, PatternAst<L>>>,
}
impl<L: Language, A: Analysis<L>> Searcher<L, A> for Pattern<L> {
fn get_pattern_ast(&self) -> Option<&PatternAst<L>> {
Some(&self.ast)
}
fn search(&self, egraph: &EGraph<L, A>) -> Vec<SearchMatches<L>> {
match self.ast.as_ref().last().unwrap() {
ENodeOrVar::ENode(e) => {
#[allow(clippy::mem_discriminant_non_enum)]
let key = std::mem::discriminant(e);
match egraph.classes_by_op.get(&key) {
None => vec![],
Some(ids) => ids
.iter()
.filter_map(|&id| self.search_eclass(egraph, id))
.collect(),
}
}
ENodeOrVar::Var(_) => egraph
.classes()
.filter_map(|e| self.search_eclass(egraph, e.id))
.collect(),
}
}
fn search_eclass(&self, egraph: &EGraph<L, A>, eclass: Id) -> Option<SearchMatches<L>> {
let substs = self.program.run(egraph, eclass);
if substs.is_empty() {
None
} else {
let ast = Some(Cow::Borrowed(&self.ast));
Some(SearchMatches {
eclass,
substs,
ast,
})
}
}
fn vars(&self) -> Vec<Var> {
Pattern::vars(self)
}
}
impl<L, A> Applier<L, A> for Pattern<L>
where
L: Language,
A: Analysis<L>,
{
fn get_pattern_ast(&self) -> Option<&PatternAst<L>> {
Some(&self.ast)
}
fn apply_matches(
&self,
egraph: &mut EGraph<L, A>,
matches: &[SearchMatches<L>],
rule_name: Symbol,
) -> Vec<Id> {
let mut added = vec![];
let ast = self.ast.as_ref();
let mut id_buf = vec![0.into(); ast.len()];
for mat in matches {
let sast = mat.ast.as_ref().map(|cow| cow.as_ref());
for subst in &mat.substs {
let did_something;
let id;
if egraph.are_explanations_enabled() {
let (id_temp, did_something_temp) =
egraph.union_instantiations(sast.unwrap(), &self.ast, subst, rule_name);
did_something = did_something_temp;
id = id_temp;
} else {
id = apply_pat(&mut id_buf, ast, egraph, subst);
did_something = egraph.union(id, mat.eclass);
}
if did_something {
added.push(id)
}
}
}
added
}
fn apply_one(
&self,
egraph: &mut EGraph<L, A>,
eclass: Id,
subst: &Subst,
searcher_ast: Option<&PatternAst<L>>,
rule_name: Symbol,
) -> Vec<Id> {
let ast = self.ast.as_ref();
let mut id_buf = vec![0.into(); ast.len()];
let id = apply_pat(&mut id_buf, ast, egraph, subst);
if let Some(ast) = searcher_ast {
let (from, did_something) =
egraph.union_instantiations(ast, &self.ast, subst, rule_name);
if did_something {
vec![from]
} else {
vec![]
}
} else if egraph.union(eclass, id) {
vec![eclass]
} else {
vec![]
}
}
fn vars(&self) -> Vec<Var> {
Pattern::vars(self)
}
}
pub(crate) fn apply_pat<L: Language, A: Analysis<L>>(
ids: &mut [Id],
pat: &[ENodeOrVar<L>],
egraph: &mut EGraph<L, A>,
subst: &Subst,
) -> Id {
debug_assert_eq!(pat.len(), ids.len());
trace!("apply_rec {:2?} {:?}", pat, subst);
for (i, pat_node) in pat.iter().enumerate() {
let id = match pat_node {
ENodeOrVar::Var(w) => subst[*w],
ENodeOrVar::ENode(e) => {
| let n = e.clone().map_children(|child| ids[usize::from(child)]);
trace!("adding: {:?}", n);
egraph.add(n)
}
| conditional_block |
|
main.rs | v| v.set_year(bib.year())),
}
}
}
#[derive(Serialize, Deserialize)]
enum Degree {
BS,
MS,
PhD,
}
impl Degree {
fn | (&self) -> String {
match self {
Self::BS => "Bachelor of Science".into(),
Self::MS => "Master of Science".into(),
Self::PhD => "PhD".into(),
}
}
}
#[derive(Serialize, Deserialize)]
struct Education {
institution: String,
degree: Degree,
major: String,
duration: DateRange,
#[serde(skip_serializing_if = "Option::is_none", default)]
location: Option<String>,
#[serde(skip_serializing_if = "Option::is_none", default)]
gpa: Option<f32>,
#[serde(skip_serializing_if = "Option::is_none", default)]
courses: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize)]
struct Experience {
company: String,
position: String,
duration: DateRange,
description: String,
#[serde(skip_serializing_if = "Option::is_none", default)]
location: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
tags: Vec<String>,
}
#[derive(Serialize, Deserialize)]
struct Contact {
#[serde(rename = "type")]
type_: String,
value: String,
}
#[derive(Serialize, Deserialize)]
struct Skill {
category: String,
#[serde(default)]
description: Option<String>,
}
#[derive(Serialize, Deserialize)]
struct Person {
name: String,
#[serde(default)]
resume_url: Option<String>,
contacts: Vec<Contact>,
educations: Vec<Education>,
experiences: Vec<Experience>,
projects: Vec<ProjectParam>,
#[serde(default)]
skills: Vec<Skill>,
#[serde(default)]
references: HashMap<String, Citation>,
#[serde(default)]
publications: Vec<Citation>,
}
#[allow(clippy::large_enum_variant)]
#[derive(Serialize, Deserialize)]
#[serde(untagged)]
enum ProjectParam {
Import(ProjectImport),
Sort { order_by: ProjectSortOrder },
ImportMode { import_mode: ProjectImportMode },
Raw(Project),
}
#[derive(Serialize, Deserialize, Copy, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
enum ProjectImportMode {
Whitelist,
Combine,
}
impl Default for ProjectImportMode {
fn default() -> Self {
Self::Combine
}
}
#[derive(Serialize, Deserialize, Copy, Clone)]
#[serde(rename_all = "snake_case")]
enum ProjectSortOrder {
Stars,
Forks,
StarsThenForks,
ForksThenStars,
Manual,
}
#[derive(Serialize, Deserialize)]
#[serde(tag = "from", rename_all = "lowercase")]
enum ProjectImport {
GitHub {
#[serde(default)]
ignore_forks: bool,
#[serde(default)]
repos: Option<Vec<String>>,
#[serde(default)]
token: Option<String>,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
enum ProjectRole {
Owner,
Maintainer,
Contributor,
}
/// Single-digit-precision decimal real number
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq)]
struct Decimal1(u64);
impl ::std::fmt::Display for Decimal1 {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
<f64 as ::std::fmt::Display>::fmt(&(*self).into(), f)
}
}
impl ::std::ops::Add<Decimal1> for Decimal1 {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Self(rhs.0 + self.0)
}
}
impl ::std::ops::AddAssign<Decimal1> for Decimal1 {
fn add_assign(&mut self, rhs: Self) {
self.0 += rhs.0;
}
}
impl From<f64> for Decimal1 {
fn from(f: f64) -> Self {
Self((f * 10.0) as u64)
}
}
impl From<Decimal1> for f64 {
fn from(f: Decimal1) -> f64 {
f.0 as f64 / 10.0
}
}
impl<'de> ::serde::Deserialize<'de> for Decimal1 {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct Visitor;
impl<'de> ::serde::de::Visitor<'de> for Visitor {
type Value = Decimal1;
fn expecting(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
write!(fmt, "a float")
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
where
E: ::serde::de::Error,
{
Ok(v.into())
}
}
deserializer.deserialize_f64(Visitor)
}
}
impl ::serde::Serialize for Decimal1 {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_f64((*self).into())
}
}
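// Illustrative sketch, not part of the original file: Decimal1 keeps one
// decimal place by storing the value scaled by ten and truncating the rest.
#[cfg(test)]
mod decimal1_sketch {
    use super::Decimal1;

    #[test]
    fn keeps_one_decimal_place() {
        let d = Decimal1::from(2.75);
        let back = f64::from(d);
        // (2.75 * 10.0) as u64 == 27, so the round trip yields 2.7.
        assert!((back - 2.7).abs() < 1e-9);
    }
}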
#[derive(Serialize, Deserialize, Debug, Clone)]
struct LanguageStat {
language: String,
percentage: Decimal1,
}
#[serde_with::serde_as]
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Project {
name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
contributions: Option<String>,
#[serde(
with = "serde_option_display_fromstr",
default,
skip_serializing_if = "Option::is_none"
)]
url: Option<url::Url>,
#[serde(default, skip_serializing_if = "Option::is_none")]
stars: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
forks: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
active: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
owner: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
commits: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
additions: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
deletions: Option<u64>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
languages: Vec<LanguageStat>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
tags: Vec<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
role: Option<ProjectRole>,
}
mod serde_option_display_fromstr {
pub(crate) fn deserialize<'de, D, T>(deser: D) -> Result<Option<T>, D::Error>
where
D: serde::Deserializer<'de>,
T: ::std::str::FromStr,
<T as ::std::str::FromStr>::Err: ::std::fmt::Display,
{
#[derive(Default)]
struct Visitor<T>(::std::marker::PhantomData<T>);
impl<'de, T> serde::de::Visitor<'de> for Visitor<T>
where
T: ::std::str::FromStr,
<T as ::std::str::FromStr>::Err: ::std::fmt::Display,
{
type Value = Option<T>;
fn expecting(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
write!(fmt, "a string")
}
fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> {
v.parse()
.map_err(serde::de::Error::custom)
.map(Option::Some)
}
}
deser.deserialize_str(Visitor::<T>(Default::default()))
}
pub(crate) fn serialize<S, T>(v: &Option<T>, ser: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
T: ::std::fmt::Display,
{
match v {
Some(v) => ser.serialize_str(&v.to_string()),
None => ser.serialize_none(),
}
}
}
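// Illustrative sketch, not part of the original file: the helper module above
// is wired up through serde's `with` attribute, exactly as the `Project`
// struct does for its `url` field. The struct name here is hypothetical.
#[allow(dead_code)]
#[derive(Serialize, Deserialize)]
struct HomepageSketch {
    #[serde(
        with = "serde_option_display_fromstr",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    url: Option<url::Url>,
}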
use askama::Template;
struct ContactParams {
value: String,
icon: Option<String>,
link: Option<String>,
}
#[derive(Template)]
#[template(path = "resume.html", escape = "none")]
struct ResumeParams<'a> {
name: &'a str,
resume_url: Option<&'a str>,
contacts: Vec<ContactParams>,
educations: &'a [Education],
experiences: &'a [Experience],
projects: Vec<Project>,
references: Vec<(&'a str, &'a str)>,
publications: Vec<(&'a str, Option<u32>)>,
skills: &' | to_resume_string | identifier_name |
main.rs | fn deserialize<D: Deserializer<'a>>(d: D) -> Result<Self, D::Error> {
let s = String::deserialize(d)?;
s.parse().map_err(serde::de::Error::custom)
}
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
enum Citation {
Raw(String),
RawWithYear { text: String, year: Option<u32> },
Url(citation::UrlCitation),
Doi(citation::DoiCitation),
Bibtex(citation::BibtexCitation),
}
impl Citation {
fn to_raw(&self) -> Option<Citation> {
use Citation::*;
match self {
Raw(s) => Some(Raw(s.clone())),
RawWithYear { text, .. } => Some(Raw(text.clone())),
Url(url) => url.to_raw(),
Doi(doi) => doi.to_raw(),
Bibtex(bib) => bib.to_raw(),
}
}
fn set_year(self, year: Option<u32>) -> Citation {
use Citation::*;
if let Raw(s) = self {
RawWithYear { text: s, year }
} else {
self
}
}
fn to_raw_with_year(&self) -> Option<Citation> {
use Citation::*;
match self {
Raw(s) => Some(RawWithYear {
text: s.clone(),
year: None,
}),
RawWithYear { text, year } => Some(RawWithYear {
text: text.clone(),
year: *year,
}),
Url(url) => url.to_raw().map(|v| v.set_year(url.year())),
Doi(doi) => doi.to_raw().map(|v| v.set_year(doi.year())),
Bibtex(bib) => bib.to_raw().map(|v| v.set_year(bib.year())),
}
}
}
#[derive(Serialize, Deserialize)]
enum Degree {
BS,
MS,
PhD,
}
impl Degree {
fn to_resume_string(&self) -> String {
match self {
Self::BS => "Bachelor of Science".into(),
Self::MS => "Master of Science".into(),
Self::PhD => "PhD".into(),
}
}
}
#[derive(Serialize, Deserialize)]
struct Education {
institution: String,
degree: Degree,
major: String,
duration: DateRange,
#[serde(skip_serializing_if = "Option::is_none", default)]
location: Option<String>,
#[serde(skip_serializing_if = "Option::is_none", default)]
gpa: Option<f32>,
#[serde(skip_serializing_if = "Option::is_none", default)]
courses: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize)]
struct Experience {
company: String,
position: String,
duration: DateRange,
description: String,
#[serde(skip_serializing_if = "Option::is_none", default)]
location: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
tags: Vec<String>,
}
#[derive(Serialize, Deserialize)]
struct Contact {
#[serde(rename = "type")]
type_: String,
value: String,
}
#[derive(Serialize, Deserialize)]
struct Skill {
category: String,
#[serde(default)]
description: Option<String>,
}
#[derive(Serialize, Deserialize)]
struct Person {
name: String,
#[serde(default)]
resume_url: Option<String>,
contacts: Vec<Contact>,
educations: Vec<Education>,
experiences: Vec<Experience>,
projects: Vec<ProjectParam>,
#[serde(default)]
skills: Vec<Skill>,
#[serde(default)]
references: HashMap<String, Citation>,
#[serde(default)]
publications: Vec<Citation>,
}
#[allow(clippy::large_enum_variant)]
#[derive(Serialize, Deserialize)]
#[serde(untagged)]
enum ProjectParam {
Import(ProjectImport),
Sort { order_by: ProjectSortOrder },
ImportMode { import_mode: ProjectImportMode },
Raw(Project),
}
#[derive(Serialize, Deserialize, Copy, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
enum ProjectImportMode {
Whitelist,
Combine,
}
impl Default for ProjectImportMode {
fn default() -> Self {
Self::Combine
}
}
#[derive(Serialize, Deserialize, Copy, Clone)]
#[serde(rename_all = "snake_case")]
enum ProjectSortOrder {
Stars,
Forks,
StarsThenForks,
ForksThenStars,
Manual,
}
#[derive(Serialize, Deserialize)]
#[serde(tag = "from", rename_all = "lowercase")]
enum ProjectImport {
GitHub {
#[serde(default)]
ignore_forks: bool,
#[serde(default)]
repos: Option<Vec<String>>,
#[serde(default)]
token: Option<String>,
},
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
enum ProjectRole {
Owner,
Maintainer,
Contributor,
}
/// Single-digit-precision decimal real number
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq)]
struct Decimal1(u64);
impl ::std::fmt::Display for Decimal1 {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
<f64 as ::std::fmt::Display>::fmt(&(*self).into(), f)
}
}
impl ::std::ops::Add<Decimal1> for Decimal1 {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Self(rhs.0 + self.0)
}
}
impl ::std::ops::AddAssign<Decimal1> for Decimal1 {
fn add_assign(&mut self, rhs: Self) {
self.0 += rhs.0;
}
}
impl From<f64> for Decimal1 {
fn from(f: f64) -> Self {
Self((f * 10.0) as u64)
}
}
impl From<Decimal1> for f64 {
fn from(f: Decimal1) -> f64 {
f.0 as f64 / 10.0
}
}
impl<'de> ::serde::Deserialize<'de> for Decimal1 {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct Visitor;
impl<'de> ::serde::de::Visitor<'de> for Visitor {
type Value = Decimal1;
fn expecting(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
write!(fmt, "a float")
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
where
E: ::serde::de::Error,
{
Ok(v.into())
}
}
deserializer.deserialize_f64(Visitor)
}
}
impl ::serde::Serialize for Decimal1 {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_f64((*self).into())
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct LanguageStat {
language: String,
percentage: Decimal1,
}
#[serde_with::serde_as]
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Project {
name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
contributions: Option<String>,
#[serde(
with = "serde_option_display_fromstr",
default,
skip_serializing_if = "Option::is_none"
)]
url: Option<url::Url>,
#[serde(default, skip_serializing_if = "Option::is_none")]
stars: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
forks: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
active: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
owner: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
commits: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
additions: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
deletions: Option<u64>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
languages: Vec<LanguageStat>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
tags: Vec<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
role: Option<ProjectRole>,
}
mod serde_option_display_fromstr {
pub(crate) fn deserialize<'de, D, T>(deser: D) -> Result<Option<T>, D::Error>
where
D: serde::Deserializer<'de>,
T: ::std::str::FromStr,
<T as ::std::str::FromStr>::Err: ::std::fmt::Display,
| }
impl<'a> Deserialize<'a> for DateRange { | random_line_split |
|
readbuf.rs | /// A borrowed byte buffer which is incrementally filled and initialized.
///
/// This type is a sort of "double cursor". It tracks three regions in the buffer: a region at the beginning of the
/// buffer that has been logically filled with data, a region that has been initialized at some point but not yet
/// logically filled, and a region at the end that is fully uninitialized. The filled region is guaranteed to be a
/// subset of the initialized region.
///
/// In summary, the contents of the buffer can be visualized as:
/// ```not_rust
/// [ capacity ]
/// [ filled | unfilled ]
/// [ initialized | uninitialized ]
/// ```
///
/// A `BorrowedBuf` is created around some existing data (or capacity for data) via a unique reference
/// (`&mut`). The `BorrowedBuf` can be configured (e.g., using `clear` or `set_init`), but cannot be
/// directly written. To write into the buffer, use `unfilled` to create a `BorrowedCursor`. The cursor
/// has write-only access to the unfilled portion of the buffer (you can think of it as a
/// write-only iterator).
///
/// The lifetime `'data` is a bound on the lifetime of the underlying data.
pub struct BorrowedBuf<'data> {
/// The buffer's underlying data.
buf: &'data mut [MaybeUninit<u8>],
/// The length of `self.buf` which is known to be filled.
filled: usize,
/// The length of `self.buf` which is known to be initialized.
init: usize,
}
impl Debug for BorrowedBuf<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("BorrowedBuf")
.field("init", &self.init)
.field("filled", &self.filled)
.field("capacity", &self.capacity())
.finish()
}
}
/// Create a new `BorrowedBuf` from a fully initialized slice.
impl<'data> From<&'data mut [u8]> for BorrowedBuf<'data> {
#[inline]
fn from(slice: &'data mut [u8]) -> BorrowedBuf<'data> {
let len = slice.len();
BorrowedBuf {
// SAFETY: initialized data never becoming uninitialized is an invariant of BorrowedBuf
buf: unsafe { (slice as *mut [u8]).as_uninit_slice_mut().unwrap() },
filled: 0,
init: len,
}
}
}
/// Create a new `BorrowedBuf` from an uninitialized buffer.
///
/// Use `set_init` if part of the buffer is known to be already initialized.
impl<'data> From<&'data mut [MaybeUninit<u8>]> for BorrowedBuf<'data> {
#[inline]
fn from(buf: &'data mut [MaybeUninit<u8>]) -> BorrowedBuf<'data> {
BorrowedBuf { buf, filled: 0, init: 0 }
}
}
impl<'data> BorrowedBuf<'data> {
/// Returns the total capacity of the buffer.
#[inline]
pub fn capacity(&self) -> usize {
self.buf.len()
}
/// Returns the length of the filled part of the buffer.
#[inline]
pub fn len(&self) -> usize {
self.filled
}
/// Returns the length of the initialized part of the buffer.
#[inline]
pub fn init_len(&self) -> usize {
self.init
}
/// Returns a shared reference to the filled portion of the buffer.
#[inline]
pub fn filled(&self) -> &[u8] {
// SAFETY: We only slice the filled part of the buffer, which is always valid
unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[0..self.filled]) }
}
/// Returns a cursor over the unfilled part of the buffer.
#[inline]
pub fn unfilled<'this>(&'this mut self) -> BorrowedCursor<'this> {
BorrowedCursor {
start: self.filled,
// SAFETY: we never assign into `BorrowedCursor::buf`, so treating its
// lifetime covariantly is safe.
buf: unsafe {
mem::transmute::<&'this mut BorrowedBuf<'data>, &'this mut BorrowedBuf<'this>>(self)
},
}
}
/// Clears the buffer, resetting the filled region to empty.
///
/// The number of initialized bytes is not changed, and the contents of the buffer are not modified.
#[inline]
pub fn clear(&mut self) -> &mut Self {
self.filled = 0;
self
}
/// Asserts that the first `n` bytes of the buffer are initialized.
///
/// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer
/// bytes than are already known to be initialized.
///
/// # Safety
///
/// The caller must ensure that the first `n` unfilled bytes of the buffer have already been initialized.
#[inline]
pub unsafe fn set_init(&mut self, n: usize) -> &mut Self {
self.init = cmp::max(self.init, n);
self
}
}
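// Illustrative sketch, not part of the original file: a typical fill cycle.
// A fully initialized stack array becomes a `BorrowedBuf`, a cursor writes
// into the unfilled region, and the filled region grows accordingly.
#[cfg(test)]
mod borrowed_buf_fill_sketch {
    use super::BorrowedBuf;

    #[test]
    fn append_grows_filled_region() {
        let mut storage = [0u8; 8];
        let mut buf: BorrowedBuf<'_> = (&mut storage[..]).into();

        let mut cursor = buf.unfilled();
        cursor.append(b"abc");
        assert_eq!(cursor.written(), 3);

        assert_eq!(buf.len(), 3);
        assert_eq!(buf.filled(), &b"abc"[..]);
    }
}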
/// A writeable view of the unfilled portion of a [`BorrowedBuf`](BorrowedBuf).
///
/// Provides access to the initialized and uninitialized parts of the underlying `BorrowedBuf`.
/// Data can be written directly to the cursor by using [`append`](BorrowedCursor::append) or
/// indirectly by getting a slice of part or all of the cursor and writing into the slice. In the
/// indirect case, the caller must call [`advance`](BorrowedCursor::advance) after writing to inform
/// the cursor how many bytes have been written.
///
/// Once data is written to the cursor, it becomes part of the filled portion of the underlying
/// `BorrowedBuf` and can no longer be accessed or re-written by the cursor. I.e., the cursor tracks
/// the unfilled part of the underlying `BorrowedBuf`.
///
/// The lifetime `'a` is a bound on the lifetime of the underlying buffer (which means it is a bound
/// on the data in that buffer by transitivity).
#[derive(Debug)]
pub struct BorrowedCursor<'a> {
/// The underlying buffer.
// Safety invariant: we treat the type of buf as covariant in the lifetime of `BorrowedBuf` when
// we create a `BorrowedCursor`. This is only safe if we never replace `buf` by assigning into
// it, so don't do that!
buf: &'a mut BorrowedBuf<'a>,
/// The length of the filled portion of the underlying buffer at the time of the cursor's
/// creation.
start: usize,
}
impl<'a> BorrowedCursor<'a> {
/// Reborrow this cursor by cloning it with a smaller lifetime.
///
/// Since a cursor maintains unique access to its underlying buffer, the borrowed cursor is
/// not accessible while the new cursor exists.
#[inline]
pub fn reborrow<'this>(&'this mut self) -> BorrowedCursor<'this> {
BorrowedCursor {
// SAFETY: we never assign into `BorrowedCursor::buf`, so treating its
// lifetime covariantly is safe.
buf: unsafe {
mem::transmute::<&'this mut BorrowedBuf<'a>, &'this mut BorrowedBuf<'this>>(
self.buf,
)
},
start: self.start,
}
}
/// Returns the available space in the cursor.
#[inline]
pub fn capacity(&self) -> usize {
self.buf.capacity() - self.buf.filled
}
/// Returns the number of bytes written to this cursor since it was created from a `BorrowedBuf`.
///
/// Note that if this cursor is a reborrowed clone of another, then the count returned is the
/// count written via either cursor, not the count since the cursor was reborrowed.
#[inline]
pub fn written(&self) -> usize {
self.buf.filled - self.start
}
/// Returns a shared reference to the initialized portion of the cursor.
#[inline]
pub fn init_ref(&self) -> &[u8] {
// SAFETY: We only slice the initialized part of the buffer, which is always valid
unsafe { MaybeUninit::slice_assume_init_ref(&self.buf.buf[self.buf.filled..self.buf.init]) }
}
/// Returns a mutable reference to the initialized portion of the cursor.
#[inline]
pub fn init_mut(&mut self) -> &mut [u8] {
// SAFETY: We only slice the initialized part of the buffer, which is always valid
unsafe {
MaybeUninit::slice_assume_init_mut(&mut self.buf.buf[self.buf.filled..self.buf.init])
}
}
/// Returns a mutable reference to the uninitialized part of the cursor.
///
/// It is safe to uninitialize any of these bytes.
#[inline]
pub fn uninit_mut(&mut self) -> &mut [MaybeUninit<u8>] {
&mut self.buf.buf[self.buf.init..]
}
/// Returns a mutable reference to the whole cursor.
///
/// # Safety
///
/// | random_line_split |
||
readbuf.rs | some point but not yet
/// logically filled, and a region at the end that is fully uninitialized. The filled region is guaranteed to be a
/// subset of the initialized region.
///
/// In summary, the contents of the buffer can be visualized as:
/// ```not_rust
/// [ capacity ]
/// [ filled | unfilled ]
/// [ initialized | uninitialized ]
/// ```
///
/// A `BorrowedBuf` is created around some existing data (or capacity for data) via a unique reference
/// (`&mut`). The `BorrowedBuf` can be configured (e.g., using `clear` or `set_init`), but cannot be
/// directly written. To write into the buffer, use `unfilled` to create a `BorrowedCursor`. The cursor
/// has write-only access to the unfilled portion of the buffer (you can think of it as a
/// write-only iterator).
///
/// The lifetime `'data` is a bound on the lifetime of the underlying data.
pub struct BorrowedBuf<'data> {
/// The buffer's underlying data.
buf: &'data mut [MaybeUninit<u8>],
/// The length of `self.buf` which is known to be filled.
filled: usize,
/// The length of `self.buf` which is known to be initialized.
init: usize,
}
impl Debug for BorrowedBuf<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("BorrowedBuf")
.field("init", &self.init)
.field("filled", &self.filled)
.field("capacity", &self.capacity())
.finish()
}
}
/// Create a new `BorrowedBuf` from a fully initialized slice.
impl<'data> From<&'data mut [u8]> for BorrowedBuf<'data> {
#[inline]
fn from(slice: &'data mut [u8]) -> BorrowedBuf<'data> {
let len = slice.len();
BorrowedBuf {
// SAFETY: initialized data never becoming uninitialized is an invariant of BorrowedBuf
buf: unsafe { (slice as *mut [u8]).as_uninit_slice_mut().unwrap() },
filled: 0,
init: len,
}
}
}
/// Create a new `BorrowedBuf` from an uninitialized buffer.
///
/// Use `set_init` if part of the buffer is known to be already initialized.
impl<'data> From<&'data mut [MaybeUninit<u8>]> for BorrowedBuf<'data> {
#[inline]
fn from(buf: &'data mut [MaybeUninit<u8>]) -> BorrowedBuf<'data> {
BorrowedBuf { buf, filled: 0, init: 0 }
}
}
impl<'data> BorrowedBuf<'data> {
/// Returns the total capacity of the buffer.
#[inline]
pub fn capacity(&self) -> usize {
self.buf.len()
}
/// Returns the length of the filled part of the buffer.
#[inline]
pub fn len(&self) -> usize {
self.filled
}
/// Returns the length of the initialized part of the buffer.
#[inline]
pub fn init_len(&self) -> usize {
self.init
}
/// Returns a shared reference to the filled portion of the buffer.
#[inline]
pub fn filled(&self) -> &[u8] {
// SAFETY: We only slice the filled part of the buffer, which is always valid
unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[0..self.filled]) }
}
/// Returns a cursor over the unfilled part of the buffer.
#[inline]
pub fn unfilled<'this>(&'this mut self) -> BorrowedCursor<'this> {
BorrowedCursor {
start: self.filled,
// SAFETY: we never assign into `BorrowedCursor::buf`, so treating its
// lifetime covariantly is safe.
buf: unsafe {
mem::transmute::<&'this mut BorrowedBuf<'data>, &'this mut BorrowedBuf<'this>>(self)
},
}
}
/// Clears the buffer, resetting the filled region to empty.
///
/// The number of initialized bytes is not changed, and the contents of the buffer are not modified.
#[inline]
pub fn clear(&mut self) -> &mut Self {
self.filled = 0;
self
}
/// Asserts that the first `n` bytes of the buffer are initialized.
///
/// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer
/// bytes than are already known to be initialized.
///
/// # Safety
///
/// The caller must ensure that the first `n` unfilled bytes of the buffer have already been initialized.
#[inline]
pub unsafe fn set_init(&mut self, n: usize) -> &mut Self {
self.init = cmp::max(self.init, n);
self
}
}
/// A writeable view of the unfilled portion of a [`BorrowedBuf`](BorrowedBuf).
///
/// Provides access to the initialized and uninitialized parts of the underlying `BorrowedBuf`.
/// Data can be written directly to the cursor by using [`append`](BorrowedCursor::append) or
/// indirectly by getting a slice of part or all of the cursor and writing into the slice. In the
/// indirect case, the caller must call [`advance`](BorrowedCursor::advance) after writing to inform
/// the cursor how many bytes have been written.
///
/// Once data is written to the cursor, it becomes part of the filled portion of the underlying
/// `BorrowedBuf` and can no longer be accessed or re-written by the cursor. I.e., the cursor tracks
/// the unfilled part of the underlying `BorrowedBuf`.
///
/// The lifetime `'a` is a bound on the lifetime of the underlying buffer (which means it is a bound
/// on the data in that buffer by transitivity).
#[derive(Debug)]
pub struct BorrowedCursor<'a> {
/// The underlying buffer.
// Safety invariant: we treat the type of buf as covariant in the lifetime of `BorrowedBuf` when
// we create a `BorrowedCursor`. This is only safe if we never replace `buf` by assigning into
// it, so don't do that!
buf: &'a mut BorrowedBuf<'a>,
/// The length of the filled portion of the underlying buffer at the time of the cursor's
/// creation.
start: usize,
}
impl<'a> BorrowedCursor<'a> {
/// Reborrow this cursor by cloning it with a smaller lifetime.
///
/// Since a cursor maintains unique access to its underlying buffer, the borrowed cursor is
/// not accessible while the new cursor exists.
#[inline]
pub fn reborrow<'this>(&'this mut self) -> BorrowedCursor<'this> {
BorrowedCursor {
// SAFETY: we never assign into `BorrowedCursor::buf`, so treating its
// lifetime covariantly is safe.
buf: unsafe {
mem::transmute::<&'this mut BorrowedBuf<'a>, &'this mut BorrowedBuf<'this>>(
self.buf,
)
},
start: self.start,
}
}
/// Returns the available space in the cursor.
#[inline]
pub fn capacity(&self) -> usize {
self.buf.capacity() - self.buf.filled
}
/// Returns the number of bytes written to this cursor since it was created from a `BorrowedBuf`.
///
/// Note that if this cursor is a reborrowed clone of another, then the count returned is the
/// count written via either cursor, not the count since the cursor was reborrowed.
#[inline]
pub fn written(&self) -> usize {
self.buf.filled - self.start
}
/// Returns a shared reference to the initialized portion of the cursor.
#[inline]
pub fn init_ref(&self) -> &[u8] |
/// Returns a mutable reference to the initialized portion of the cursor.
#[inline]
pub fn init_mut(&mut self) -> &mut [u8] {
// SAFETY: We only slice the initialized part of the buffer, which is always valid
unsafe {
MaybeUninit::slice_assume_init_mut(&mut self.buf.buf[self.buf.filled..self.buf.init])
}
}
/// Returns a mutable reference to the uninitialized part of the cursor.
///
/// It is safe to uninitialize any of these bytes.
#[inline]
pub fn uninit_mut(&mut self) -> &mut [MaybeUninit<u8>] {
&mut self.buf.buf[self.buf.init..]
}
/// Returns a mutable reference to the whole cursor.
///
/// # Safety
///
/// The caller must not uninitialize any bytes in the initialized portion of the cursor.
#[inline]
pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] {
&mut self.buf.buf[self.buf.filled..]
}
| {
// SAFETY: We only slice the initialized part of the buffer, which is always valid
unsafe { MaybeUninit::slice_assume_init_ref(&self.buf.buf[self.buf.filled..self.buf.init]) }
} | identifier_body |
readbuf.rs | { buf, filled: 0, init: 0 }
}
}
impl<'data> BorrowedBuf<'data> {
/// Returns the total capacity of the buffer.
#[inline]
pub fn capacity(&self) -> usize {
self.buf.len()
}
/// Returns the length of the filled part of the buffer.
#[inline]
pub fn len(&self) -> usize {
self.filled
}
/// Returns the length of the initialized part of the buffer.
#[inline]
pub fn init_len(&self) -> usize {
self.init
}
/// Returns a shared reference to the filled portion of the buffer.
#[inline]
pub fn filled(&self) -> &[u8] {
// SAFETY: We only slice the filled part of the buffer, which is always valid
unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[0..self.filled]) }
}
/// Returns a cursor over the unfilled part of the buffer.
#[inline]
pub fn unfilled<'this>(&'this mut self) -> BorrowedCursor<'this> {
BorrowedCursor {
start: self.filled,
// SAFETY: we never assign into `BorrowedCursor::buf`, so treating its
// lifetime covariantly is safe.
buf: unsafe {
mem::transmute::<&'this mut BorrowedBuf<'data>, &'this mut BorrowedBuf<'this>>(self)
},
}
}
/// Clears the buffer, resetting the filled region to empty.
///
/// The number of initialized bytes is not changed, and the contents of the buffer are not modified.
#[inline]
pub fn clear(&mut self) -> &mut Self {
self.filled = 0;
self
}
/// Asserts that the first `n` bytes of the buffer are initialized.
///
/// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer
/// bytes than are already known to be initialized.
///
/// # Safety
///
/// The caller must ensure that the first `n` unfilled bytes of the buffer have already been initialized.
#[inline]
pub unsafe fn set_init(&mut self, n: usize) -> &mut Self {
self.init = cmp::max(self.init, n);
self
}
}
/// A writeable view of the unfilled portion of a [`BorrowedBuf`](BorrowedBuf).
///
/// Provides access to the initialized and uninitialized parts of the underlying `BorrowedBuf`.
/// Data can be written directly to the cursor by using [`append`](BorrowedCursor::append) or
/// indirectly by getting a slice of part or all of the cursor and writing into the slice. In the
/// indirect case, the caller must call [`advance`](BorrowedCursor::advance) after writing to inform
/// the cursor how many bytes have been written.
///
/// Once data is written to the cursor, it becomes part of the filled portion of the underlying
/// `BorrowedBuf` and can no longer be accessed or re-written by the cursor. I.e., the cursor tracks
/// the unfilled part of the underlying `BorrowedBuf`.
///
/// The lifetime `'a` is a bound on the lifetime of the underlying buffer (which means it is a bound
/// on the data in that buffer by transitivity).
#[derive(Debug)]
pub struct BorrowedCursor<'a> {
/// The underlying buffer.
// Safety invariant: we treat the type of buf as covariant in the lifetime of `BorrowedBuf` when
// we create a `BorrowedCursor`. This is only safe if we never replace `buf` by assigning into
// it, so don't do that!
buf: &'a mut BorrowedBuf<'a>,
/// The length of the filled portion of the underlying buffer at the time of the cursor's
/// creation.
start: usize,
}
impl<'a> BorrowedCursor<'a> {
/// Reborrow this cursor by cloning it with a smaller lifetime.
///
/// Since a cursor maintains unique access to its underlying buffer, the borrowed cursor is
/// not accessible while the new cursor exists.
#[inline]
pub fn reborrow<'this>(&'this mut self) -> BorrowedCursor<'this> {
BorrowedCursor {
// SAFETY: we never assign into `BorrowedCursor::buf`, so treating its
// lifetime covariantly is safe.
buf: unsafe {
mem::transmute::<&'this mut BorrowedBuf<'a>, &'this mut BorrowedBuf<'this>>(
self.buf,
)
},
start: self.start,
}
}
/// Returns the available space in the cursor.
#[inline]
pub fn capacity(&self) -> usize {
self.buf.capacity() - self.buf.filled
}
/// Returns the number of bytes written to this cursor since it was created from a `BorrowedBuf`.
///
/// Note that if this cursor is a reborrowed clone of another, then the count returned is the
/// count written via either cursor, not the count since the cursor was reborrowed.
#[inline]
pub fn written(&self) -> usize {
self.buf.filled - self.start
}
/// Returns a shared reference to the initialized portion of the cursor.
#[inline]
pub fn init_ref(&self) -> &[u8] {
// SAFETY: We only slice the initialized part of the buffer, which is always valid
unsafe { MaybeUninit::slice_assume_init_ref(&self.buf.buf[self.buf.filled..self.buf.init]) }
}
/// Returns a mutable reference to the initialized portion of the cursor.
#[inline]
pub fn init_mut(&mut self) -> &mut [u8] {
// SAFETY: We only slice the initialized part of the buffer, which is always valid
unsafe {
MaybeUninit::slice_assume_init_mut(&mut self.buf.buf[self.buf.filled..self.buf.init])
}
}
/// Returns a mutable reference to the uninitialized part of the cursor.
///
/// It is safe to uninitialize any of these bytes.
#[inline]
pub fn uninit_mut(&mut self) -> &mut [MaybeUninit<u8>] {
&mut self.buf.buf[self.buf.init..]
}
/// Returns a mutable reference to the whole cursor.
///
/// # Safety
///
/// The caller must not uninitialize any bytes in the initialized portion of the cursor.
#[inline]
pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] {
&mut self.buf.buf[self.buf.filled..]
}
/// Advance the cursor by asserting that `n` bytes have been filled.
///
/// After advancing, the `n` bytes are no longer accessible via the cursor and can only be
/// accessed via the underlying buffer. I.e., the buffer's filled portion grows by `n` elements
/// and its unfilled portion (and the capacity of this cursor) shrinks by `n` elements.
///
/// # Safety
///
/// The caller must ensure that the first `n` bytes of the cursor have been properly
/// initialised.
#[inline]
pub unsafe fn advance(&mut self, n: usize) -> &mut Self {
self.buf.filled += n;
self.buf.init = cmp::max(self.buf.init, self.buf.filled);
self
}
/// Initializes all bytes in the cursor.
#[inline]
pub fn ensure_init(&mut self) -> &mut Self {
let uninit = self.uninit_mut();
// SAFETY: 0 is a valid value for MaybeUninit<u8> and the length matches the allocation
// since it comes from a slice reference.
unsafe {
ptr::write_bytes(uninit.as_mut_ptr(), 0, uninit.len());
}
self.buf.init = self.buf.capacity();
self
}
/// Asserts that the first `n` unfilled bytes of the cursor are initialized.
///
/// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when
/// called with fewer bytes than are already known to be initialized.
///
/// # Safety
///
/// The caller must ensure that the first `n` bytes of the buffer have already been initialized.
#[inline]
pub unsafe fn set_init(&mut self, n: usize) -> &mut Self {
self.buf.init = cmp::max(self.buf.init, self.buf.filled + n);
self
}
/// Appends data to the cursor, advancing position within its buffer.
///
/// # Panics
///
/// Panics if `self.capacity()` is less than `buf.len()`.
#[inline]
pub fn append(&mut self, buf: &[u8]) {
assert!(self.capacity() >= buf.len());
// SAFETY: we do not de-initialize any of the elements of the slice
unsafe {
MaybeUninit::write_slice(&mut self.as_mut()[..buf.len()], buf);
}
// SAFETY: We just added the entire contents of buf to the filled section.
unsafe {
self.set_init(buf.len());
}
self.buf.filled += buf.len();
}
}
impl<'a> Write for BorrowedCursor<'a> {
fn | write | identifier_name |
|
replicated_session.go | /block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/x/ident"
m3sync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
)
type newSessionFn func(Options) (clientSession, error)
// replicatedSession is an implementation of clientSession which replicates
// session read/writes to a set of clusters asynchronously.
type replicatedSession struct {
session clientSession
asyncSessions []clientSession
newSessionFn newSessionFn
identifierPool ident.Pool
workerPool m3sync.PooledWorkerPool
replicationSemaphore chan struct{}
scope tally.Scope
log *zap.Logger
metrics replicatedSessionMetrics
outCh chan error
writeTimestampOffset time.Duration
}
type replicatedSessionMetrics struct {
replicateExecuted tally.Counter
replicateNotExecuted tally.Counter
replicateError tally.Counter
replicateSuccess tally.Counter
}
func newReplicatedSessionMetrics(scope tally.Scope) replicatedSessionMetrics {
return replicatedSessionMetrics{
replicateExecuted: scope.Counter("replicate.executed"),
replicateNotExecuted: scope.Counter("replicate.not-executed"),
replicateError: scope.Counter("replicate.error"),
replicateSuccess: scope.Counter("replicate.success"),
}
}
// Ensure replicatedSession implements the clientSession interface.
var _ clientSession = (*replicatedSession)(nil)
type replicatedSessionOption func(*replicatedSession)
func withNewSessionFn(fn newSessionFn) replicatedSessionOption {
return func(session *replicatedSession) {
session.newSessionFn = fn
}
}
func newReplicatedSession(
opts Options, asyncOpts []Options, options ...replicatedSessionOption,
) (clientSession, error) {
workerPool := opts.AsyncWriteWorkerPool()
scope := opts.InstrumentOptions().MetricsScope()
session := replicatedSession{
newSessionFn: newSession,
identifierPool: opts.IdentifierPool(),
workerPool: workerPool,
replicationSemaphore: make(chan struct{}, opts.AsyncWriteMaxConcurrency()),
scope: scope,
log: opts.InstrumentOptions().Logger(),
metrics: newReplicatedSessionMetrics(scope),
writeTimestampOffset: opts.WriteTimestampOffset(),
}
// Apply options
for _, option := range options {
option(&session)
}
if err := session.setSession(opts); err != nil {
return nil, err
}
if err := session.setAsyncSessions(asyncOpts); err != nil {
return nil, err
}
return &session, nil
}
func (s *replicatedSession) setSession(opts Options) error {
if opts.TopologyInitializer() == nil {
return nil
}
session, err := s.newSessionFn(opts)
if err != nil {
return err
}
s.session = session
return nil
}
func (s *replicatedSession) setAsyncSessions(opts []Options) error {
sessions := make([]clientSession, 0, len(opts))
for i, oo := range opts {
subscope := oo.InstrumentOptions().MetricsScope().SubScope(fmt.Sprintf("async-%d", i))
oo = oo.SetInstrumentOptions(oo.InstrumentOptions().SetMetricsScope(subscope))
session, err := s.newSessionFn(oo)
if err != nil {
return err
}
sessions = append(sessions, session)
}
s.asyncSessions = sessions
return nil
}
type replicatedParams struct {
namespace ident.ID
id ident.ID
t xtime.UnixNano
value float64
unit xtime.Unit
annotation []byte
tags ident.TagIterator
useTags bool
}
// NB(srobb): it would be nicer to accept a lambda which is the fn to
// be performed on all sessions; however, this causes an extra allocation.
func (s replicatedSession) replicate(params replicatedParams) error {
for _, asyncSession := range s.asyncSessions {
asyncSession := asyncSession // capture var
var (
clonedID = s.identifierPool.Clone(params.id)
clonedNS = s.identifierPool.Clone(params.namespace)
clonedTags ident.TagIterator
)
if params.useTags {
clonedTags = params.tags.Duplicate()
}
select {
case s.replicationSemaphore <- struct{}{}:
s.workerPool.Go(func() {
var err error
if params.useTags {
err = asyncSession.WriteTagged(
clonedNS, clonedID, clonedTags, params.t,
params.value, params.unit, params.annotation,
)
} else {
err = asyncSession.Write(
clonedNS, clonedID, params.t,
params.value, params.unit, params.annotation,
)
}
if err != nil {
s.metrics.replicateError.Inc(1)
s.log.Error("could not replicate write", zap.Error(err))
} else {
s.metrics.replicateSuccess.Inc(1)
}
if s.outCh != nil {
s.outCh <- err
}
<-s.replicationSemaphore
})
s.metrics.replicateExecuted.Inc(1)
default:
s.metrics.replicateNotExecuted.Inc(1)
}
}
if params.useTags {
return s.session.WriteTagged(
params.namespace, params.id, params.tags, params.t,
params.value, params.unit, params.annotation,
)
}
return s.session.Write(
params.namespace, params.id, params.t,
params.value, params.unit, params.annotation,
)
}
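// tryAsyncSketch is an illustrative sketch, not part of the original file: it
// reduces the bounded-concurrency pattern used by replicate above to plain
// goroutines. The buffered channel acts as a semaphore; when it is full the
// work is skipped instead of blocking the caller.
func tryAsyncSketch(sem chan struct{}, work func()) bool {
	select {
	case sem <- struct{}{}:
		go func() {
			defer func() { <-sem }()
			work()
		}()
		return true
	default:
		return false
	}
}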
func (s *replicatedSession) ReadClusterAvailability() (bool, error) {
return s.session.ReadClusterAvailability()
}
func (s *replicatedSession) WriteClusterAvailability() (bool, error) {
return s.session.WriteClusterAvailability()
}
// Write value to the database for an ID.
func (s replicatedSession) Write(
namespace, id ident.ID, t xtime.UnixNano, value float64,
unit xtime.Unit, annotation []byte,
) error {
return s.replicate(replicatedParams{
namespace: namespace,
id: id,
t: t.Add(-s.writeTimestampOffset),
value: value,
unit: unit,
annotation: annotation,
})
}
// WriteTagged value to the database for an ID and given tags.
func (s replicatedSession) WriteTagged(
namespace, id ident.ID, tags ident.TagIterator, t xtime.UnixNano,
value float64, unit xtime.Unit, annotation []byte,
) error {
return s.replicate(replicatedParams{
namespace: namespace,
id: id,
t: t.Add(-s.writeTimestampOffset),
value: value,
unit: unit,
annotation: annotation,
tags: tags,
useTags: true,
})
}
// Fetch values from the database for an ID.
func (s replicatedSession) Fetch(
namespace, id ident.ID, startInclusive, endExclusive xtime.UnixNano,
) (encoding.SeriesIterator, error) {
return s.session.Fetch(namespace, id, startInclusive, endExclusive)
}
// FetchIDs values from the database for a set of IDs.
func (s replicatedSession) FetchIDs(
namespace ident.ID, ids ident.Iterator, startInclusive, endExclusive xtime.UnixNano,
) (encoding.SeriesIterators, error) {
return s.session.FetchIDs(namespace, ids, startInclusive, endExclusive)
}
// Aggregate aggregates values from the database for the given set of constraints.
func (s replicatedSession) Aggregate(
ctx context.Context,
ns ident.ID,
q index.Query,
opts index.AggregationOptions,
) (AggregatedTagsIterator, FetchResponseMetadata, error) {
return s.session.Aggregate(ctx, ns, q, opts)
}
// FetchTagged resolves the provided query to known IDs, and fetches the data for them.
func (s replicatedSession) FetchTagged(
ctx context.Context,
namespace ident.ID,
q index.Query,
opts index.QueryOptions,
) (encoding.SeriesIterators, FetchResponseMetadata, error) {
return s.session.FetchTagged(ctx, namespace, q, opts)
}
// FetchTaggedIDs resolves the provided query to known IDs.
func (s replicatedSession) FetchTaggedIDs(
ctx context.Context,
namespace ident.ID,
q index.Query,
opts index.QueryOptions,
) (TaggedIDsIterator, FetchResponseMetadata, error) {
return s.session.FetchTaggedIDs(ctx, namespace, q, opts)
}
// ShardID returns the shard for a given ID so that callers
// can easily discern what shard is failing when operations
// for given IDs begin failing.
func (s replicatedSession) ShardID(id ident.ID) (uint32, error) {
return s.session.ShardID(id)
}
// IteratorPools exposes the internal iterator pools used by the session to clients.
func (s replicatedSession) IteratorPools() (encoding.IteratorPools, error) {
return s.session.IteratorPools()
}
// Close the session.
func (s replicatedSession) Close() error {
err := s.session.Close()
for _, as := range s.asyncSessions {
if err := as.Close(); err != nil | {
s.log.Error("could not close async session: %v", zap.Error(err))
} | conditional_block |
|
replicated_session.go | /block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/x/ident"
m3sync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
)
type newSessionFn func(Options) (clientSession, error)
// replicatedSession is an implementation of clientSession which replicates
// session read/writes to a set of clusters asynchronously.
type replicatedSession struct {
session clientSession
asyncSessions []clientSession
newSessionFn newSessionFn
identifierPool ident.Pool
workerPool m3sync.PooledWorkerPool
replicationSemaphore chan struct{}
scope tally.Scope
log *zap.Logger
metrics replicatedSessionMetrics
outCh chan error
writeTimestampOffset time.Duration
}
type replicatedSessionMetrics struct {
replicateExecuted tally.Counter
replicateNotExecuted tally.Counter
replicateError tally.Counter
replicateSuccess tally.Counter
}
func newReplicatedSessionMetrics(scope tally.Scope) replicatedSessionMetrics {
return replicatedSessionMetrics{
replicateExecuted: scope.Counter("replicate.executed"),
replicateNotExecuted: scope.Counter("replicate.not-executed"),
replicateError: scope.Counter("replicate.error"),
replicateSuccess: scope.Counter("replicate.success"),
}
}
// Ensure replicatedSession implements the clientSession interface.
var _ clientSession = (*replicatedSession)(nil)
type replicatedSessionOption func(*replicatedSession)
func withNewSessionFn(fn newSessionFn) replicatedSessionOption {
return func(session *replicatedSession) {
session.newSessionFn = fn
}
}
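// Illustrative sketch (not part of the upstream file): because the constructor accepts
// functional options, tests can swap in a fake session constructor; fakeSession below is a
// hypothetical test double, not a real identifier from this package.
//
//	s, err := newReplicatedSession(opts, asyncOpts,
//		withNewSessionFn(func(Options) (clientSession, error) { return fakeSession, nil }))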
func newReplicatedSession(
opts Options, asyncOpts []Options, options ...replicatedSessionOption,
) (clientSession, error) {
workerPool := opts.AsyncWriteWorkerPool()
scope := opts.InstrumentOptions().MetricsScope()
session := replicatedSession{
newSessionFn: newSession,
identifierPool: opts.IdentifierPool(),
workerPool: workerPool,
replicationSemaphore: make(chan struct{}, opts.AsyncWriteMaxConcurrency()),
scope: scope,
log: opts.InstrumentOptions().Logger(),
metrics: newReplicatedSessionMetrics(scope),
writeTimestampOffset: opts.WriteTimestampOffset(),
}
// Apply options
for _, option := range options {
option(&session)
}
if err := session.setSession(opts); err != nil {
return nil, err
}
if err := session.setAsyncSessions(asyncOpts); err != nil {
return nil, err
}
return &session, nil
}
func (s *replicatedSession) setSession(opts Options) error {
if opts.TopologyInitializer() == nil {
return nil
}
session, err := s.newSessionFn(opts)
if err != nil {
return err
}
s.session = session
return nil
}
func (s *replicatedSession) setAsyncSessions(opts []Options) error {
sessions := make([]clientSession, 0, len(opts))
for i, oo := range opts {
subscope := oo.InstrumentOptions().MetricsScope().SubScope(fmt.Sprintf("async-%d", i))
oo = oo.SetInstrumentOptions(oo.InstrumentOptions().SetMetricsScope(subscope))
session, err := s.newSessionFn(oo)
if err != nil {
return err
}
sessions = append(sessions, session)
}
s.asyncSessions = sessions
return nil
}
type replicatedParams struct {
namespace ident.ID
id ident.ID
t xtime.UnixNano
value float64
unit xtime.Unit
annotation []byte
tags ident.TagIterator
useTags bool
}
// NB(srobb): it would be nicer to accept a lambda which is the fn to
// be performed on all sessions, however this causes an extra allocation.
func (s replicatedSession) replicate(params replicatedParams) error {
for _, asyncSession := range s.asyncSessions {
asyncSession := asyncSession // capture var
var (
clonedID = s.identifierPool.Clone(params.id)
clonedNS = s.identifierPool.Clone(params.namespace)
clonedTags ident.TagIterator
)
if params.useTags {
clonedTags = params.tags.Duplicate()
}
select {
case s.replicationSemaphore <- struct{}{}:
s.workerPool.Go(func() {
var err error
if params.useTags {
err = asyncSession.WriteTagged(
clonedNS, clonedID, clonedTags, params.t,
params.value, params.unit, params.annotation,
)
} else {
err = asyncSession.Write(
clonedNS, clonedID, params.t,
params.value, params.unit, params.annotation,
)
}
if err != nil {
s.metrics.replicateError.Inc(1)
s.log.Error("could not replicate write", zap.Error(err))
} else {
s.metrics.replicateSuccess.Inc(1)
}
if s.outCh != nil {
s.outCh <- err
}
<-s.replicationSemaphore
})
s.metrics.replicateExecuted.Inc(1)
default:
s.metrics.replicateNotExecuted.Inc(1)
}
}
if params.useTags {
return s.session.WriteTagged(
params.namespace, params.id, params.tags, params.t,
params.value, params.unit, params.annotation,
)
}
return s.session.Write(
params.namespace, params.id, params.t,
params.value, params.unit, params.annotation,
)
}
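// Illustrative sketch (not part of the upstream file): the bounded-concurrency idiom used by
// replicate() above, shown in isolation with a hypothetical doWork function. A buffered
// channel acts as a semaphore, and select/default sheds load instead of blocking the caller.
//
//	sem := make(chan struct{}, maxConcurrency)
//	select {
//	case sem <- struct{}{}:
//		go func() { defer func() { <-sem }(); doWork() }()
//	default:
//		// at capacity: count the skipped replication rather than block
//	}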
func (s *replicatedSession) ReadClusterAvailability() (bool, error) {
return s.session.ReadClusterAvailability()
}
func (s *replicatedSession) WriteClusterAvailability() (bool, error) {
return s.session.WriteClusterAvailability()
}
// Write value to the database for an ID.
func (s replicatedSession) Write(
namespace, id ident.ID, t xtime.UnixNano, value float64,
unit xtime.Unit, annotation []byte,
) error |
// WriteTagged value to the database for an ID and given tags.
func (s replicatedSession) WriteTagged(
namespace, id ident.ID, tags ident.TagIterator, t xtime.UnixNano,
value float64, unit xtime.Unit, annotation []byte,
) error {
return s.replicate(replicatedParams{
namespace: namespace,
id: id,
t: t.Add(-s.writeTimestampOffset),
value: value,
unit: unit,
annotation: annotation,
tags: tags,
useTags: true,
})
}
// Fetch values from the database for an ID.
func (s replicatedSession) Fetch(
namespace, id ident.ID, startInclusive, endExclusive xtime.UnixNano,
) (encoding.SeriesIterator, error) {
return s.session.Fetch(namespace, id, startInclusive, endExclusive)
}
// FetchIDs values from the database for a set of IDs.
func (s replicatedSession) FetchIDs(
namespace ident.ID, ids ident.Iterator, startInclusive, endExclusive xtime.UnixNano,
) (encoding.SeriesIterators, error) {
return s.session.FetchIDs(namespace, ids, startInclusive, endExclusive)
}
// Aggregate aggregates values from the database for the given set of constraints.
func (s replicatedSession) Aggregate(
ctx context.Context,
ns ident.ID,
q index.Query,
opts index.AggregationOptions,
) (AggregatedTagsIterator, FetchResponseMetadata, error) {
return s.session.Aggregate(ctx, ns, q, opts)
}
// FetchTagged resolves the provided query to known IDs, and fetches the data for them.
func (s replicatedSession) FetchTagged(
ctx context.Context,
namespace ident.ID,
q index.Query,
opts index.QueryOptions,
) (encoding.SeriesIterators, FetchResponseMetadata, error) {
return s.session.FetchTagged(ctx, namespace, q, opts)
}
// FetchTaggedIDs resolves the provided query to known IDs.
func (s replicatedSession) FetchTaggedIDs(
ctx context.Context,
namespace ident.ID,
q index.Query,
opts index.QueryOptions,
) (TaggedIDsIterator, FetchResponseMetadata, error) {
return s.session.FetchTaggedIDs(ctx, namespace, q, opts)
}
// ShardID returns the given shard for an ID for callers
// to easily discern what shard is failing when operations
// for given IDs begin failing.
func (s replicatedSession) ShardID(id ident.ID) (uint32, error) {
return s.session.ShardID(id)
}
// IteratorPools exposes the internal iterator pools used by the session to clients.
func (s replicatedSession) IteratorPools() (encoding.IteratorPools, error) {
return s.session.IteratorPools()
}
// Close the session.
func (s replicatedSession) Close() error {
err := s.session.Close()
for _, as := range s.asyncSessions {
if err := as.Close(); err != nil {
s.log.Error("could not close async session: %v", zap.Error(err))
}
| {
return s.replicate(replicatedParams{
namespace: namespace,
id: id,
t: t.Add(-s.writeTimestampOffset),
value: value,
unit: unit,
annotation: annotation,
})
} | identifier_body |
replicated_session.go | /storage/block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/x/ident"
m3sync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
)
type newSessionFn func(Options) (clientSession, error)
// replicatedSession is an implementation of clientSession which replicates
// session read/writes to a set of clusters asynchronously.
type replicatedSession struct {
session clientSession
asyncSessions []clientSession
newSessionFn newSessionFn
identifierPool ident.Pool
workerPool m3sync.PooledWorkerPool
replicationSemaphore chan struct{}
scope tally.Scope
log *zap.Logger
metrics replicatedSessionMetrics
outCh chan error
writeTimestampOffset time.Duration
}
type replicatedSessionMetrics struct {
replicateExecuted tally.Counter
replicateNotExecuted tally.Counter
replicateError tally.Counter
replicateSuccess tally.Counter
}
func newReplicatedSessionMetrics(scope tally.Scope) replicatedSessionMetrics {
return replicatedSessionMetrics{
replicateExecuted: scope.Counter("replicate.executed"),
replicateNotExecuted: scope.Counter("replicate.not-executed"),
replicateError: scope.Counter("replicate.error"),
replicateSuccess: scope.Counter("replicate.success"),
}
}
// Ensure replicatedSession implements the clientSession interface.
var _ clientSession = (*replicatedSession)(nil)
type replicatedSessionOption func(*replicatedSession)
func withNewSessionFn(fn newSessionFn) replicatedSessionOption {
return func(session *replicatedSession) {
session.newSessionFn = fn
}
}
func newReplicatedSession(
opts Options, asyncOpts []Options, options ...replicatedSessionOption,
) (clientSession, error) {
workerPool := opts.AsyncWriteWorkerPool()
scope := opts.InstrumentOptions().MetricsScope()
session := replicatedSession{
newSessionFn: newSession,
identifierPool: opts.IdentifierPool(),
workerPool: workerPool,
replicationSemaphore: make(chan struct{}, opts.AsyncWriteMaxConcurrency()),
scope: scope,
log: opts.InstrumentOptions().Logger(),
metrics: newReplicatedSessionMetrics(scope),
writeTimestampOffset: opts.WriteTimestampOffset(),
}
// Apply options
for _, option := range options {
option(&session)
}
if err := session.setSession(opts); err != nil {
return nil, err
}
if err := session.setAsyncSessions(asyncOpts); err != nil {
return nil, err
}
return &session, nil
}
func (s *replicatedSession) setSession(opts Options) error {
if opts.TopologyInitializer() == nil {
return nil
}
session, err := s.newSessionFn(opts)
if err != nil {
return err
}
s.session = session
return nil
}
func (s *replicatedSession) setAsyncSessions(opts []Options) error {
sessions := make([]clientSession, 0, len(opts))
for i, oo := range opts {
subscope := oo.InstrumentOptions().MetricsScope().SubScope(fmt.Sprintf("async-%d", i))
oo = oo.SetInstrumentOptions(oo.InstrumentOptions().SetMetricsScope(subscope))
session, err := s.newSessionFn(oo)
if err != nil {
return err
}
sessions = append(sessions, session)
}
s.asyncSessions = sessions
return nil
}
type replicatedParams struct {
namespace ident.ID
id ident.ID
t xtime.UnixNano
value float64
unit xtime.Unit
annotation []byte
tags ident.TagIterator
useTags bool
}
// NB(srobb): it would be nicer to accept a lambda which is the fn to
// be performed on all sessions, however this causes an extra allocation.
func (s replicatedSession) replicate(params replicatedParams) error {
for _, asyncSession := range s.asyncSessions {
asyncSession := asyncSession // capture var
var (
clonedID = s.identifierPool.Clone(params.id)
clonedNS = s.identifierPool.Clone(params.namespace)
clonedTags ident.TagIterator
)
if params.useTags {
clonedTags = params.tags.Duplicate()
}
select {
case s.replicationSemaphore <- struct{}{}:
s.workerPool.Go(func() {
var err error
if params.useTags {
err = asyncSession.WriteTagged(
clonedNS, clonedID, clonedTags, params.t,
params.value, params.unit, params.annotation,
)
} else {
err = asyncSession.Write(
clonedNS, clonedID, params.t,
params.value, params.unit, params.annotation,
)
}
if err != nil {
s.metrics.replicateError.Inc(1)
s.log.Error("could not replicate write", zap.Error(err))
} else {
s.metrics.replicateSuccess.Inc(1)
}
if s.outCh != nil {
s.outCh <- err
}
<-s.replicationSemaphore
})
s.metrics.replicateExecuted.Inc(1)
default:
s.metrics.replicateNotExecuted.Inc(1)
}
}
if params.useTags {
return s.session.WriteTagged(
params.namespace, params.id, params.tags, params.t,
params.value, params.unit, params.annotation,
)
}
return s.session.Write(
params.namespace, params.id, params.t,
params.value, params.unit, params.annotation,
)
}
func (s *replicatedSession) ReadClusterAvailability() (bool, error) {
return s.session.ReadClusterAvailability()
}
func (s *replicatedSession) WriteClusterAvailability() (bool, error) {
return s.session.WriteClusterAvailability()
}
// Write value to the database for an ID.
func (s replicatedSession) Write(
namespace, id ident.ID, t xtime.UnixNano, value float64,
unit xtime.Unit, annotation []byte,
) error {
return s.replicate(replicatedParams{
namespace: namespace,
id: id,
t: t.Add(-s.writeTimestampOffset),
value: value,
unit: unit,
annotation: annotation,
})
}
// WriteTagged value to the database for an ID and given tags.
func (s replicatedSession) WriteTagged(
namespace, id ident.ID, tags ident.TagIterator, t xtime.UnixNano,
value float64, unit xtime.Unit, annotation []byte,
) error {
return s.replicate(replicatedParams{
namespace: namespace, | annotation: annotation,
tags: tags,
useTags: true,
})
}
// Fetch values from the database for an ID.
func (s replicatedSession) Fetch(
namespace, id ident.ID, startInclusive, endExclusive xtime.UnixNano,
) (encoding.SeriesIterator, error) {
return s.session.Fetch(namespace, id, startInclusive, endExclusive)
}
// FetchIDs values from the database for a set of IDs.
func (s replicatedSession) FetchIDs(
namespace ident.ID, ids ident.Iterator, startInclusive, endExclusive xtime.UnixNano,
) (encoding.SeriesIterators, error) {
return s.session.FetchIDs(namespace, ids, startInclusive, endExclusive)
}
// Aggregate aggregates values from the database for the given set of constraints.
func (s replicatedSession) Aggregate(
ctx context.Context,
ns ident.ID,
q index.Query,
opts index.AggregationOptions,
) (AggregatedTagsIterator, FetchResponseMetadata, error) {
return s.session.Aggregate(ctx, ns, q, opts)
}
// FetchTagged resolves the provided query to known IDs, and fetches the data for them.
func (s replicatedSession) FetchTagged(
ctx context.Context,
namespace ident.ID,
q index.Query,
opts index.QueryOptions,
) (encoding.SeriesIterators, FetchResponseMetadata, error) {
return s.session.FetchTagged(ctx, namespace, q, opts)
}
// FetchTaggedIDs resolves the provided query to known IDs.
func (s replicatedSession) FetchTaggedIDs(
ctx context.Context,
namespace ident.ID,
q index.Query,
opts index.QueryOptions,
) (TaggedIDsIterator, FetchResponseMetadata, error) {
return s.session.FetchTaggedIDs(ctx, namespace, q, opts)
}
// ShardID returns the given shard for an ID for callers
// to easily discern what shard is failing when operations
// for given IDs begin failing.
func (s replicatedSession) ShardID(id ident.ID) (uint32, error) {
return s.session.ShardID(id)
}
// IteratorPools exposes the internal iterator pools used by the session to clients.
func (s replicatedSession) IteratorPools() (encoding.IteratorPools, error) {
return s.session.IteratorPools()
}
// Close the session.
func (s replicatedSession) Close() error {
err := s.session.Close()
for _, as := range s.asyncSessions {
if err := as.Close(); err != nil {
s.log.Error("could not close async session: %v", zap.Error(err))
}
}
| id: id,
t: t.Add(-s.writeTimestampOffset),
value: value,
unit: unit, | random_line_split |
replicated_session.go | /block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/x/ident"
m3sync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
)
type newSessionFn func(Options) (clientSession, error)
// replicatedSession is an implementation of clientSession which replicates
// session read/writes to a set of clusters asynchronously.
type replicatedSession struct {
session clientSession
asyncSessions []clientSession
newSessionFn newSessionFn
identifierPool ident.Pool
workerPool m3sync.PooledWorkerPool
replicationSemaphore chan struct{}
scope tally.Scope
log *zap.Logger
metrics replicatedSessionMetrics
outCh chan error
writeTimestampOffset time.Duration
}
type replicatedSessionMetrics struct {
replicateExecuted tally.Counter
replicateNotExecuted tally.Counter
replicateError tally.Counter
replicateSuccess tally.Counter
}
func newReplicatedSessionMetrics(scope tally.Scope) replicatedSessionMetrics {
return replicatedSessionMetrics{
replicateExecuted: scope.Counter("replicate.executed"),
replicateNotExecuted: scope.Counter("replicate.not-executed"),
replicateError: scope.Counter("replicate.error"),
replicateSuccess: scope.Counter("replicate.success"),
}
}
// Ensure replicatedSession implements the clientSession interface.
var _ clientSession = (*replicatedSession)(nil)
type replicatedSessionOption func(*replicatedSession)
func withNewSessionFn(fn newSessionFn) replicatedSessionOption {
return func(session *replicatedSession) {
session.newSessionFn = fn
}
}
func newReplicatedSession(
opts Options, asyncOpts []Options, options ...replicatedSessionOption,
) (clientSession, error) {
workerPool := opts.AsyncWriteWorkerPool()
scope := opts.InstrumentOptions().MetricsScope()
session := replicatedSession{
newSessionFn: newSession,
identifierPool: opts.IdentifierPool(),
workerPool: workerPool,
replicationSemaphore: make(chan struct{}, opts.AsyncWriteMaxConcurrency()),
scope: scope,
log: opts.InstrumentOptions().Logger(),
metrics: newReplicatedSessionMetrics(scope),
writeTimestampOffset: opts.WriteTimestampOffset(),
}
// Apply options
for _, option := range options {
option(&session)
}
if err := session.setSession(opts); err != nil {
return nil, err
}
if err := session.setAsyncSessions(asyncOpts); err != nil {
return nil, err
}
return &session, nil
}
func (s *replicatedSession) | (opts Options) error {
if opts.TopologyInitializer() == nil {
return nil
}
session, err := s.newSessionFn(opts)
if err != nil {
return err
}
s.session = session
return nil
}
func (s *replicatedSession) setAsyncSessions(opts []Options) error {
sessions := make([]clientSession, 0, len(opts))
for i, oo := range opts {
subscope := oo.InstrumentOptions().MetricsScope().SubScope(fmt.Sprintf("async-%d", i))
oo = oo.SetInstrumentOptions(oo.InstrumentOptions().SetMetricsScope(subscope))
session, err := s.newSessionFn(oo)
if err != nil {
return err
}
sessions = append(sessions, session)
}
s.asyncSessions = sessions
return nil
}
type replicatedParams struct {
namespace ident.ID
id ident.ID
t xtime.UnixNano
value float64
unit xtime.Unit
annotation []byte
tags ident.TagIterator
useTags bool
}
// NB(srobb): it would be nicer to accept a lambda which is the fn to
// be performed on all sessions, however this causes an extra allocation.
func (s replicatedSession) replicate(params replicatedParams) error {
for _, asyncSession := range s.asyncSessions {
asyncSession := asyncSession // capture var
var (
clonedID = s.identifierPool.Clone(params.id)
clonedNS = s.identifierPool.Clone(params.namespace)
clonedTags ident.TagIterator
)
if params.useTags {
clonedTags = params.tags.Duplicate()
}
select {
case s.replicationSemaphore <- struct{}{}:
s.workerPool.Go(func() {
var err error
if params.useTags {
err = asyncSession.WriteTagged(
clonedNS, clonedID, clonedTags, params.t,
params.value, params.unit, params.annotation,
)
} else {
err = asyncSession.Write(
clonedNS, clonedID, params.t,
params.value, params.unit, params.annotation,
)
}
if err != nil {
s.metrics.replicateError.Inc(1)
s.log.Error("could not replicate write", zap.Error(err))
} else {
s.metrics.replicateSuccess.Inc(1)
}
if s.outCh != nil {
s.outCh <- err
}
<-s.replicationSemaphore
})
s.metrics.replicateExecuted.Inc(1)
default:
s.metrics.replicateNotExecuted.Inc(1)
}
}
if params.useTags {
return s.session.WriteTagged(
params.namespace, params.id, params.tags, params.t,
params.value, params.unit, params.annotation,
)
}
return s.session.Write(
params.namespace, params.id, params.t,
params.value, params.unit, params.annotation,
)
}
func (s *replicatedSession) ReadClusterAvailability() (bool, error) {
return s.session.ReadClusterAvailability()
}
func (s *replicatedSession) WriteClusterAvailability() (bool, error) {
return s.session.WriteClusterAvailability()
}
// Write value to the database for an ID.
func (s replicatedSession) Write(
namespace, id ident.ID, t xtime.UnixNano, value float64,
unit xtime.Unit, annotation []byte,
) error {
return s.replicate(replicatedParams{
namespace: namespace,
id: id,
t: t.Add(-s.writeTimestampOffset),
value: value,
unit: unit,
annotation: annotation,
})
}
// WriteTagged value to the database for an ID and given tags.
func (s replicatedSession) WriteTagged(
namespace, id ident.ID, tags ident.TagIterator, t xtime.UnixNano,
value float64, unit xtime.Unit, annotation []byte,
) error {
return s.replicate(replicatedParams{
namespace: namespace,
id: id,
t: t.Add(-s.writeTimestampOffset),
value: value,
unit: unit,
annotation: annotation,
tags: tags,
useTags: true,
})
}
// Fetch values from the database for an ID.
func (s replicatedSession) Fetch(
namespace, id ident.ID, startInclusive, endExclusive xtime.UnixNano,
) (encoding.SeriesIterator, error) {
return s.session.Fetch(namespace, id, startInclusive, endExclusive)
}
// FetchIDs values from the database for a set of IDs.
func (s replicatedSession) FetchIDs(
namespace ident.ID, ids ident.Iterator, startInclusive, endExclusive xtime.UnixNano,
) (encoding.SeriesIterators, error) {
return s.session.FetchIDs(namespace, ids, startInclusive, endExclusive)
}
// Aggregate aggregates values from the database for the given set of constraints.
func (s replicatedSession) Aggregate(
ctx context.Context,
ns ident.ID,
q index.Query,
opts index.AggregationOptions,
) (AggregatedTagsIterator, FetchResponseMetadata, error) {
return s.session.Aggregate(ctx, ns, q, opts)
}
// FetchTagged resolves the provided query to known IDs, and fetches the data for them.
func (s replicatedSession) FetchTagged(
ctx context.Context,
namespace ident.ID,
q index.Query,
opts index.QueryOptions,
) (encoding.SeriesIterators, FetchResponseMetadata, error) {
return s.session.FetchTagged(ctx, namespace, q, opts)
}
// FetchTaggedIDs resolves the provided query to known IDs.
func (s replicatedSession) FetchTaggedIDs(
ctx context.Context,
namespace ident.ID,
q index.Query,
opts index.QueryOptions,
) (TaggedIDsIterator, FetchResponseMetadata, error) {
return s.session.FetchTaggedIDs(ctx, namespace, q, opts)
}
// ShardID returns the given shard for an ID for callers
// to easily discern what shard is failing when operations
// for given IDs begin failing.
func (s replicatedSession) ShardID(id ident.ID) (uint32, error) {
return s.session.ShardID(id)
}
// IteratorPools exposes the internal iterator pools used by the session to clients.
func (s replicatedSession) IteratorPools() (encoding.IteratorPools, error) {
return s.session.IteratorPools()
}
// Close the session.
func (s replicatedSession) Close() error {
err := s.session.Close()
for _, as := range s.asyncSessions {
if err := as.Close(); err != nil {
s.log.Error("could not close async session: %v", zap.Error(err))
}
}
| setSession | identifier_name |
shadow_logger.rs | ::io::stdout();
let stdout_locked = stdout_unlocked.lock();
let mut stdout = std::io::BufWriter::new(stdout_locked);
while toflush > 0 {
let record = match self.records.pop() {
Some(r) => r,
None => {
// This can happen if another thread panics while the
// logging thread is flushing. In that case both threads
// will be consuming from the queue.
break;
}
};
toflush -= 1;
if record.level <= Level::Error && *self.log_errors_to_stderr.get().unwrap() {
// Send to both stdout and stderr.
let stderr_unlocked = std::io::stderr();
let stderr_locked = stderr_unlocked.lock();
let mut stderr = std::io::BufWriter::new(stderr_locked);
let line = format!("{record}");
write!(stdout, "{line}")?;
write!(stderr, "{line}")?;
} else {
write!(stdout, "{record}")?;
}
}
if let Some(done_sender) = done_sender {
// We can't log from this thread without risking deadlock, so in the
// unlikely case that the calling thread has gone away, just print
// directly.
done_sender.send(()).unwrap_or_else(|e| {
println!(
"WARNING: Logger couldn't notify
calling thread: {:?}",
e
)
});
}
Ok(())
}
/// When disabled, the logger thread is notified to write each record as
/// soon as it's created. The calling thread still isn't blocked on the
/// record actually being written, though.
pub fn set_buffering_enabled(&self, buffering_enabled: bool) {
let mut writer = self.buffering_enabled.write().unwrap();
*writer = buffering_enabled;
}
/// If the maximum log level has not yet been set, returns `LevelFilter::Trace`.
pub fn max_level(&self) -> LevelFilter {
self.max_log_level
.get()
.copied()
.unwrap_or(LevelFilter::Trace)
}
/// Set the default maximum log level, but this can be overridden per-host. Is only intended to
/// be called from `init()`. Will panic if called more than once.
fn set_max_level(&self, level: LevelFilter) {
self.max_log_level.set(level).unwrap()
}
/// Set whether to log errors to stderr in addition to stdout.
///
/// Is only intended to be called from `init()`. Will panic if called more
/// than once.
fn set_log_errors_to_stderr(&self, val: bool) {
self.log_errors_to_stderr.set(val).unwrap()
}
// Send a flush command to the logger thread.
fn flush_impl(&self, notify_done: Option<Sender<()>>) {
self.send_command(LoggerCommand::Flush(notify_done))
}
// Send a flush command to the logger thread and block until it's completed.
fn flush_sync(&self) {
let (done_sender, done_receiver) = std::sync::mpsc::channel();
self.flush_impl(Some(done_sender));
done_receiver.recv().unwrap();
}
// Send a flush command to the logger thread.
fn flush_async(&self) {
self.flush_impl(None);
}
// Send a command to the logger thread.
fn send_command(&self, cmd: LoggerCommand) {
SENDER
.try_with(|thread_sender| {
if thread_sender.borrow().is_none() {
let lock = self.command_sender.lock().unwrap();
*thread_sender.borrow_mut() = Some(lock.clone());
}
thread_sender
.borrow()
.as_ref()
.unwrap()
.send(cmd)
.unwrap_or_else(|e| {
println!("WARNING: Couldn't send command to logger thread: {:?}", e);
});
})
.unwrap_or_else(|e| {
println!(
"WARNING: Couldn't get sender channel to logger thread: {:?}",
e
);
});
}
}
impl Log for ShadowLogger {
fn enabled(&self, metadata: &Metadata) -> bool {
let filter = match Worker::with_active_host(|host| host.info().log_level) {
Some(Some(level)) => level,
_ => self.max_level(),
};
metadata.level() <= filter
}
fn log(&self, record: &Record) {
if !self.enabled(record.metadata()) {
return;
}
let message = std::fmt::format(*record.args());
let host_info = Worker::with_active_host(|host| host.info().clone());
let mut shadowrecord = ShadowLogRecord {
level: record.level(),
file: record.file_static(),
module_path: record.module_path_static(),
line: record.line(),
message,
wall_time: Duration::from_micros(unsafe {
u64::try_from(c_log::logger_elapsed_micros()).unwrap()
}),
emu_time: Worker::current_time(),
thread_name: THREAD_NAME
.try_with(|name| (*name).clone())
.unwrap_or_else(|_| get_thread_name()),
thread_id: THREAD_ID
.try_with(|id| **id)
.unwrap_or_else(|_| nix::unistd::gettid()),
host_info,
};
loop {
match self.records.push(shadowrecord) {
Ok(()) => break,
Err(r) => {
// Queue is full. Flush it and try again.
shadowrecord = r;
self.flush_sync();
}
}
}
if record.level() == Level::Error {
// Unlike in Shadow's C code, we don't abort the program on Error
// logs. In Rust the same purpose is filled with `panic` and
// `unwrap`. C callers will still exit or abort via the lib/logger wrapper.
//
// Flush *synchronously*, since we're likely about to crash one way or another.
self.flush_sync();
} else if self.records.len() > ASYNC_FLUSH_QD_LINES_THRESHOLD
|| !*self.buffering_enabled.read().unwrap()
{
self.flush_async();
}
}
fn flush(&self) {
self.flush_sync();
}
}
struct ShadowLogRecord {
level: Level,
file: Option<&'static str>,
module_path: Option<&'static str>,
line: Option<u32>,
message: String,
wall_time: Duration,
emu_time: Option<EmulatedTime>,
thread_name: String,
thread_id: nix::unistd::Pid,
host_info: Option<Arc<HostInfo>>,
}
impl std::fmt::Display for ShadowLogRecord {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
{
let parts = TimeParts::from_nanos(self.wall_time.as_nanos());
write!(
f,
"{:02}:{:02}:{:02}.{:06}",
parts.hours,
parts.mins,
parts.secs,
parts.nanos / 1000
)?;
}
write!(f, " [{}:{}]", self.thread_id, self.thread_name)?;
if let Some(emu_time) = self.emu_time {
let sim_time = emu_time.duration_since(&EmulatedTime::SIMULATION_START);
let parts = TimeParts::from_nanos(sim_time.as_nanos());
write!(
f,
" {:02}:{:02}:{:02}.{:09}",
parts.hours, parts.mins, parts.secs, parts.nanos
)?;
} else {
write!(f, " n/a")?;
}
write!(f, " [{level}]", level = self.level)?;
if let Some(host) = &self.host_info {
write!(
f,
" [{hostname}:{ip}]",
hostname = host.name,
ip = host.default_ip,
)?;
} else {
write!(f, " [n/a]",)?;
}
write!(
f,
" [{file}:",
file = self
.file
.map(|f| if let Some(sep_pos) = f.rfind('/') {
&f[(sep_pos + 1)..]
} else {
f
})
.unwrap_or("n/a"),
)?;
if let Some(line) = self.line {
write!(f, "{line}", line = line)?;
} else {
write!(f, "n/a")?;
}
writeln!(
f,
"] [{module}] {msg}",
module = self.module_path.unwrap_or("n/a"),
msg = self.message
)?;
Ok(())
}
}
enum LoggerCommand {
// Flush; takes an optional one-shot channel to notify that the flush has completed.
Flush(Option<Sender<()>>),
}
pub fn set_buffering_enabled(buffering_enabled: bool) {
SHADOW_LOGGER.set_buffering_enabled(buffering_enabled);
}
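// Hypothetical usage sketch (module path and call sites assumed, not taken from this crate):
// a caller batches records during the hot phase of a run and returns to eager flushing later.
//
//     shadow_logger::set_buffering_enabled(true);  // queue records; the logger thread flushes on its timer
//     // ... run the latency-sensitive phase ...
//     shadow_logger::set_buffering_enabled(false); // flush as each record arrives again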
mod export {
use super::*;
/// When disabled, the logger thread is notified to write each record as
/// soon as it's created. The calling thread still isn't blocked on the
/// record actually being written, though.
#[no_mangle]
pub unsafe extern "C" fn shadow_logger_setEnableBuffering(buffering_enabled: i32) | {
set_buffering_enabled(buffering_enabled != 0)
} | identifier_body |
|
shadow_logger.rs | (panic_info);
}));
Ok(())
}
/// A logger specialized for Shadow. It attaches simulation context to log
/// entries (e.g. sim time, running process, etc.). It's also designed for
/// high performance to accommodate heavy logging from multiple threads.
pub struct ShadowLogger {
// Channel used to send commands to the logger's thread.
//
// The Sender half of a channel isn't Sync, so we must protect it with a
// Mutex to make ShadowLogger be Sync. This is only accessed once per
// thread, though, to clone into the thread-local SENDER.
command_sender: Mutex<Sender<LoggerCommand>>,
// Like the sender, needs a Mutex for ShadowLogger to be Sync.
// The Mutex is only locked once though by the logger thread, which keeps
// it locked for as long as it's running.
command_receiver: Mutex<Receiver<LoggerCommand>>,
// A lock-free queue for individual log records. We don't put the records
// themselves in the `command_sender`, because `Sender` doesn't support
// getting the queue length. Conversely we don't put commands in this queue
// because it doesn't support blocking operations.
//
// The size is roughly SYNC_FLUSH_QD_LINES_THRESHOLD *
// size_of<ShadowLogRecord>; we might want to consider SegQueue (which grows
// and shrinks dynamically) instead if we ever make SYNC_FLUSH_QD_LINES_THRESHOLD very
// large.
records: ArrayQueue<ShadowLogRecord>,
// When false, sends a (still-asynchronous) flush command to the logger
// thread every time a record is pushed into `records`.
buffering_enabled: RwLock<bool>,
// The maximum log level, unless overridden by a host-specific log level.
max_log_level: OnceCell<LevelFilter>,
// Whether to log errors to stderr in addition to stdout.
log_errors_to_stderr: OnceCell<bool>,
}
thread_local!(static SENDER: RefCell<Option<Sender<LoggerCommand>>> = RefCell::new(None));
thread_local!(static THREAD_NAME: Lazy<String> = Lazy::new(|| { get_thread_name() }));
thread_local!(static THREAD_ID: Lazy<nix::unistd::Pid> = Lazy::new(|| { nix::unistd::gettid() }));
fn get_thread_name() -> String {
let mut thread_name = Vec::<i8>::with_capacity(16);
let res = unsafe {
thread_name.set_len(thread_name.capacity());
// ~infallible when host_name is at least 16 bytes.
libc::pthread_getname_np(
libc::pthread_self(),
thread_name.as_mut_ptr(),
thread_name.len(),
)
};
// The most likely cause of failure is a bug in the caller.
debug_assert_eq!(res, 0, "pthread_getname_np: {}", nix::errno::from_i32(res));
if res == 0 {
// SAFETY: We just initialized the input buffer `thread_name`, and
// `thread_name_cstr` won't outlive it.
let thread_name_cstr = unsafe { std::ffi::CStr::from_ptr(thread_name.as_ptr()) };
return thread_name_cstr.to_owned().to_string_lossy().to_string();
}
// Another potential reason for failure is if it couldn't open
// /proc/self/task/[tid]/comm. We're probably in a bad state anyway if that
// happens, but try to recover anyway.
// Empty string
String::new()
}
impl ShadowLogger {
fn new() -> ShadowLogger {
let (sender, receiver) = std::sync::mpsc::channel();
ShadowLogger {
records: ArrayQueue::new(SYNC_FLUSH_QD_LINES_THRESHOLD),
command_sender: Mutex::new(sender),
command_receiver: Mutex::new(receiver),
buffering_enabled: RwLock::new(false),
max_log_level: OnceCell::new(),
log_errors_to_stderr: OnceCell::new(),
}
}
// Function executed by the logger's helper thread, onto which we offload as
// much work as we can.
fn logger_thread_fn(&self) {
let command_receiver = self.command_receiver.lock().unwrap();
loop {
use std::sync::mpsc::RecvTimeoutError;
match command_receiver.recv_timeout(MIN_FLUSH_FREQUENCY) {
Ok(LoggerCommand::Flush(done_sender)) => self.flush_records(done_sender).unwrap(),
Err(RecvTimeoutError::Timeout) => {
// Flush
self.flush_records(None).unwrap();
}
Err(e) => panic!("Unexpected error {}", e),
}
}
}
// Function called by the logger's helper thread to flush the contents of
// self.records. If `done_sender` is provided, it's notified after the flush
// has completed.
fn flush_records(&self, done_sender: Option<Sender<()>>) -> std::io::Result<()> {
use std::io::Write;
// Only flush records that are already in the queue, not ones that
// arrive while we're flushing. Otherwise callers who perform a
// synchronous flush (whether this flush operation or another one that
// arrives while we're flushing) will be left waiting longer than
// necessary. Also keeps us from holding the stdout lock indefinitely.
let mut toflush = self.records.len();
let stdout_unlocked = std::io::stdout();
let stdout_locked = stdout_unlocked.lock();
let mut stdout = std::io::BufWriter::new(stdout_locked);
while toflush > 0 {
let record = match self.records.pop() {
Some(r) => r,
None => {
// This can happen if another thread panics while the
// logging thread is flushing. In that case both threads
// will be consuming from the queue.
break;
}
};
toflush -= 1;
if record.level <= Level::Error && *self.log_errors_to_stderr.get().unwrap() {
// Send to both stdout and stderr.
let stderr_unlocked = std::io::stderr();
let stderr_locked = stderr_unlocked.lock();
let mut stderr = std::io::BufWriter::new(stderr_locked);
let line = format!("{record}");
write!(stdout, "{line}")?;
write!(stderr, "{line}")?;
} else {
write!(stdout, "{record}")?;
}
}
if let Some(done_sender) = done_sender {
// We can't log from this thread without risking deadlock, so in the
// unlikely case that the calling thread has gone away, just print
// directly.
done_sender.send(()).unwrap_or_else(|e| {
println!(
"WARNING: Logger couldn't notify
calling thread: {:?}",
e
)
});
}
Ok(())
}
/// When disabled, the logger thread is notified to write each record as
/// soon as it's created. The calling thread still isn't blocked on the
/// record actually being written, though.
pub fn set_buffering_enabled(&self, buffering_enabled: bool) {
let mut writer = self.buffering_enabled.write().unwrap();
*writer = buffering_enabled;
}
/// If the maximum log level has not yet been set, returns `LevelFilter::Trace`.
pub fn max_level(&self) -> LevelFilter {
self.max_log_level
.get()
.copied()
.unwrap_or(LevelFilter::Trace)
}
/// Set the default maximum log level, but this can be overridden per-host. Is only intended to
/// be called from `init()`. Will panic if called more than once.
fn set_max_level(&self, level: LevelFilter) {
self.max_log_level.set(level).unwrap()
}
/// Set whether to log errors to stderr in addition to stdout.
///
/// Is only intended to be called from `init()`. Will panic if called more
/// than once.
fn set_log_errors_to_stderr(&self, val: bool) {
self.log_errors_to_stderr.set(val).unwrap()
}
// Send a flush command to the logger thread.
fn flush_impl(&self, notify_done: Option<Sender<()>>) {
self.send_command(LoggerCommand::Flush(notify_done))
}
// Send a flush command to the logger thread and block until it's completed.
fn flush_sync(&self) {
let (done_sender, done_receiver) = std::sync::mpsc::channel();
self.flush_impl(Some(done_sender));
done_receiver.recv().unwrap();
}
// Send a flush command to the logger thread.
fn flush_async(&self) {
self.flush_impl(None);
}
// Send a command to the logger thread.
fn send_command(&self, cmd: LoggerCommand) {
SENDER
.try_with(|thread_sender| {
if thread_sender.borrow().is_none() {
let lock = self.command_sender.lock().unwrap();
*thread_sender.borrow_mut() = Some(lock.clone());
}
thread_sender
.borrow()
.as_ref()
.unwrap()
.send(cmd)
.unwrap_or_else(|e| {
println!("WARNING: Couldn't send command to logger thread: {:?}", e);
});
})
.unwrap_or_else(|e| {
println!(
"WARNING: Couldn't get sender channel to logger thread: {:?}",
e
);
});
}
}
impl Log for ShadowLogger {
fn | enabled | identifier_name |
|
shadow_logger.rs | .
pub struct ShadowLogger {
// Channel used to send commands to the logger's thread.
//
// The Sender half of a channel isn't Sync, so we must protect it with a
// Mutex to make ShadowLogger be Sync. This is only accessed once per
// thread, though, to clone into the thread-local SENDER.
command_sender: Mutex<Sender<LoggerCommand>>,
// Like the sender, needs a Mutex for ShadowLogger to be Sync.
// The Mutex is only locked once though by the logger thread, which keeps
// it locked for as long as it's running.
command_receiver: Mutex<Receiver<LoggerCommand>>,
// A lock-free queue for individual log records. We don't put the records
// themselves in the `command_sender`, because `Sender` doesn't support
// getting the queue length. Conversely we don't put commands in this queue
// because it doesn't support blocking operations.
//
// The size is roughly SYNC_FLUSH_QD_LINES_THRESHOLD *
// size_of<ShadowLogRecord>; we might want to consider SegQueue (which grows
// and shrinks dynamically) instead if we ever make SYNC_FLUSH_QD_LINES_THRESHOLD very
// large.
records: ArrayQueue<ShadowLogRecord>,
// When false, sends a (still-asynchronous) flush command to the logger
// thread every time a record is pushed into `records`.
buffering_enabled: RwLock<bool>,
// The maximum log level, unless overridden by a host-specific log level.
max_log_level: OnceCell<LevelFilter>,
// Whether to log errors to stderr in addition to stdout.
log_errors_to_stderr: OnceCell<bool>,
}
thread_local!(static SENDER: RefCell<Option<Sender<LoggerCommand>>> = RefCell::new(None));
thread_local!(static THREAD_NAME: Lazy<String> = Lazy::new(|| { get_thread_name() }));
thread_local!(static THREAD_ID: Lazy<nix::unistd::Pid> = Lazy::new(|| { nix::unistd::gettid() }));
fn get_thread_name() -> String {
let mut thread_name = Vec::<i8>::with_capacity(16);
let res = unsafe {
thread_name.set_len(thread_name.capacity());
// ~infallible when host_name is at least 16 bytes.
libc::pthread_getname_np(
libc::pthread_self(),
thread_name.as_mut_ptr(),
thread_name.len(),
)
};
// The most likely cause of failure is a bug in the caller.
debug_assert_eq!(res, 0, "pthread_getname_np: {}", nix::errno::from_i32(res));
if res == 0 {
// SAFETY: We just initialized the input buffer `thread_name`, and
// `thread_name_cstr` won't outlive it.
let thread_name_cstr = unsafe { std::ffi::CStr::from_ptr(thread_name.as_ptr()) };
return thread_name_cstr.to_owned().to_string_lossy().to_string();
}
// Another potential reason for failure is if it couldn't open
// /proc/self/task/[tid]/comm. We're probably in a bad state anyway if that
// happens, but try to recover anyway.
// Empty string
String::new()
}
impl ShadowLogger {
fn new() -> ShadowLogger {
let (sender, receiver) = std::sync::mpsc::channel();
ShadowLogger {
records: ArrayQueue::new(SYNC_FLUSH_QD_LINES_THRESHOLD),
command_sender: Mutex::new(sender),
command_receiver: Mutex::new(receiver),
buffering_enabled: RwLock::new(false),
max_log_level: OnceCell::new(),
log_errors_to_stderr: OnceCell::new(),
}
}
// Function executed by the logger's helper thread, onto which we offload as
// much work as we can.
fn logger_thread_fn(&self) {
let command_receiver = self.command_receiver.lock().unwrap();
loop {
use std::sync::mpsc::RecvTimeoutError;
match command_receiver.recv_timeout(MIN_FLUSH_FREQUENCY) {
Ok(LoggerCommand::Flush(done_sender)) => self.flush_records(done_sender).unwrap(),
Err(RecvTimeoutError::Timeout) => {
// Flush
self.flush_records(None).unwrap();
}
Err(e) => panic!("Unexpected error {}", e),
}
}
}
// Function called by the logger's helper thread to flush the contents of
// self.records. If `done_sender` is provided, it's notified after the flush
// has completed.
fn flush_records(&self, done_sender: Option<Sender<()>>) -> std::io::Result<()> {
use std::io::Write;
// Only flush records that are already in the queue, not ones that
// arrive while we're flushing. Otherwise callers who perform a
// synchronous flush (whether this flush operation or another one that
// arrives while we're flushing) will be left waiting longer than
// necessary. Also keeps us from holding the stdout lock indefinitely.
let mut toflush = self.records.len();
let stdout_unlocked = std::io::stdout();
let stdout_locked = stdout_unlocked.lock();
let mut stdout = std::io::BufWriter::new(stdout_locked);
while toflush > 0 {
let record = match self.records.pop() {
Some(r) => r,
None => {
// This can happen if another thread panics while the
// logging thread is flushing. In that case both threads
// will be consuming from the queue.
break;
}
};
toflush -= 1;
if record.level <= Level::Error && *self.log_errors_to_stderr.get().unwrap() {
// Send to both stdout and stderr.
let stderr_unlocked = std::io::stderr();
let stderr_locked = stderr_unlocked.lock();
let mut stderr = std::io::BufWriter::new(stderr_locked);
let line = format!("{record}");
write!(stdout, "{line}")?;
write!(stderr, "{line}")?;
} else {
write!(stdout, "{record}")?;
}
}
if let Some(done_sender) = done_sender {
// We can't log from this thread without risking deadlock, so in the
// unlikely case that the calling thread has gone away, just print
// directly.
done_sender.send(()).unwrap_or_else(|e| {
println!(
"WARNING: Logger couldn't notify
calling thread: {:?}",
e
)
});
}
Ok(())
}
/// When disabled, the logger thread is notified to write each record as
/// soon as it's created. The calling thread still isn't blocked on the
/// record actually being written, though.
pub fn set_buffering_enabled(&self, buffering_enabled: bool) {
let mut writer = self.buffering_enabled.write().unwrap();
*writer = buffering_enabled;
}
/// If the maximum log level has not yet been set, returns `LevelFilter::Trace`.
pub fn max_level(&self) -> LevelFilter {
self.max_log_level
.get()
.copied()
.unwrap_or(LevelFilter::Trace)
}
/// Set the default maximum log level, but this can be overridden per-host. Is only intended to
/// be called from `init()`. Will panic if called more than once.
fn set_max_level(&self, level: LevelFilter) {
self.max_log_level.set(level).unwrap()
}
/// Set whether to log errors to stderr in addition to stdout.
///
/// Is only intended to be called from `init()`. Will panic if called more
/// than once.
fn set_log_errors_to_stderr(&self, val: bool) {
self.log_errors_to_stderr.set(val).unwrap()
}
// Send a flush command to the logger thread.
fn flush_impl(&self, notify_done: Option<Sender<()>>) {
self.send_command(LoggerCommand::Flush(notify_done))
}
// Send a flush command to the logger thread and block until it's completed.
fn flush_sync(&self) {
let (done_sender, done_receiver) = std::sync::mpsc::channel();
self.flush_impl(Some(done_sender));
done_receiver.recv().unwrap();
}
// Send a flush command to the logger thread.
fn flush_async(&self) {
self.flush_impl(None);
}
// Send a command to the logger thread.
fn send_command(&self, cmd: LoggerCommand) {
SENDER
.try_with(|thread_sender| {
if thread_sender.borrow().is_none() {
let lock = self.command_sender.lock().unwrap();
*thread_sender.borrow_mut() = Some(lock.clone());
}
thread_sender
.borrow()
.as_ref()
.unwrap()
.send(cmd)
.unwrap_or_else(|e| {
println!("WARNING: Couldn't send command to logger thread: {:?}", e);
});
})
.unwrap_or_else(|e| {
println!(
"WARNING: Couldn't get sender channel to logger thread: {:?}",
e
);
});
}
}
impl Log for ShadowLogger {
fn enabled(&self, metadata: &Metadata) -> bool {
let filter = match Worker::with_active_host(|host| host.info().log_level) {
Some(Some(level)) => level,
_ => self.max_level(),
};
metadata.level() <= filter | }
| random_line_split |
|
raw_data_process.py | ') as f:
for line in f:
stopwords.add(line.strip())
return stopwords
class ProcessedData:
def __init__(self, question, question_label, background, background_label):
self.question = question
self.question_label = question_label
self.background = background
self.background_label = background_label
class Word:
def __init__(self, text: str, start: int, end: int):
self.text = text
self.start = start
self.end = end
# implement __hash__ and __eq__ so that set-based deduplication works later
def __hash__(self):
return hash(self.text)
def __eq__(self, other):
return type(self) == type(other) and self.text == other.text
def __str__(self):
return str(self.to_dict())
def to_dict(self):
return {'text': self.text, 'start': self.start, 'end': self.end}
def strip(self, s):
if self.text.startswith(s):
self.text = self.text[len(s):]
self.start += len(s)
if self.text.endswith(s):
self.text = self.text[:-len(s)]
self.end -= len(s)
class RawProcessor:
def __init__(self, file_paths: [str], store_path: str, least_word_len: int = 2, use_cut: bool = False,
redundant: bool = True):
"""
Process the raw data
:param file_paths: paths of the files to process
:param store_path: directory where the processed data is stored
:param least_word_len: minimum length of an extracted word
:param use_cut: whether to use word segmentation
:param redundant: whether to keep duplicate words within each sentence
"""
self.file_paths = file_paths
self.store_path = store_path
self.use_cut = use_cut
self.least_word_len = least_word_len
self.redundant = redundant
# word-frequency statistics collected during processing {word : freq}
self.words_counter = defaultdict(int)
# tally of total lengths, used to choose a suitable max_len {len : freq}
self.length_counter = defaultdict(int)
# the processed examples
self.processed_data: [ProcessedData] = []
def _get_same_word(self, source: str, target: str):
"""
Walk the source sentence and extract the spans it shares with the target sentence, following the longest-match rule
:param source: source sentence
:param target: target sentence
:return: word, word_index
"""
source_len = len(source)
res_words: [Word] = [] # results to return
index = 0
while index < source_len:
# not a punctuation mark, and it appears in the target (the explanation)
if source[index] not in ZH_SYMBOLS and source[index] in target:
word_len = 1
# extend the match forward
while index + word_len < source_len and source[index:index + word_len + 1] in target:
if source[index + word_len] not in ZH_SYMBOLS:
word_len += 1
else:
break
word = source[index:index + word_len]
if len(word) >= self.least_word_len and word not in STOPWORDS:
# add this word
res_words.append(Word(text=word, start=index, end=index + word_len))
index += word_len
else:
index += 1
return res_words
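# Illustrative example (not in the original file): with least_word_len = 2 and no stopword
# hits, _get_same_word("abcde", "xxabcx") grows the match "a" -> "ab" -> "abc" (since "abcd"
# no longer occurs in the target) and returns [Word(text="abc", start=0, end=3)].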
def _get_same_words_with_cut(self, source: str, target: str):
"""
Extract the shared words using jieba word segmentation
"""
res_words: [Word] = []
source_cut = [Word(*word) for word in jieba.tokenize(source)]
target_cut = [Word(*word) for word in jieba.tokenize(target)]
for word in source_cut:
if word in target_cut and word.text not in STOPWORDS and len(word.text) >= self.least_word_len:
res_words.append(word)
return res_words
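# Note (jieba API assumption, added for clarity): jieba.tokenize yields (word, start, end)
# tuples over the input string, which is why Word(*word) above unpacks each token directly.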
def _filter_words(self, words: [Word]):
"""
Remove some of the extracted words according to certain filtering conditions
"""
_words = words if self.redundant else list(set(words))
# counter_path = os.path.join(self.store_path, 'word_count.json')
# if not os.path.exists(counter_path):
# return _words
# else:
# counter_dict = read_json(counter_path)
# iterate in reverse so items can be removed during iteration
for i in range(len(_words) - 1, -1, -1):
word = _words[i]
word.strip('็')
word.strip('ๅจ')
word.strip('ไธ')
| trip('ไบ')
# drop words that are too short or that consist only of digits
if len(word.text) < self.least_word_len or word.text.isnumeric():
_words.remove(word)
return _words
@staticmethod
def generate_tags(sequence_len: int, words: [Word]):
"""
Generate BIO tags from the sequence length and the word position indices
:param sequence_len: sequence length
:param words: the extracted words
:return: tags
"""
tags = ['O'] * sequence_len
for word in words:
start = word.start
tags[start] = 'B'
while start < word.end - 1:
start += 1
tags[start] = 'I'
return tags
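# Worked example (added for clarity): generate_tags(5, [Word("bc", 1, 3)]) marks index 1 as
# 'B' and index 2 as 'I', producing ['O', 'B', 'I', 'O', 'O'].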
def process_raw_data(self):
"""
Process the raw data
"""
# iterate over and read each input file
if self.use_cut:
jieba.load_userdict("data/geo_words_no_normal.txt")
for file_path in self.file_paths:
# read the raw_data
raw_data = read_json(file_path)
logger.info(f"Processing {file_path} - Question count: {len(raw_data)}")
background_key = 'scenario_text' if "websoft" in file_path else 'background'
# process the raw_data
for idx, total_question in enumerate(raw_data):
# extract the background and the explanation
background = total_question[background_key]
explain = total_question['explanation']
question = total_question['question']
# skip questions without background information or an explanation
if not background or not explain or len(explain) < 50:
continue
# preprocessing
background = preprocess(background)
explain = preprocess(explain)
question = preprocess(question)
# find the overlapping words
if self.use_cut:
words_background = self._get_same_words_with_cut(background, explain)
words_question = self._get_same_words_with_cut(question, explain)
else:
words_background = self._get_same_word(background, explain)
words_question = self._get_same_word(question, explain)
# filter out noise
words_question = self._filter_words(words_question)
words_background = self._filter_words(words_background)
# collect word-frequency statistics
for word in words_question + words_background:
self.words_counter[word.text] += 1
# generate the tags
tags_back = self.generate_tags(len(background), words_background)
tags_question = self.generate_tags(len(question), words_question)
# record the combined length
self.length_counter[len(question) + len(background)] += 1
# append the processed example
self.processed_data.append(ProcessedData(question=question,
question_label=tags_question,
background=background,
background_label=tags_back))
if idx < 5:
logger.info(f"\t example_{idx + 1} - total len: {len(question) + len(background)}")
logger.info(f"question_len: {len(question)}, question: {question}")
logger.info("words_question: " + ' | '.join([word.text for word in words_question]))
logger.info("tags_question: " + ' '.join(tags_question))
logger.info(f"background_len: {len(background)}, background: {background}")
logger.info("tags_back: " + ' '.join(tags_back))
logger.info("words_back: " + ' | '.join([word.text for word in words_background]))
logger.info("explain: " + explain)
def write_processed_data(self, data_type: str, processed_data: [ProcessedData]):
"""
Write the processed data to a file
:param data_type: which split to write: train, dev, or test
:param processed_data: the processed data
"""
with open(os.path.join(self.store_path, f'{data_type}.txt'), 'w', encoding='utf8') as f:
for data in processed_data:
f.write('-DOC_START-\n')
for i in range(len(data.question)):
f.write(data.question[i] + ' ' + data.question_label[i] + '\n')
f.write('\n')
for i in range(len(data.background)):
f.write(data.background[i] + ' ' + data.background_label[i] + '\n')
def prepare_data(self, data_split_dict):
"""
Write the processed data into the target files according to the given split ratios
:param data_split_dict:
"""
random.shuffle(self.processed_data)
| word.s | identifier_name |
raw_data_process.py | ') as f:
for line in f:
stopwords.add(line.strip())
return stopwords
class ProcessedData:
def __init__(self, question, question_label, background, background_label):
self.question = question
self.question_label = question_label
self.background = background
self.background_label = background_label
class Word:
def __init__(self, text: str, start: int, end: int):
self.text = text
self.start = start
self.end = end
# implement __hash__ and __eq__ so that set-based deduplication works later
def __hash__(self):
return hash(self.text)
def __eq__(self, other):
return type(self) == type(other) and self.text == other.text
def __str__(self):
return str(self.to_dict())
def to_dict(self):
return {'text': self.text, 'start': self.start, 'end': self.end}
def strip(self, s):
if self.text.startswith(s):
self.text = self.text[len(s):]
self.start += len(s)
if self.text.endswith(s):
self.text = self.text[:-len(s)]
self.end -= len(s)
class RawProcessor:
def __init__(self, file_paths: [str], store_path: str, least_word_len: int = 2, use_cut: bool = False,
redundant: bool = True):
"""
Process the raw data
:param file_paths: paths of the files to process
:param store_path: directory where the processed data is stored
:param least_word_len: minimum length of an extracted word
:param use_cut: whether to use word segmentation
:param redundant: whether to keep duplicate words within each sentence
"""
self.file_paths = file_paths
self.store_path = store_path
self.use_cut = use_cut
self.least_word_len = least_word_len
self.redundant = redundant
# word-frequency statistics collected during processing {word : freq}
self.words_counter = defaultdict(int)
# tally of total lengths, used to choose a suitable max_len {len : freq}
self.length_counter = defaultdict(int)
# the processed examples
self.processed_data: [ProcessedData] = []
def _get_same_word(self, source: str, target: str):
"""
Walk the source sentence and extract the spans it shares with the target sentence, following the longest-match rule
:param source: source sentence
:param target: target sentence
:return: word, word_index
"""
source_len = len(source)
res_words: [Word] = [] # results to return
index = 0
while index < source_len:
# not a punctuation mark, and it appears in the target (the explanation)
if source[index] not in ZH_SYMBOLS and source[index] in target:
word_len = 1
# extend the match forward
while index + word_len < source_len and source[index:index + word_len + 1] in target:
if source[index + word_len] not in ZH_SYMBOLS:
word_len += 1
else:
break
word = source[index:index + word_len]
if len(word) >= self.least_word_len and word not in STOPWORDS:
# add this word
res_words.append(Word(text=word, start=index, end=index + word_len))
index += word_len
else:
index += 1
return res_words
def _get_same_words_with_cut(self, source: str, target: str):
"""
Extract the shared words using jieba word segmentation
"""
res_words: [Word] = []
source_cut = [Word(*word) for word in jieba.tokenize(source)]
target_cut = [Word(*word) for word in jieba.tokenize(target)]
for word in source_cut:
if word in target_cut and word.text not in STOPWORDS and len(word.text) >= self.least_word_len:
res_words.append(word)
return res_words
def _filter_words(self, words: [Word]):
"""
Remove some of the extracted words according to certain filtering conditions
"""
_words = words if self.redundant else list(set(words))
# counter_path = os.path.join(self.store_path, 'word_count.json')
# if not os.path.exists(counter_path):
# return _words
# else:
# counter_dict = read_json(counter_path)
# iterate in reverse so items can be removed during iteration
for i in range(len(_words) - 1, -1, -1):
word = _words[i]
word.strip('็')
word.strip('ๅจ')
word.strip('ไธ')
word.strip('ไบ')
# drop words that are too short or that consist only of digits
if len(word.text) < self.least_word_len or word.text.isnumeric():
_words.remove(word)
return _words
@staticmethod
def generate_tags(sequence_len: int, words: [Word]):
"""
Generate BIO tags from the sequence length and the word position indices
:param sequence_len: sequence length
:param words: the extracted words
:return: tags
"""
tags = ['O'] * sequence_len
for word in words:
start = word.start
tags[start] = 'B'
while start < word.end - 1:
start += 1
tags[start] = 'I'
return tags
def process_raw_data(self):
"""
Process the raw data
"""
# iterate over and read each input file
if self.use_cut:
jieba.load_userdict("data/geo_words_no_normal.txt")
for file_path in self.file_paths:
# read the raw_data
raw_data = read_json(file_path)
logger.info(f"Processing {file_path} - Question count: {len(raw_data)}")
background_key = 'scenario_text' if "websoft" in file_path else 'background' | question = total_question['question']
# skip questions without background information or an explanation
if not background or not explain or len(explain) < 50:
continue
# preprocessing
background = preprocess(background)
explain = preprocess(explain)
question = preprocess(question)
# find the overlapping words
if self.use_cut:
words_background = self._get_same_words_with_cut(background, explain)
words_question = self._get_same_words_with_cut(question, explain)
else:
words_background = self._get_same_word(background, explain)
words_question = self._get_same_word(question, explain)
# filter out noise
words_question = self._filter_words(words_question)
words_background = self._filter_words(words_background)
# collect word-frequency statistics
for word in words_question + words_background:
self.words_counter[word.text] += 1
# generate the tags
tags_back = self.generate_tags(len(background), words_background)
tags_question = self.generate_tags(len(question), words_question)
# ็ป่ฎก้ฟๅบฆ
self.length_counter[len(question) + len(background)] += 1
# ๆทปๅ ๅค็ๅฅฝ็ๆฐๆฎ/
self.processed_data.append(ProcessedData(question=question,
question_label=tags_question,
background=background,
background_label=tags_back))
if idx < 5:
logger.info(f"\t example_{idx + 1} - total len: {len(question) + len(background)}")
logger.info(f"question_len: {len(question)}, question: {question}")
logger.info("words_question: " + ' | '.join([word.text for word in words_question]))
logger.info("tags_question: " + ' '.join(tags_question))
logger.info(f"background_len: {len(background)}, background: {background}")
logger.info("tags_back: " + ' '.join(tags_back))
logger.info("words_back: " + ' | '.join([word.text for word in words_background]))
logger.info("explain: " + explain)
def write_processed_data(self, data_type: str, processed_data: [ProcessedData]):
"""
        ๅฐๅค็ๅฅฝ็ๆฐๆฎๅๅ
ฅๆไปถ
        :param data_type: ๅๅ
ฅๆไปถ็็ง็ฑป๏ผtrain dev test
:param processed_data: ๅค็ๅฅฝ็ๆฐๆฎ
"""
with open(os.path.join(self.store_path, f'{data_type}.txt'), 'w', encoding='utf8') as f:
for data in processed_data:
f.write('-DOC_START-\n')
for i in range(len(data.question)):
f.write(data.question[i] + ' ' + data.question_label[i] + '\n')
f.write('\n')
for i in range(len(data.background)):
f.write(data.background[i] + ' ' + data.background_label[i] + '\n')
def prepare_data(self, data_split_dict):
"""
        ๆ็
งไธๅฎ็ๆฏไพๅฐๅค็ๅฅฝ็ๆฐๆฎๅๅ
ฅๆๅฎๆไปถ
:param data_split_dict:
"""
random.shuffle(self.processed_data)
total_size | # ๅค็raw_data
for idx, total_question in enumerate(raw_data):
# ๆๅ่ๆฏๅ่งฃๆ
background = total_question[background_key]
explain = total_question['explanation'] | random_line_split |
raw_data_process.py | # ๅค็่ฟ็จไธญ็่ฏ้ข็ป่ฎก {word : freq}
self.words_counter = defaultdict(int)
# ็ป่ฎกๆป้ฟๅบฆ๏ผ่ฎพ็ฝฎๅ้max_len {len : freq}
self.length_counter = defaultdict(int)
# ๅค็ๅฎ็word้ๅ
self.processed_data: [ProcessedData] = []
def _get_same_word(self, source: str, target: str):
"""
        ้ๅๅๅฅๅญ๏ผๅจๅๅฅๅญไธญๆๅไธ็ฎๆ ๅฅๅญ็ธๅ้จๅ๏ผๆ็
งๆ้ฟๅน้
ๅๅ
:param source: ๅๅฅๅญ
:param target: ็ฎๆ ๅฅๅญ
:return: word, word_index
"""
source_len = len(source)
res_words: [Word] = [] # ่ฟๅ็ปๆ
index = 0
while index < source_len:
# ไธๆฏๆ ็น็ฌฆๅท๏ผไธๅจ่งฃๆไธญ
if source[index] not in ZH_SYMBOLS and source[index] in target:
word_len = 1
                # ๅๅๅปถไผธ
while index + word_len < source_len and source[index:index + word_len + 1] in target:
if source[index + word_len] not in ZH_SYMBOLS:
word_len += 1
else:
break
word = source[index:index + word_len]
if len(word) >= self.least_word_len and word not in STOPWORDS:
                    # ๅ ๅ
ฅ่ฏฅ่ฏ
res_words.append(Word(text=word, start=index, end=index + word_len))
index += word_len
else:
index += 1
return res_words
def _get_same_words_with_cut(self, source: str, target: str):
"""
ไฝฟ็จ็ปๅทดๅ่ฏๆฅๆฝๅ็ธๅ่ฏ
"""
res_words: [Word] = []
source_cut = [Word(*word) for word in jieba.tokenize(source)]
target_cut = [Word(*word) for word in jieba.tokenize(target)]
for word in source_cut:
if word in target_cut and word.text not in STOPWORDS and len(word.text) >= self.least_word_len:
res_words.append(word)
return res_words
def _filter_words(self, words: [Word]):
"""
ๆ นๆฎไธๅฎๆกไปถๅป้คๆฝๅ้ๅ่ฏไธญ็ไธไบ่ฏ
"""
_words = words if self.redundant else list(set(words))
# counter_path = os.path.join(self.store_path, 'word_count.json')
# if not os.path.exists(counter_path):
# return _words
# else:
# counter_dict = read_json(counter_path)
# ๅๅบ้ๅ๏ผๅจ้ๅๆถๅ ้ค
for i in range(len(_words) - 1, -1, -1):
word = _words[i]
word.strip('็')
word.strip('ๅจ')
word.strip('ไธ')
word.strip('ไบ')
            # ้ฟๅบฆไธ็ฌฆๅๆๅชๅ
ๅซๆฐๅญๅๅญๆฏ
if len(word.text) < self.least_word_len or word.text.isnumeric():
_words.remove(word)
return _words
@staticmethod
def generate_tags(sequence_len: int, words: [Word]):
"""
ๆ นๆฎ้ฟๅบฆๅไฝ็ฝฎ็ดขๅผไบง็BIOๆ ็ญพ
:param sequence_len: ้ฟๅบฆ
:param words: ๆฝๅ่ฏ้ๅ
:return: ๆ ็ญพ
"""
tags = ['O'] * sequence_len
for word in words:
start = word.start
tags[start] = 'B'
while start < word.end - 1:
start += 1
tags[start] = 'I'
return tags
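    # Example: generate_tags(5, [Word('ๅฐๅฝข', 1, 3)]) returns
    # ['O', 'B', 'I', 'O', 'O'] -- 'B' marks the first character of each
    # extracted word, 'I' the remaining characters, and 'O' everything else.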
def process_raw_data(self):
"""
ๅค็ๅๅงๆฐๆฎ
"""
# ๅพช็ฏ่ฏปๅๆไปถ
if self.use_cut:
jieba.load_userdict("data/geo_words_no_normal.txt")
for file_path in self.file_paths:
# ่ฏปๅraw_data
raw_data = read_json(file_path)
logger.info(f"Processing {file_path} - Question count: {len(raw_data)}")
background_key = 'scenario_text' if "websoft" in file_path else 'background'
# ๅค็raw_data
for idx, total_question in enumerate(raw_data):
# ๆๅ่ๆฏๅ่งฃๆ
background = total_question[background_key]
explain = total_question['explanation']
question = total_question['question']
# ่ทณ่ฟๆฒกๆ่ๆฏไฟกๆฏๆ่งฃๆ็้ข็ฎ
if not background or not explain or len(explain) < 50:
continue
# ้ขๅค็
background = preprocess(background)
explain = preprocess(explain)
question = preprocess(question)
# ๅฏปๆพ้ๅ่ฏ
if self.use_cut:
words_background = self._get_same_words_with_cut(background, explain)
words_question = self._get_same_words_with_cut(question, explain)
else:
words_background = self._get_same_word(background, explain)
words_question = self._get_same_word(question, explain)
# ่ฟๆปคๅชๅฃฐ
words_question = self._filter_words(words_question)
words_background = self._filter_words(words_background)
# ็ป่ฎก่ฏ้ขไฟกๆฏ
for word in words_question + words_background:
self.words_counter[word.text] += 1
# ็ๆๆ ็ญพ
tags_back = self.generate_tags(len(background), words_background)
tags_question = self.generate_tags(len(question), words_question)
# ็ป่ฎก้ฟๅบฆ
self.length_counter[len(question) + len(background)] += 1
# ๆทปๅ ๅค็ๅฅฝ็ๆฐๆฎ/
self.processed_data.append(ProcessedData(question=question,
question_label=tags_question,
background=background,
background_label=tags_back))
if idx < 5:
logger.info(f"\t example_{idx + 1} - total len: {len(question) + len(background)}")
logger.info(f"question_len: {len(question)}, question: {question}")
logger.info("words_question: " + ' | '.join([word.text for word in words_question]))
logger.info("tags_question: " + ' '.join(tags_question))
logger.info(f"background_len: {len(background)}, background: {background}")
logger.info("tags_back: " + ' '.join(tags_back))
logger.info("words_back: " + ' | '.join([word.text for word in words_background]))
logger.info("explain: " + explain)
def write_processed_data(self, data_type: str, processed_data: [ProcessedData]):
"""
        ๅฐๅค็ๅฅฝ็ๆฐๆฎๅๅ
ฅๆไปถ
        :param data_type: ๅๅ
ฅๆไปถ็็ง็ฑป๏ผtrain dev test
:param processed_data: ๅค็ๅฅฝ็ๆฐๆฎ
"""
with open(os.path.join(self.store_path, f'{data_type}.txt'), 'w', encoding='utf8') as f:
for data in processed_data:
f.write('-DOC_START-\n')
for i in range(len(data.question)):
f.write(data.question[i] + ' ' + data.question_label[i] + '\n')
f.write('\n')
for i in range(len(data.background)):
f.write(data.background[i] + ' ' + data.background_label[i] + '\n')
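    # Output format note: each line holds one character and its BIO tag separated
    # by a space, a blank line separates the question block from the background
    # block, and '-DOC_START-' marks the start of each sample.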
def prepare_data(self, data_split_dict):
"""
        ๆ็
งไธๅฎ็ๆฏไพๅฐๅค็ๅฅฝ็ๆฐๆฎๅๅ
ฅๆๅฎๆไปถ
:param data_split_dict:
"""
random.shuffle(self.processed_data)
total_size = len(self.processed_data)
train_size = int(data_split_dict['train'] * total_size)
dev_size = int(data_split_dict['dev'] * total_size)
        # [a,b) ๅทฆ้ญๅณๅผๅบ้ด
self.write_processed_data('train', self.processed_data[:train_size])
self.write_processed_data('dev', self.processed_data[train_size:dev_size + train_size])
self.write_processed_data('test', self.processed_data[dev_size + train_size:])
logger.info(f"Prepared: total size = {total_size} | train size = {train_size} | dev size = {dev_size}")
def write_counter(counter: dict, path, key=None, reverse=False):
"""
    ๅฐ่ฏ้ข็ป่ฎกๆๅบ็ถๅๅๅ
ฅjsonๆไปถ
"""
ordered_words_counter = OrderedDict(
sorted(counter.items(), key=key, reverse=reverse))
write_json(path, ordered_words_counter)
def start():
# ๆฐๆฎๅญๅจ่ทฏๅพ
data_process_types = ['data_all']
cuts = ['cut', 'no_cut']
redundants = ['no_redundant']
for data_process_type in data_process_types:
data_path = os.path.join('./data/raw', data_process_type)
        # ๅพ
ๅค็ๆไปถ้ๅ
files = ['53_data.json', 'spider_data.json', 'websoft_data.json']
file_paths = []
for file in files:
file_paths.append(os.path.join(data_path, file))
# ๆฏๅฆๅ่ฏ
for cut in cuts:
use_cut = (cut == 'cut')
            # ๆฏๅฆๅ
่ฎธ้ๅค
for redundant in redundants:
use_redundant = (redundant == 'redundant')
store_data_path = os.path.join('./data/processed', data_process_type, cut, redundant)
| if not os.path.exists(store_data_path):
os.makedirs(store_data_path)
processor = RawProcessor(file_paths=file_paths, store_path=store_data_path, use_cut=use_cut,
| identifier_body |
|
raw_data_process.py | ') as f:
for line in f:
stopwords.add(line.strip())
return stopwords
class ProcessedData:
def __init__(self, question, question_label, background, background_label):
self.question = question
self.question_label = question_label
self.background = background
self.background_label = background_label
class Word:
def __init__(self, text: str, start: int, end: int):
self.text = text
self.start = start
self.end = end
# ๅฎ็ฐ__hash__ and __eq__ ๆนไพฟไนๅ็setๅป้
def __hash__(self):
return hash(self.text)
def __eq__(self, other):
return type(self) == type(other) and self.text == other.text
def __str__(self):
return str(self.to_dict())
def to_dict(self):
return {'text': self.text, 'start': self.start, 'end': self.end}
def strip(self, s):
if self.text.startswith(s):
self.text = self.text[len(s):]
self.start += len(s)
if self.text.endswith(s):
self.text = self.text[:-len(s)]
self.end -= len(s)
class RawProcessor:
def __init__(self, file_paths: [str], store_path: str, least_word_len: int = 2, use_cut: bool = False,
redundant: bool = True):
"""
ๅค็ๅๅงๆฐๆฎ
:param file_paths: ่ฆๅค็็ๆไปถ้ๅ็่ทฏๅพ
:param store_path: ๅค็ๅฎๆ็ๆฐๆฎๅญๆพ่ทฏๅพ
:param least_word_len: ๆฝๅ็ๆๅฐ่ฏ้ฟ
:param use_cut: ๆฏๅฆไฝฟ็จๅ่ฏ
:param redundant: ๆฏๅฆไฟ็ๆฏไธชๅฅๅญไธญ็้ๅค่ฏ
"""
self.file_paths = file_paths
self.store_path = store_path
self.use_cut = use_cut
self.least_word_len = least_word_len
self.redundant = redundant
# ๅค็่ฟ็จไธญ็่ฏ้ข็ป่ฎก {word : freq}
self.words_counter = defaultdict(int)
# ็ป่ฎกๆป้ฟๅบฆ๏ผ่ฎพ็ฝฎๅ้max_len {len : freq}
self.length_counter = defaultdict(int)
# ๅค็ๅฎ็word้ๅ
self.processed_data: [ProcessedData] = []
def _get_same_word(self, source: str, target: str):
"""
        ้ๅๅๅฅๅญ๏ผๅจๅๅฅๅญไธญๆๅไธ็ฎๆ ๅฅๅญ็ธๅ้จๅ๏ผๆ็
งๆ้ฟๅน้
ๅๅ
:param source: ๅๅฅๅญ
:param target: ็ฎๆ ๅฅๅญ
:return: word, word_index
"""
source_len = len(source)
res_words: [Word] = [] # ่ฟๅ็ปๆ
index = 0
while index < source_len:
# ไธๆฏๆ ็น็ฌฆๅท๏ผไธๅจ่งฃๆไธญ
if source[index] not in ZH_SYMBOLS and source[index] in target:
word_len = 1
                # ๅๅๅปถไผธ
while index + word_len < source_len and source[index:index + word_len + 1] in target:
if source[index + word_len] not in ZH_SYMBOLS:
word_len += 1
else:
break
word = source[index:index + word_len]
if len(word) >= self.least_word_len and word not in STOPWORDS:
                    # ๅ ๅ
ฅ่ฏฅ่ฏ
res_words.append(Word(text=word, start=index, end=index + word_len))
index += word_len
else:
index += 1
return res_words
def _get_same_words_with_c | word) for word in jieba.tokenize(source)]
target_cut = [Word(*word) for word in jieba.tokenize(target)]
for word in source_cut:
if word in target_cut and word.text not in STOPWORDS and len(word.text) >= self.least_word_len:
res_words.append(word)
return res_words
def _filter_words(self, words: [Word]):
"""
ๆ นๆฎไธๅฎๆกไปถๅป้คๆฝๅ้ๅ่ฏไธญ็ไธไบ่ฏ
"""
_words = words if self.redundant else list(set(words))
# counter_path = os.path.join(self.store_path, 'word_count.json')
# if not os.path.exists(counter_path):
# return _words
# else:
# counter_dict = read_json(counter_path)
# ๅๅบ้ๅ๏ผๅจ้ๅๆถๅ ้ค
for i in range(len(_words) - 1, -1, -1):
word = _words[i]
word.strip('็')
word.strip('ๅจ')
word.strip('ไธ')
word.strip('ไบ')
            # ้ฟๅบฆไธ็ฌฆๅๆๅชๅ
ๅซๆฐๅญๅๅญๆฏ
if len(word.text) < self.least_word_len or word.text.isnumeric():
_words.remove(word)
return _words
@staticmethod
def generate_tags(sequence_len: int, words: [Word]):
"""
ๆ นๆฎ้ฟๅบฆๅไฝ็ฝฎ็ดขๅผไบง็BIOๆ ็ญพ
:param sequence_len: ้ฟๅบฆ
:param words: ๆฝๅ่ฏ้ๅ
:return: ๆ ็ญพ
"""
tags = ['O'] * sequence_len
for word in words:
start = word.start
tags[start] = 'B'
while start < word.end - 1:
start += 1
tags[start] = 'I'
return tags
def process_raw_data(self):
"""
ๅค็ๅๅงๆฐๆฎ
"""
# ๅพช็ฏ่ฏปๅๆไปถ
if self.use_cut:
jieba.load_userdict("data/geo_words_no_normal.txt")
for file_path in self.file_paths:
# ่ฏปๅraw_data
raw_data = read_json(file_path)
logger.info(f"Processing {file_path} - Question count: {len(raw_data)}")
background_key = 'scenario_text' if "websoft" in file_path else 'background'
# ๅค็raw_data
for idx, total_question in enumerate(raw_data):
# ๆๅ่ๆฏๅ่งฃๆ
background = total_question[background_key]
explain = total_question['explanation']
question = total_question['question']
# ่ทณ่ฟๆฒกๆ่ๆฏไฟกๆฏๆ่งฃๆ็้ข็ฎ
if not background or not explain or len(explain) < 50:
continue
# ้ขๅค็
background = preprocess(background)
explain = preprocess(explain)
question = preprocess(question)
# ๅฏปๆพ้ๅ่ฏ
if self.use_cut:
words_background = self._get_same_words_with_cut(background, explain)
words_question = self._get_same_words_with_cut(question, explain)
else:
words_background = self._get_same_word(background, explain)
words_question = self._get_same_word(question, explain)
# ่ฟๆปคๅชๅฃฐ
words_question = self._filter_words(words_question)
words_background = self._filter_words(words_background)
# ็ป่ฎก่ฏ้ขไฟกๆฏ
for word in words_question + words_background:
self.words_counter[word.text] += 1
# ็ๆๆ ็ญพ
tags_back = self.generate_tags(len(background), words_background)
tags_question = self.generate_tags(len(question), words_question)
# ็ป่ฎก้ฟๅบฆ
self.length_counter[len(question) + len(background)] += 1
# ๆทปๅ ๅค็ๅฅฝ็ๆฐๆฎ/
self.processed_data.append(ProcessedData(question=question,
question_label=tags_question,
background=background,
background_label=tags_back))
if idx < 5:
logger.info(f"\t example_{idx + 1} - total len: {len(question) + len(background)}")
logger.info(f"question_len: {len(question)}, question: {question}")
logger.info("words_question: " + ' | '.join([word.text for word in words_question]))
logger.info("tags_question: " + ' '.join(tags_question))
logger.info(f"background_len: {len(background)}, background: {background}")
logger.info("tags_back: " + ' '.join(tags_back))
logger.info("words_back: " + ' | '.join([word.text for word in words_background]))
logger.info("explain: " + explain)
def write_processed_data(self, data_type: str, processed_data: [ProcessedData]):
"""
        ๅฐๅค็ๅฅฝ็ๆฐๆฎๅๅ
ฅๆไปถ
        :param data_type: ๅๅ
ฅๆไปถ็็ง็ฑป๏ผtrain dev test
:param processed_data: ๅค็ๅฅฝ็ๆฐๆฎ
"""
with open(os.path.join(self.store_path, f'{data_type}.txt'), 'w', encoding='utf8') as f:
for data in processed_data:
f.write('-DOC_START-\n')
for i in range(len(data.question)):
f.write(data.question[i] + ' ' + data.question_label[i] + '\n')
f.write('\n')
for i in range(len(data.background)):
f.write(data.background[i] + ' ' + data.background_label[i] + '\n')
def prepare_data(self, data_split_dict):
"""
        ๆ็
งไธๅฎ็ๆฏไพๅฐๅค็ๅฅฝ็ๆฐๆฎๅๅ
ฅๆๅฎๆไปถ
:param data_split_dict:
"""
random.shuffle(self.processed_data)
total | ut(self, source: str, target: str):
"""
ไฝฟ็จ็ปๅทดๅ่ฏๆฅๆฝๅ็ธๅ่ฏ
"""
res_words: [Word] = []
source_cut = [Word(* | conditional_block |
exchange_endpoint0.py | eth_account
import algosdk
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import load_only
from datetime import datetime
import sys
from models import Base, Order, Log
engine = create_engine('sqlite:///orders.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
app = Flask(__name__)
# These decorators allow you to use g.session to access the database inside the request code
# g is an "application global" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals
@app.before_request
def create_session():
g.session = scoped_session(DBSession)
@app.teardown_appcontext
# def shutdown_session(response_or_exc):
def shutdown_session(exception=None):
sys.stdout.flush()
g.session.commit()
g.session.remove()
""" Suggested helper methods """
# check whether โsigโ is a valid signature of json.dumps(payload),
# using the signature algorithm specified by the platform field.
# Be sure to verify the payload using the sender_pk.
def check_sig(payload,sig):
pk = payload['sender_pk']
platform = payload['platform']
payload_json = json.dumps(payload)
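    # Note: verification below runs over this exact json.dumps serialization
    # (default key order and separators), so the client must sign the same
    # string for the signature to verify.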
result = False
if platform == "Algorand":
print("Algorand")
if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):
print("Algo sig verifies!")
result = True
elif platform == "Ethereum":
print("Ethereum")
eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json)
if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk:
print("Eth sig verifies!")
result = True
return result, payload_json
# def fill_order(order,txes=[]):
# pass
# the inner recursive function
def fill_order():
# get the order you just inserted from the DB
current_order = g.session.query(Order).order_by(Order.id.desc()).first()
# print("_order_id")
# print(current_order.id)
# Check if there are any existing orders that match and add them into a list
order_list = []
orders = g.session.query(Order).filter(Order.filled == None).all()
for existing_order in orders:
# if ((existing_order.buy_amount != 0) and (current_order.sell_amount != 0)):
if ((existing_order.buy_currency == current_order.sell_currency)
and (existing_order.sell_currency == current_order.buy_currency)
and (existing_order.sell_amount / existing_order.buy_amount
>= current_order.buy_amount / current_order.sell_amount)
and (existing_order.counterparty_id == None)):
order_list.append(existing_order)
# If a match is found between order and existing_order
if (len(order_list) > 0):
# print(" order_list_length")
# print(len(order_list))
# pick the first one in the list
match_order = order_list[0]
# Set the filled field to be the current timestamp on both orders
# Set counterparty_id to be the id of the other order
match_order.filled = datetime.now()
current_order.filled = datetime.now()
match_order.counterparty_id = current_order.id
current_order.counterparty_id = match_order.id
g.session.commit()
# if both orders can completely fill each other
# no child order needs to be generated
# If match_order is not completely filled
if (current_order.sell_amount < match_order.buy_amount):
# print("_match_order is not completely filled")
diff = match_order.buy_amount - current_order.sell_amount
exchange_rate_match = match_order.sell_amount / match_order.buy_amount
sell_amount_new_match = diff * exchange_rate_match
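            # The unfilled remainder keeps match_order's implied price:
            # sell_amount_new_match / diff == match_order.sell_amount / match_order.buy_amount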
# print(match_order.id)
# print(diff)
# print(sell_amount_new_match)
new_order = Order(sender_pk=match_order.sender_pk,
receiver_pk=match_order.receiver_pk,
buy_currency=match_order.buy_currency,
sell_currency=match_order.sell_currency,
buy_amount=diff,
sell_amount=sell_amount_new_match,
creator_id=match_order.id)
g.session.add(new_order)
g.session.commit()
print("M")
fill_order()
# If current_order is not completely filled
if (current_order.buy_amount > match_order.sell_amount):
# print("_current_order is not completely filled")
diff = current_order.buy_amount - match_order.sell_amount
exchange_rate_current = current_order.buy_amount / current_order.sell_amount
sell_amount_new_current = diff / exchange_rate_current
# print(current_order.id)
# print(diff)
# print(sell_amount_new_current)
new_order = Order(sender_pk=current_order.sender_pk,
receiver_pk=current_order.receiver_pk,
buy_currency=current_order.buy_currency,
sell_currency=current_order.sell_currency,
buy_amount=diff,
sell_amount=sell_amount_new_current,
creator_id=current_order.id)
g.session.add(new_order)
g.session.commit()
print("C")
fill_order()
# Takes input dictionary d and writes it to the Log table
# Hint: use json.dumps or str() to get it in a nice string form
def log_message(d):
create_session()
order_obj = Log(message=d)
g.session.add(order_obj)
shutdown_session()
# convert a row in DB into a dict
def row2dict(row):
retu | print a dictionary nicely
def print_dict(d):
for key, value in d.items():
print(key, ' : ', value)
""" End of helper methods """
@app.route('/trade', methods=['POST'])
def trade():
print("In trade endpoint")
if request.method == "POST":
print("--------- trade ---------")
content = request.get_json(silent=True)
print( f"content = {json.dumps(content)}" )
columns = [ "sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", "platform" ]
fields = [ "sig", "payload" ]
# check whether the input contains both "sig" and "payload"
for field in fields:
if not field in content.keys():
print( f"{field} not received by Trade" )
print( json.dumps(content) )
log_message(content)
return jsonify( False )
# check whether the input contains all 7 fields of payload
for column in columns:
if not column in content['payload'].keys():
print( f"{column} not received by Trade" )
print( json.dumps(content) )
log_message(content)
return jsonify( False )
#Your code here
#Note that you can access the database session using g.session
# TODO 1: Check the signature
# extract contents from json
sig = content['sig']
payload = content['payload']
platform = payload['platform']
# The platform must be either โAlgorandโ or "Ethereum".
platforms = ["Algorand", "Ethereum"]
if not platform in platforms:
print("input platform is not Algorand or Ethereum")
return jsonify(False)
# check signature
check_result = check_sig(payload,sig)
result = check_result[0]
payload_json = check_result[1]
# TODO 2: Add the order to the database
# TODO 4: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful
# If the signature does not verify, do not insert the order into the โOrderโ table.
# Instead, insert a record into the โLogโ table, with the message field set to be json.dumps(payload).
if result is False:
print("signature does NOT verify")
log_message(payload_json)
return jsonify(result)
# If the signature verifies, store the signature,
# as well as all of the fields under the โpayloadโ in the โOrderโ table EXCEPT for 'platformโ.
if result is True:
print("signature verifies")
create_session()
order_obj = Order(sender_pk=payload['sender_pk'],
receiver_pk=payload['receiver_pk'],
buy_currency=payload['buy_currency'],
sell_currency=payload['sell_currency'],
buy_amount=payload['buy_amount'],
sell_amount=payload['sell_amount'],
signature=sig)
g.session.add(order_obj)
# TODO 3: Fill the order
fill_order()
shutdown_session()
return jsonify(result)
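# Illustrative /trade request body (values are placeholders, not real keys):
# {
#   "sig": "<signature over json.dumps(payload)>",
#   "payload": {"sender_pk": "...", "receiver_pk": "...", "buy_currency": "...",
#               "sell_currency": "...", "buy_amount": 100, "sell_amount": 50,
#               "platform": "Ethereum"}
# }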
@app.route('/order_book')
def order_book():
#Your code here
#Note that you can access the database session using g.session
# The โ/order_bookโ endpoint should return a list of all orders in the database.
# The response should contain a single key โdataโ that refers to a list of orders formatted as JSON.
# Each order should be a dict with (at least) the following fields
# ("sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", โsignatureโ).
print("--------- order_book ---------")
create_session()
# get orders from DB into a list
order_dict_list = [
row2dict(order)
for order in g.session.query(Order).all()
]
# add the list into a dict
| rn {
c.name: getattr(row, c.name)
for c in row.__table__.columns
}
# | identifier_body |
exchange_endpoint0.py | eth_account
import algosdk
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import load_only
from datetime import datetime
import sys
from models import Base, Order, Log
engine = create_engine('sqlite:///orders.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
app = Flask(__name__)
# These decorators allow you to use g.session to access the database inside the request code
# g is an "application global" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals
@app.before_request
def create_session():
g.session = scoped_session(DBSession)
@app.teardown_appcontext
# def shutdown_session(response_or_exc):
def shutdown_session(exception=None):
sys.stdout.flush()
g.session.commit()
g.session.remove()
""" Suggested helper methods """
# check whether โsigโ is a valid signature of json.dumps(payload),
# using the signature algorithm specified by the platform field.
# Be sure to verify the payload using the sender_pk.
def check_sig(payload,sig):
pk = payload['sender_pk']
platform = payload['platform']
payload_json = json.dumps(payload)
result = False
if platform == "Algorand":
print("Algorand")
if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):
print("Algo sig verifies!")
result = True
elif platform == "Ethereum":
print("Ethereum")
eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json)
if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk:
print("Eth sig verifies!")
result = True
return result, payload_json
# def fill_order(order,txes=[]):
# pass
# the inner recursive function
def fill_order():
# get the order you just inserted from the DB
current_order = g.session.query(Order).order_by(Order.id.desc()).first()
# print("_order_id")
# print(current_order.id)
# Check if there are any existing orders that match and add them into a list
order_list = []
orders = g.session.query(Order).filter(Order.filled == None).all()
for existing_order in orders:
# if ((existing_order.buy_amount != 0) and (current_order.sell_amount != 0)):
if ((existing_order.buy_currency == current_order.sell_currency)
and (existing_order.sell_currency == current_order.buy_currency)
and (existing_order.sell_amount / existing_order.buy_amount
>= current_order.buy_amount / current_order.sell_amount)
and (existing_order.counterparty_id == None)):
order_list.append(existing_order) | # print(len(order_list))
# pick the first one in the list
match_order = order_list[0]
# Set the filled field to be the current timestamp on both orders
# Set counterparty_id to be the id of the other order
match_order.filled = datetime.now()
current_order.filled = datetime.now()
match_order.counterparty_id = current_order.id
current_order.counterparty_id = match_order.id
g.session.commit()
# if both orders can completely fill each other
# no child order needs to be generated
# If match_order is not completely filled
if (current_order.sell_amount < match_order.buy_amount):
# print("_match_order is not completely filled")
diff = match_order.buy_amount - current_order.sell_amount
exchange_rate_match = match_order.sell_amount / match_order.buy_amount
sell_amount_new_match = diff * exchange_rate_match
# print(match_order.id)
# print(diff)
# print(sell_amount_new_match)
new_order = Order(sender_pk=match_order.sender_pk,
receiver_pk=match_order.receiver_pk,
buy_currency=match_order.buy_currency,
sell_currency=match_order.sell_currency,
buy_amount=diff,
sell_amount=sell_amount_new_match,
creator_id=match_order.id)
g.session.add(new_order)
g.session.commit()
print("M")
fill_order()
# If current_order is not completely filled
if (current_order.buy_amount > match_order.sell_amount):
# print("_current_order is not completely filled")
diff = current_order.buy_amount - match_order.sell_amount
exchange_rate_current = current_order.buy_amount / current_order.sell_amount
sell_amount_new_current = diff / exchange_rate_current
# print(current_order.id)
# print(diff)
# print(sell_amount_new_current)
new_order = Order(sender_pk=current_order.sender_pk,
receiver_pk=current_order.receiver_pk,
buy_currency=current_order.buy_currency,
sell_currency=current_order.sell_currency,
buy_amount=diff,
sell_amount=sell_amount_new_current,
creator_id=current_order.id)
g.session.add(new_order)
g.session.commit()
print("C")
fill_order()
# Takes input dictionary d and writes it to the Log table
# Hint: use json.dumps or str() to get it in a nice string form
def log_message(d):
create_session()
order_obj = Log(message=d)
g.session.add(order_obj)
shutdown_session()
# convert a row in DB into a dict
def row2dict(row):
return {
c.name: getattr(row, c.name)
for c in row.__table__.columns
}
# print a dictionary nicely
def print_dict(d):
for key, value in d.items():
print(key, ' : ', value)
""" End of helper methods """
@app.route('/trade', methods=['POST'])
def trade():
print("In trade endpoint")
if request.method == "POST":
print("--------- trade ---------")
content = request.get_json(silent=True)
print( f"content = {json.dumps(content)}" )
columns = [ "sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", "platform" ]
fields = [ "sig", "payload" ]
# check whether the input contains both "sig" and "payload"
for field in fields:
if not field in content.keys():
print( f"{field} not received by Trade" )
print( json.dumps(content) )
log_message(content)
return jsonify( False )
# check whether the input contains all 7 fields of payload
for column in columns:
if not column in content['payload'].keys():
print( f"{column} not received by Trade" )
print( json.dumps(content) )
log_message(content)
return jsonify( False )
#Your code here
#Note that you can access the database session using g.session
# TODO 1: Check the signature
# extract contents from json
sig = content['sig']
payload = content['payload']
platform = payload['platform']
# The platform must be either โAlgorandโ or "Ethereum".
platforms = ["Algorand", "Ethereum"]
if not platform in platforms:
print("input platform is not Algorand or Ethereum")
return jsonify(False)
# check signature
check_result = check_sig(payload,sig)
result = check_result[0]
payload_json = check_result[1]
# TODO 2: Add the order to the database
# TODO 4: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful
# If the signature does not verify, do not insert the order into the โOrderโ table.
# Instead, insert a record into the โLogโ table, with the message field set to be json.dumps(payload).
if result is False:
print("signature does NOT verify")
log_message(payload_json)
return jsonify(result)
# If the signature verifies, store the signature,
# as well as all of the fields under the โpayloadโ in the โOrderโ table EXCEPT for 'platformโ.
if result is True:
print("signature verifies")
create_session()
order_obj = Order(sender_pk=payload['sender_pk'],
receiver_pk=payload['receiver_pk'],
buy_currency=payload['buy_currency'],
sell_currency=payload['sell_currency'],
buy_amount=payload['buy_amount'],
sell_amount=payload['sell_amount'],
signature=sig)
g.session.add(order_obj)
# TODO 3: Fill the order
fill_order()
shutdown_session()
return jsonify(result)
@app.route('/order_book')
def order_book():
#Your code here
#Note that you can access the database session using g.session
# The โ/order_bookโ endpoint should return a list of all orders in the database.
# The response should contain a single key โdataโ that refers to a list of orders formatted as JSON.
# Each order should be a dict with (at least) the following fields
# ("sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", โsignatureโ).
print("--------- order_book ---------")
create_session()
# get orders from DB into a list
order_dict_list = [
row2dict(order)
for order in g.session.query(Order).all()
]
# add the list into a dict
result |
# If a match is found between order and existing_order
if (len(order_list) > 0):
# print(" order_list_length") | random_line_split |
exchange_endpoint0.py | eth_account
import algosdk
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import load_only
from datetime import datetime
import sys
from models import Base, Order, Log
engine = create_engine('sqlite:///orders.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
app = Flask(__name__)
# These decorators allow you to use g.session to access the database inside the request code
# g is an "application global" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals
@app.before_request
def create_session():
g.session = scoped_session(DBSession)
@app.teardown_appcontext
# def shutdown_session(response_or_exc):
def shutdown_session(exception=None):
sys.stdout.flush()
g.session.commit()
g.session.remove()
""" Suggested helper methods """
# check whether โsigโ is a valid signature of json.dumps(payload),
# using the signature algorithm specified by the platform field.
# Be sure to verify the payload using the sender_pk.
def check_sig(payload,sig):
pk = payload['sender_pk']
platform = payload['platform']
payload_json = json.dumps(payload)
result = False
if platform == "Algorand":
print("Algorand")
if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):
print("Algo sig verifies!")
result = True
elif platform == "Ethereum":
print("Ethereum")
eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json)
if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk:
print("Eth sig verifies!")
result = True
return result, payload_json
# def fill_order(order,txes=[]):
# pass
# the inner recursive function
def fill_order():
# get the order you just inserted from the DB
current_order = g.session.query(Order).order_by(Order.id.desc()).first()
# print("_order_id")
# print(current_order.id)
# Check if there are any existing orders that match and add them into a list
order_list = []
orders = g.session.query(Order).filter(Order.filled == None).all()
for existing_order in orders:
# if ((existing_order.buy_amount != 0) and (current_order.sell_amount != 0)):
if ((existing_order.buy_currency == current_order.sell_currency)
and (existing_order.sell_currency == current_order.buy_currency)
and (existing_order.sell_amount / existing_order.buy_amount
>= current_order.buy_amount / current_order.sell_amount)
and (existing_order.counterparty_id == None)):
order_list.append(existing_order)
# If a match is found between order and existing_order
if (len(order_list) > 0):
# print(" order_list_length")
# print(len(order_list))
# pick the first one in the list
match_order = order_list[0]
# Set the filled field to be the current timestamp on both orders
# Set counterparty_id to be the id of the other order
match_order.filled = datetime.now()
current_order.filled = datetime.now()
match_order.counterparty_id = current_order.id
current_order.counterparty_id = match_order.id
g.session.commit()
# if both orders can completely fill each other
# no child order needs to be generated
# If match_order is not completely filled
if (current_order.sell_amount < match_order.buy_amount):
# print("_match_order is not completely filled")
diff = match_order.buy_amount - current_order.sell_amount
exchange_rate_match = match_order.sell_amount / match_order.buy_amount
sell_amount_new_match = diff * exchange_rate_match
# print(match_order.id)
# print(diff)
# print(sell_amount_new_match)
new_order = Order(sender_pk=match_order.sender_pk,
receiver_pk=match_order.receiver_pk,
buy_currency=match_order.buy_currency,
sell_currency=match_order.sell_currency,
buy_amount=diff,
sell_amount=sell_amount_new_match,
creator_id=match_order.id)
g.session.add(new_order)
g.session.commit()
print("M")
fill_order()
# If current_order is not completely filled
if (current_order.buy_amount > match_order.sell_amount):
# print("_current_order is not completely filled")
diff = current_order.buy_amount - match_order.sell_amount
exchange_rate_current = current_order.buy_amount / current_order.sell_amount
sell_amount_new_current = diff / exchange_rate_current
# print(current_order.id)
# print(diff)
# print(sell_amount_new_current)
new_order = Order(sender_pk=current_order.sender_pk,
receiver_pk=current_order.receiver_pk,
buy_currency=current_order.buy_currency,
sell_currency=current_order.sell_currency,
buy_amount=diff,
sell_amount=sell_amount_new_current,
creator_id=current_order.id)
g.session.add(new_order)
g.session.commit()
print("C")
fill_order()
# Takes input dictionary d and writes it to the Log table
# Hint: use json.dumps or str() to get it in a nice string form
def log_message(d):
create_session()
order_obj = Log(message=d)
g.session.add(order_obj)
shutdown_session()
# convert a row in DB into a dict
def row2dict(row):
return {
c.name: getattr(row, c.name)
for c in row.__table__.columns
}
# print a dictionary nicely
def print_dict(d):
for key, value in d.items():
print(key, ' : ', value)
""" End of helper methods """
@app.route('/trade', methods=['POST'])
def trade():
print("In trade endpoint")
if request.method == "POST":
print("--------- trade ---------")
content = request.get_json(silent=True)
print( f"content = {json.dumps(content)}" )
columns = [ "sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", "platform" ]
fields = [ "sig", "payload" ]
# check whether the input contains both "sig" and "payload"
for field in fields:
if not field in content.keys():
prin |
# check whether the input contains all 7 fields of payload
for column in columns:
if not column in content['payload'].keys():
print( f"{column} not received by Trade" )
print( json.dumps(content) )
log_message(content)
return jsonify( False )
#Your code here
#Note that you can access the database session using g.session
# TODO 1: Check the signature
# extract contents from json
sig = content['sig']
payload = content['payload']
platform = payload['platform']
# The platform must be either โAlgorandโ or "Ethereum".
platforms = ["Algorand", "Ethereum"]
if not platform in platforms:
print("input platform is not Algorand or Ethereum")
return jsonify(False)
# check signature
check_result = check_sig(payload,sig)
result = check_result[0]
payload_json = check_result[1]
# TODO 2: Add the order to the database
# TODO 4: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful
# If the signature does not verify, do not insert the order into the โOrderโ table.
# Instead, insert a record into the โLogโ table, with the message field set to be json.dumps(payload).
if result is False:
print("signature does NOT verify")
log_message(payload_json)
return jsonify(result)
# If the signature verifies, store the signature,
# as well as all of the fields under the โpayloadโ in the โOrderโ table EXCEPT for 'platformโ.
if result is True:
print("signature verifies")
create_session()
order_obj = Order(sender_pk=payload['sender_pk'],
receiver_pk=payload['receiver_pk'],
buy_currency=payload['buy_currency'],
sell_currency=payload['sell_currency'],
buy_amount=payload['buy_amount'],
sell_amount=payload['sell_amount'],
signature=sig)
g.session.add(order_obj)
# TODO 3: Fill the order
fill_order()
shutdown_session()
return jsonify(result)
@app.route('/order_book')
def order_book():
#Your code here
#Note that you can access the database session using g.session
# The โ/order_bookโ endpoint should return a list of all orders in the database.
# The response should contain a single key โdataโ that refers to a list of orders formatted as JSON.
# Each order should be a dict with (at least) the following fields
# ("sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", โsignatureโ).
print("--------- order_book ---------")
create_session()
# get orders from DB into a list
order_dict_list = [
row2dict(order)
for order in g.session.query(Order).all()
]
# add the list into a dict
| t( f"{field} not received by Trade" )
print( json.dumps(content) )
log_message(content)
return jsonify( False )
| conditional_block |
exchange_endpoint0.py | eth_account
import algosdk
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import load_only
from datetime import datetime
import sys
from models import Base, Order, Log
engine = create_engine('sqlite:///orders.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
app = Flask(__name__)
# These decorators allow you to use g.session to access the database inside the request code
# g is an "application global" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals
@app.before_request
def create_session():
g.session = scoped_session(DBSession)
@app.teardown_appcontext
# def shutdown_session(response_or_exc):
def shutdown_session(exception=None):
sys.stdout.flush()
g.session.commit()
g.session.remove()
""" Suggested helper methods """
# check whether โsigโ is a valid signature of json.dumps(payload),
# using the signature algorithm specified by the platform field.
# Be sure to verify the payload using the sender_pk.
def check_sig(payload,sig):
pk = payload['sender_pk']
platform = payload['platform']
payload_json = json.dumps(payload)
result = False
if platform == "Algorand":
print("Algorand")
if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):
print("Algo sig verifies!")
result = True
elif platform == "Ethereum":
print("Ethereum")
eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json)
if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk:
print("Eth sig verifies!")
result = True
return result, payload_json
# def fill_order(order,txes=[]):
# pass
# the inner recursive function
def fill_order():
# get the order you just inserted from the DB
current_order = g.session.query(Order).order_by(Order.id.desc()).first()
# print("_order_id")
# print(current_order.id)
# Check if there are any existing orders that match and add them into a list
order_list = []
orders = g.session.query(Order).filter(Order.filled == None).all()
for existing_order in orders:
# if ((existing_order.buy_amount != 0) and (current_order.sell_amount != 0)):
if ((existing_order.buy_currency == current_order.sell_currency)
and (existing_order.sell_currency == current_order.buy_currency)
and (existing_order.sell_amount / existing_order.buy_amount
>= current_order.buy_amount / current_order.sell_amount)
and (existing_order.counterparty_id == None)):
order_list.append(existing_order)
# If a match is found between order and existing_order
if (len(order_list) > 0):
# print(" order_list_length")
# print(len(order_list))
# pick the first one in the list
match_order = order_list[0]
# Set the filled field to be the current timestamp on both orders
# Set counterparty_id to be the id of the other order
match_order.filled = datetime.now()
current_order.filled = datetime.now()
match_order.counterparty_id = current_order.id
current_order.counterparty_id = match_order.id
g.session.commit()
# if both orders can completely fill each other
# no child order needs to be generated
# If match_order is not completely filled
if (current_order.sell_amount < match_order.buy_amount):
# print("_match_order is not completely filled")
diff = match_order.buy_amount - current_order.sell_amount
exchange_rate_match = match_order.sell_amount / match_order.buy_amount
sell_amount_new_match = diff * exchange_rate_match
# print(match_order.id)
# print(diff)
# print(sell_amount_new_match)
new_order = Order(sender_pk=match_order.sender_pk,
receiver_pk=match_order.receiver_pk,
buy_currency=match_order.buy_currency,
sell_currency=match_order.sell_currency,
buy_amount=diff,
sell_amount=sell_amount_new_match,
creator_id=match_order.id)
g.session.add(new_order)
g.session.commit()
print("M")
fill_order()
# If current_order is not completely filled
if (current_order.buy_amount > match_order.sell_amount):
# print("_current_order is not completely filled")
diff = current_order.buy_amount - match_order.sell_amount
exchange_rate_current = current_order.buy_amount / current_order.sell_amount
sell_amount_new_current = diff / exchange_rate_current
# print(current_order.id)
# print(diff)
# print(sell_amount_new_current)
new_order = Order(sender_pk=current_order.sender_pk,
receiver_pk=current_order.receiver_pk,
buy_currency=current_order.buy_currency,
sell_currency=current_order.sell_currency,
buy_amount=diff,
sell_amount=sell_amount_new_current,
creator_id=current_order.id)
g.session.add(new_order)
g.session.commit()
print("C")
fill_order()
# Takes input dictionary d and writes it to the Log table
# Hint: use json.dumps or str() to get it in a nice string form
def log_ |
create_session()
order_obj = Log(message=d)
g.session.add(order_obj)
shutdown_session()
# convert a row in DB into a dict
def row2dict(row):
return {
c.name: getattr(row, c.name)
for c in row.__table__.columns
}
# print a dictionary nicely
def print_dict(d):
for key, value in d.items():
print(key, ' : ', value)
""" End of helper methods """
@app.route('/trade', methods=['POST'])
def trade():
print("In trade endpoint")
if request.method == "POST":
print("--------- trade ---------")
content = request.get_json(silent=True)
print( f"content = {json.dumps(content)}" )
columns = [ "sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", "platform" ]
fields = [ "sig", "payload" ]
# check whether the input contains both "sig" and "payload"
for field in fields:
if not field in content.keys():
print( f"{field} not received by Trade" )
print( json.dumps(content) )
log_message(content)
return jsonify( False )
# check whether the input contains all 7 fields of payload
for column in columns:
if not column in content['payload'].keys():
print( f"{column} not received by Trade" )
print( json.dumps(content) )
log_message(content)
return jsonify( False )
#Your code here
#Note that you can access the database session using g.session
# TODO 1: Check the signature
# extract contents from json
sig = content['sig']
payload = content['payload']
platform = payload['platform']
# The platform must be either โAlgorandโ or "Ethereum".
platforms = ["Algorand", "Ethereum"]
if not platform in platforms:
print("input platform is not Algorand or Ethereum")
return jsonify(False)
# check signature
check_result = check_sig(payload,sig)
result = check_result[0]
payload_json = check_result[1]
# TODO 2: Add the order to the database
# TODO 4: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful
# If the signature does not verify, do not insert the order into the โOrderโ table.
# Instead, insert a record into the โLogโ table, with the message field set to be json.dumps(payload).
if result is False:
print("signature does NOT verify")
log_message(payload_json)
return jsonify(result)
# If the signature verifies, store the signature,
# as well as all of the fields under the โpayloadโ in the โOrderโ table EXCEPT for 'platformโ.
if result is True:
print("signature verifies")
create_session()
order_obj = Order(sender_pk=payload['sender_pk'],
receiver_pk=payload['receiver_pk'],
buy_currency=payload['buy_currency'],
sell_currency=payload['sell_currency'],
buy_amount=payload['buy_amount'],
sell_amount=payload['sell_amount'],
signature=sig)
g.session.add(order_obj)
# TODO 3: Fill the order
fill_order()
shutdown_session()
return jsonify(result)
@app.route('/order_book')
def order_book():
#Your code here
#Note that you can access the database session using g.session
# The โ/order_bookโ endpoint should return a list of all orders in the database.
# The response should contain a single key โdataโ that refers to a list of orders formatted as JSON.
# Each order should be a dict with (at least) the following fields
# ("sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", โsignatureโ).
print("--------- order_book ---------")
create_session()
# get orders from DB into a list
order_dict_list = [
row2dict(order)
for order in g.session.query(Order).all()
]
# add the list into a dict
| message(d): | identifier_name |
wxpayv3.go | // StWxPayRawResp ๅๅค
type StWxPayRawResp struct {
ID string `json:"id"`
CreateTime time.Time `json:"create_time"`
ResourceType string `json:"resource_type"`
EventType string `json:"event_type"`
Summary string `json:"summary"`
Resource struct {
OriginalType string `json:"original_type"`
Algorithm string `json:"algorithm"`
Ciphertext string `json:"ciphertext"`
AssociatedData string `json:"associated_data"`
Nonce string `json:"nonce"`
} `json:"resource"`
}
// StWxPayResp ๅๅค
type StWxPayResp struct {
Mchid string `json:"mchid"`
Appid string `json:"appid"`
OutTradeNo string `json:"out_trade_no"`
TransactionID string `json:"transaction_id"`
TradeType string `json:"trade_type"`
TradeState string `json:"trade_state"`
TradeStateDesc string `json:"trade_state_desc"`
BankType string `json:"bank_type"`
Attach string `json:"attach"`
SuccessTime time.Time `json:"success_time"`
Payer struct {
Openid string `json:"openid"`
} `json:"payer"`
Amount struct {
Total int `json:"total"`
PayerTotal int `json:"payer_total"`
Currency string `json:"currency"`
PayerCurrency string `json:"payer_currency"`
} `json:"amount"`
}
// StWxRefundCb ๅ่ฐ
type StWxRefundCb struct {
XMLName xml.Name `xml:"root"`
Text string `xml:",chardata"`
OutRefundNo string `xml:"out_refund_no"`
OutTradeNo string `xml:"out_trade_no"`
RefundAccount string `xml:"refund_account"`
RefundFee string `xml:"refund_fee"`
RefundID string `xml:"refund_id"`
RefundRecvAccout string `xml:"refund_recv_accout"`
RefundRequestSource string `xml:"refund_request_source"`
RefundStatus string `xml:"refund_status"`
SettlementRefundFee string `xml:"settlement_refund_fee"`
SettlementTotalFee string `xml:"settlement_total_fee"`
SuccessTime string `xml:"success_time"`
TotalFee string `xml:"total_fee"`
TransactionID string `xml:"transaction_id"`
}
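// Note: StWxRefundCb appears to correspond to the XML refund notification of the
// older v2 API, while the StWxV3Refund* types below follow the JSON-based v3 API.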
type StWxV3RefundResp struct {
Amount struct {
Currency string `json:"currency"`
DiscountRefund int `json:"discount_refund"`
PayerRefund int `json:"payer_refund"`
PayerTotal int `json:"payer_total"`
Refund int `json:"refund"`
SettlementRefund int `json:"settlement_refund"`
SettlementTotal int `json:"settlement_total"`
Total int `json:"total"`
} `json:"amount"`
Channel string `json:"channel"`
CreateTime time.Time `json:"create_time"`
FundsAccount string `json:"funds_account"`
OutRefundNo string `json:"out_refund_no"`
OutTradeNo string `json:"out_trade_no"`
PromotionDetail []interface{} `json:"promotion_detail"`
RefundID string `json:"refund_id"`
Status string `json:"status"`
TransactionID string `json:"transaction_id"`
UserReceivedAccount string `json:"user_received_account"`
Code string `json:"code"`
Message string `json:"message"`
}
type StWxV3RefundCb struct {
ID string `json:"id"`
CreateTime time.Time `json:"create_time"`
ResourceType string `json:"resource_type"`
EventType string `json:"event_type"`
Summary string `json:"summary"`
Resource struct {
OriginalType string `json:"original_type"`
Algorithm string `json:"algorithm"`
Ciphertext string `json:"ciphertext"`
AssociatedData string `json:"associated_data"`
Nonce string `json:"nonce"`
} `json:"resource"`
}
type StWxV3RefundCbContent struct {
Mchid string `json:"mchid"`
OutTradeNo string `json:"out_trade_no"`
TransactionID string `json:"transaction_id"`
OutRefundNo string `json:"out_refund_no"`
RefundID string `json:"refund_id"`
RefundStatus string `json:"refund_status"`
SuccessTime time.Time `json:"success_time"`
Amount struct {
Total int `json:"total"`
Refund int `json:"refund"`
PayerTotal int `json:"payer_total"`
PayerRefund int `json:"payer_refund"`
} `json:"amount"`
UserReceivedAccount string `json:"user_received_account"`
}
// RsaSign ็ญพๅ
func RsaSign(signContent string, privateKey *rsa.PrivateKey, hash crypto.Hash) (string, error) {
shaNew := hash.New()
shaNew.Write([]byte(signContent))
hashed := shaNew.Sum(nil)
signature, err := rsa.SignPKCS1v15(rand.Reader, privateKey, hash, hashed)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(signature), nil
}
// WxPayV3SignStr ่ทๅ็ญพๅ็ปๆ
func WxPayV3SignStr(key *rsa.PrivateKey, cols []string) (string, error) {
var buf bytes.Buffer
for _, col := range cols {
buf.WriteString(col)
buf.WriteString("\n")
}
sign, err := RsaSign(buf.String(), key, crypto.SHA256)
if err != nil {
return "", err
}
return sign, nil
}
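// WxPayV3SignStr is used below to sign the canonical v3 request message
// "HTTPMethod\nURLPath\nTimestamp\nNonce\nRequestBody\n" (each element in cols
// followed by a newline).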
// WxPayV3Sign v3็ญพๅ
func WxPayV3Sign(mchid, keySerial string, key *rsa.PrivateKey, req *gorequest.SuperAgent) (*gorequest.SuperAgent, error) {
timestamp := time.Now().Unix()
nonce := GetUUIDStr()
uri, err := url.Parse(req.Url)
if err != nil {
return nil, err
}
var bodyBytes []byte
if req.Method == "POST" {
request, err := req.MakeRequest()
if err != nil {
return nil, err
}
bodyReader, err := request.GetBody()
if err != nil {
return nil, err
}
bodyBytes, err = ioutil.ReadAll(bodyReader)
if err != nil {
return nil, err
}
}
sign, err := WxPayV3SignStr(key, []string{
req.Method,
uri.Path,
strconv.FormatInt(timestamp, 10),
nonce,
string(bodyBytes),
})
if err != nil {
return nil, err
}
auth := fmt.Sprintf(
`WECHATPAY2-SHA256-RSA2048 mchid="%s",nonce_str="%s",signature="%s",timestamp="%d",serial_no="%s"`,
mchid,
nonce,
sign,
timestamp,
keySerial,
)
req = req.
Set("Authorization", auth).
Set("Accept", "application/json").
Set("User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50")
return req, nil
}
// WxPayV3Decrype ่งฃๅฏ
func WxPayV3Decrype(key string, cipherStr, nonce, associatedData string) (string, error) {
keyBytes := []byte(key)
nonceBytes := []byte(nonce)
associatedDataBytes := []byte(associatedData)
ciphertext, err := base64.StdEncoding.DecodeString(cipherStr)
if err != nil {
return "", err
}
block, err := aes.NewCipher(keyBytes)
if err != nil {
return "", err
}
aesgcm, err := cipher.NewGCM(block)
if err != nil {
return "", err
}
plaintext, err := aesgcm.Open(nil, nonceBytes, ciphertext, associatedDataBytes)
if err != nil {
return "", err
}
return string(plaintext), nil
}
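// Callback resources are encrypted with AES-GCM under the APIv3 key (32 bytes,
// i.e. AES-256); the nonce and associated_data come from the notification's
// resource field and the ciphertext is base64-encoded, as handled above.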
// WxPayV3CheckSign v3็ญพๅ้ช่ฏ
func WxPayV3CheckSign(header map[string][]string, body []byte, cerStr string) error {
if len(cerStr) == 0 {
return fmt.Errorf("no cer")
}
timestamp, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Timestamp")
if err != nil {
return err
}
| jsoniter "github.com/json-iterator/go"
"github.com/parnurzeal/gorequest"
)
| random_line_split |
|
wxpayv3.go | "`
DiscountRefund int `json:"discount_refund"`
PayerRefund int `json:"payer_refund"`
PayerTotal int `json:"payer_total"`
Refund int `json:"refund"`
SettlementRefund int `json:"settlement_refund"`
SettlementTotal int `json:"settlement_total"`
Total int `json:"total"`
} `json:"amount"`
Channel string `json:"channel"`
CreateTime time.Time `json:"create_time"`
FundsAccount string `json:"funds_account"`
OutRefundNo string `json:"out_refund_no"`
OutTradeNo string `json:"out_trade_no"`
PromotionDetail []interface{} `json:"promotion_detail"`
RefundID string `json:"refund_id"`
Status string `json:"status"`
TransactionID string `json:"transaction_id"`
UserReceivedAccount string `json:"user_received_account"`
Code string `json:"code"`
Message string `json:"message"`
}
type StWxV3RefundCb struct {
ID string `json:"id"`
CreateTime time.Time `json:"create_time"`
ResourceType string `json:"resource_type"`
EventType string `json:"event_type"`
Summary string `json:"summary"`
Resource struct {
OriginalType string `json:"original_type"`
Algorithm string `json:"algorithm"`
Ciphertext string `json:"ciphertext"`
AssociatedData string `json:"associated_data"`
Nonce string `json:"nonce"`
} `json:"resource"`
}
type StWxV3RefundCbContent struct {
Mchid string `json:"mchid"`
OutTradeNo string `json:"out_trade_no"`
TransactionID string `json:"transaction_id"`
OutRefundNo string `json:"out_refund_no"`
RefundID string `json:"refund_id"`
RefundStatus string `json:"refund_status"`
SuccessTime time.Time `json:"success_time"`
Amount struct {
Total int `json:"total"`
Refund int `json:"refund"`
PayerTotal int `json:"payer_total"`
PayerRefund int `json:"payer_refund"`
} `json:"amount"`
UserReceivedAccount string `json:"user_received_account"`
}
// RsaSign ็ญพๅ
func RsaSign(signContent string, privateKey *rsa.PrivateKey, hash crypto.Hash) (string, error) {
shaNew := hash.New()
shaNew.Write([]byte(signContent))
hashed := shaNew.Sum(nil)
signature, err := rsa.SignPKCS1v15(rand.Reader, privateKey, hash, hashed)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(signature), nil
}
// WxPayV3SignStr ่ทๅ็ญพๅ็ปๆ
func WxPayV3SignStr(key *rsa.Priv | ]string) (string, error) {
var buf bytes.Buffer
for _, col := range cols {
buf.WriteString(col)
buf.WriteString("\n")
}
sign, err := RsaSign(buf.String(), key, crypto.SHA256)
if err != nil {
return "", err
}
return sign, nil
}
// WxPayV3Sign v3็ญพๅ
func WxPayV3Sign(mchid, keySerial string, key *rsa.PrivateKey, req *gorequest.SuperAgent) (*gorequest.SuperAgent, error) {
timestamp := time.Now().Unix()
nonce := GetUUIDStr()
uri, err := url.Parse(req.Url)
if err != nil {
return nil, err
}
var bodyBytes []byte
if req.Method == "POST" {
request, err := req.MakeRequest()
if err != nil {
return nil, err
}
bodyReader, err := request.GetBody()
if err != nil {
return nil, err
}
bodyBytes, err = ioutil.ReadAll(bodyReader)
if err != nil {
return nil, err
}
}
sign, err := WxPayV3SignStr(key, []string{
req.Method,
uri.Path,
strconv.FormatInt(timestamp, 10),
nonce,
string(bodyBytes),
})
if err != nil {
return nil, err
}
auth := fmt.Sprintf(
`WECHATPAY2-SHA256-RSA2048 mchid="%s",nonce_str="%s",signature="%s",timestamp="%d",serial_no="%s"`,
mchid,
nonce,
sign,
timestamp,
keySerial,
)
req = req.
Set("Authorization", auth).
Set("Accept", "application/json").
Set("User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50")
return req, nil
}
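
// Illustrative sketch (not part of the original file): signing and sending a simple
// GET query with WxPayV3Sign. The endpoint is the public order-query API; mchID,
// serial number and private key are assumed to come from the merchant configuration.
func exampleSignedQuery(mchID, keySerial string, key *rsa.PrivateKey, transactionID string) ([]byte, error) {
	req := gorequest.New().
		Get(fmt.Sprintf("https://api.mch.weixin.qq.com/v3/pay/transactions/id/%s?mchid=%s", transactionID, mchID))
	req, err := WxPayV3Sign(mchID, keySerial, key, req)
	if err != nil {
		return nil, err
	}
	_, body, errs := req.EndBytes()
	if errs != nil {
		return nil, errs[0]
	}
	return body, nil
}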
// WxPayV3Decrype decrypts an AEAD_AES_256_GCM encrypted callback resource
func WxPayV3Decrype(key string, cipherStr, nonce, associatedData string) (string, error) {
keyBytes := []byte(key)
nonceBytes := []byte(nonce)
associatedDataBytes := []byte(associatedData)
ciphertext, err := base64.StdEncoding.DecodeString(cipherStr)
if err != nil {
return "", err
}
block, err := aes.NewCipher(keyBytes)
if err != nil {
return "", err
}
aesgcm, err := cipher.NewGCM(block)
if err != nil {
return "", err
}
plaintext, err := aesgcm.Open(nil, nonceBytes, ciphertext, associatedDataBytes)
if err != nil {
return "", err
}
return string(plaintext), nil
}
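
// Illustrative sketch (not part of the original file): the APIv3 key must be exactly
// 32 bytes for AES-256-GCM, so checking the length before calling WxPayV3Decrype
// gives a clearer error than the one aes.NewCipher would return.
func exampleDecryptResource(apiV3Key, ciphertext, nonce, associatedData string) (string, error) {
	if len(apiV3Key) != 32 {
		return "", fmt.Errorf("APIv3 key must be 32 bytes, got %d", len(apiV3Key))
	}
	return WxPayV3Decrype(apiV3Key, ciphertext, nonce, associatedData)
}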
// WxPayV3CheckSign verifies the v3 response/callback signature against the platform certificate
func WxPayV3CheckSign(header map[string][]string, body []byte, cerStr string) error {
if len(cerStr) == 0 {
return fmt.Errorf("no cer")
}
timestamp, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Timestamp")
if err != nil {
return err
}
nonce, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Nonce")
if err != nil {
return err
}
signature, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Signature")
if err != nil {
return err
}
checkStr := timestamp + "\n" + nonce + "\n" + string(body) + "\n"
block, _ := pem.Decode([]byte(cerStr))
var cert *x509.Certificate
cert, err = x509.ParseCertificate(block.Bytes)
if err != nil {
return err
}
rsaPublicKey := cert.PublicKey.(*rsa.PublicKey)
oldSign, err := base64.StdEncoding.DecodeString(signature)
if err != nil {
return err
}
hashed := sha256.Sum256([]byte(checkStr))
err = rsa.VerifyPKCS1v15(rsaPublicKey, crypto.SHA256, hashed[:], oldSign)
return err
}
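
// Illustrative sketch (not part of the original file): verifying a notification
// inside an HTTP handler before decoding it. Assumes net/http is imported; cerStr is
// the PEM platform certificate whose serial matches the Wechatpay-Serial header
// (certificate download and rotation are out of scope here).
func exampleVerifyNotification(r *http.Request, cerStr string) ([]byte, error) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, err
	}
	if err := WxPayV3CheckSign(r.Header, body, cerStr); err != nil {
		return nil, err
	}
	return body, nil
}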
// WxPayV3GetHeaderByKey returns the first value of the given header key
func WxPayV3GetHeaderByKey(header map[string][]string, key string) (string, error) {
v, ok := header[key]
if !ok {
return "", fmt.Errorf("no key %s", key)
}
if len(v) == 0 {
return "", fmt.Errorf("key empty %s", key)
}
return v[0], nil
}
// WxPayV3GetPrepay creates a JSAPI prepay order and returns the signed front-end payment parameters
func WxPayV3GetPrepay(keySerial string, key *rsa.PrivateKey, appID, mchID, openID, payBody, outTradeNo, cbURL string, totalFee int64, expireAt time.Time) (gin.H, string, error) {
req := gorequest.New().
Post("https://api.mch.weixin.qq.com/v3/pay/transactions/jsapi").
Send(
H{
"appid": appID,
"mchid": mchID,
"description": payBody,
"out_trade_no": outTradeNo,
"time_expire": expireAt.Format(time.RFC3339),
"notify_url": cbURL,
"amount": H{
"total": totalFee,
},
"payer": H{
"openid": openID,
},
},
)
req, err := WxPayV3Sign(
mchID,
keySerial,
key,
req,
)
if err != nil {
return nil, "", err
}
_, body, errs := req.EndBytes()
if errs != nil {
return nil, "", errs[0]
}
var prepayResp struct {
PrepayID string `json:"prepay_id"`
}
err = jsoniter.Unmarshal(body, & | ateKey, cols [ | identifier_name |
wxpayv3.go | nil {
return err
}
hashed := sha256.Sum256([]byte(checkStr))
err = rsa.VerifyPKCS1v15(rsaPublicKey, crypto.SHA256, hashed[:], oldSign)
return err
}
// WxPayV3GetHeaderByKey returns the first value of the given header key
func WxPayV3GetHeaderByKey(header map[string][]string, key string) (string, error) {
v, ok := header[key]
if !ok {
return "", fmt.Errorf("no key %s", key)
}
if len(v) == 0 {
return "", fmt.Errorf("key empty %s", key)
}
return v[0], nil
}
// WxPayV3GetPrepay creates a JSAPI prepay order and returns the signed front-end payment parameters
func WxPayV3GetPrepay(keySerial string, key *rsa.PrivateKey, appID, mchID, openID, payBody, outTradeNo, cbURL string, totalFee int64, expireAt time.Time) (gin.H, string, error) {
req := gorequest.New().
Post("https://api.mch.weixin.qq.com/v3/pay/transactions/jsapi").
Send(
H{
"appid": appID,
"mchid": mchID,
"description": payBody,
"out_trade_no": outTradeNo,
"time_expire": expireAt.Format(time.RFC3339),
"notify_url": cbURL,
"amount": H{
"total": totalFee,
},
"payer": H{
"openid": openID,
},
},
)
req, err := WxPayV3Sign(
mchID,
keySerial,
key,
req,
)
if err != nil {
return nil, "", err
}
_, body, errs := req.EndBytes()
if errs != nil {
return nil, "", errs[0]
}
var prepayResp struct {
PrepayID string `json:"prepay_id"`
}
err = jsoniter.Unmarshal(body, &prepayResp)
if err != nil {
return nil, "", err
}
if len(prepayResp.PrepayID) == 0 {
return nil, "", fmt.Errorf("get prepay id err: %s", body)
}
v, err := WxPayV3SignPrepayid(key, appID, prepayResp.PrepayID)
if err != nil {
return nil, "", err
}
return v, prepayResp.PrepayID, nil
}
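
// Illustrative sketch (not part of the original file): creating a JSAPI order and
// returning the signed parameters to the front end. Amounts are in cents (fen); the
// 15 minute expiry and the notify URL are assumptions made for the example.
func exampleCreateOrder(keySerial string, key *rsa.PrivateKey, appID, mchID, openID string) (gin.H, error) {
	params, prepayID, err := WxPayV3GetPrepay(
		keySerial, key,
		appID, mchID, openID,
		"demo order",                         // description shown to the payer
		GetUUIDStr(),                         // out_trade_no, unique per order
		"https://example.com/api/pay/notify", // payment notification URL
		100,                                  // 1.00 CNY expressed in cents
		time.Now().Add(15*time.Minute),
	)
	if err != nil {
		return nil, err
	}
	_ = prepayID // normally persisted alongside the order record
	return params, nil
}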
// WxPayV3SignPrepayid signs the prepay_id and returns the parameters for the front-end payment call
func WxPayV3SignPrepayid(key *rsa.PrivateKey, appID, prepayid string) (gin.H, error) {
objTimestamp := strconv.FormatInt(time.Now().Unix(), 10)
objNonce := GetUUIDStr()
objCol := fmt.Sprintf("prepay_id=%s", prepayid)
objSign, err := WxPayV3SignStr(
key,
[]string{
appID,
objTimestamp,
objNonce,
objCol,
},
)
if err != nil {
return nil, err
}
v := gin.H{
"timeStamp": objTimestamp,
"nonceStr": objNonce,
"package": objCol,
"signType": "RSA",
"paySign": objSign,
}
return v, nil
}
// WxPayV3DecodePayResp decrypts and validates a v3 payment notification
func WxPayV3DecodePayResp(v3Key string, body []byte, mchid, appid string) (*StWxPayResp, error) {
var rawResp StWxPayRawResp
err := jsoniter.Unmarshal(body, &rawResp)
if err != nil {
return nil, err
}
if rawResp.EventType != "TRANSACTION.SUCCESS" {
return nil, fmt.Errorf("error event_type: %s", rawResp.EventType)
}
if rawResp.ResourceType != "encrypt-resource" {
return nil, fmt.Errorf("error resource_type: %s", rawResp.ResourceType)
}
originalType := rawResp.Resource.OriginalType
if originalType != "transaction" {
return nil, fmt.Errorf("error original_type: %s", originalType)
}
algorithm := rawResp.Resource.Algorithm
if algorithm != "AEAD_AES_256_GCM" {
return nil, fmt.Errorf("error algorithm: %s", algorithm)
}
ciphertext := rawResp.Resource.Ciphertext
associatedData := rawResp.Resource.AssociatedData
nonce := rawResp.Resource.Nonce
plain, err := WxPayV3Decrype(
v3Key,
ciphertext,
nonce,
associatedData,
)
if err != nil {
return nil, err
}
var finalResp StWxPayResp
err = jsoniter.Unmarshal([]byte(plain), &finalResp)
if err != nil {
return nil, err
}
if finalResp.Mchid != mchid {
return nil, fmt.Errorf("mchid error")
}
if finalResp.Appid != appid {
return nil, fmt.Errorf("appid error")
}
if finalResp.TradeState != "SUCCESS" {
return nil, fmt.Errorf("error trade_state: %s", finalResp.TradeState)
}
return &finalResp, nil
}
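
// Illustrative sketch (not part of the original file): a gin handler for the payment
// notification. WeChat Pay treats a 2xx status as acknowledgement, so the handler
// answers 200 only after decoding succeeds; combining this with WxPayV3CheckSign and
// marking the order as paid is left to the caller.
func examplePayNotifyHandler(v3Key, mchID, appID string) gin.HandlerFunc {
	return func(c *gin.Context) {
		body, err := c.GetRawData()
		if err != nil {
			c.Status(500)
			return
		}
		resp, err := WxPayV3DecodePayResp(v3Key, body, mchID, appID)
		if err != nil {
			c.Status(500)
			return
		}
		_ = resp // resp carries the decrypted transaction (out_trade_no, transaction_id, ...)
		c.Status(200)
	}
}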
// WxPayCheckRefundCb decrypts and parses a refund callback (APIv2 XML format)
func WxPayCheckRefundCb(mchKey string, body []byte) (*StWxRefundCb, error) {
mchKeyMd5 := fmt.Sprintf("%x", md5.Sum([]byte(mchKey)))
bodyMap, err := XMLWalk(body)
if err != nil {
// ่ฟๅๆฐๆฎ
return nil, err
}
reqInfo, ok := bodyMap["req_info"]
if !ok {
return nil, fmt.Errorf("no key req_info %s", body)
}
reqInfoStr, ok := reqInfo.(string)
if !ok {
return nil, fmt.Errorf("error format req_info: %s", body)
}
reqInfoBytes, err := base64.StdEncoding.DecodeString(reqInfoStr)
if err != nil {
return nil, err
}
reqInfoFull, err := DecryptAesEcb(reqInfoBytes, []byte(mchKeyMd5))
if err != nil {
return nil, err
}
var bodyXML StWxRefundCb
err = xml.Unmarshal(reqInfoFull, &bodyXML)
if err != nil {
return nil, err
}
return &bodyXML, nil
}
// WxPayV3Refunds requests a refund for the given transaction
func WxPayV3Refunds(keySerial string, key *rsa.PrivateKey, mchID, transactionID, outRefundNo, cbURL string, totalFee, refundFee int64) (*StWxV3RefundResp, error) {
req := gorequest.New().
Post("https://api.mch.weixin.qq.com/v3/refund/domestic/refunds").
Send(
H{
"transaction_id": transactionID,
"out_refund_no": outRefundNo,
"notify_url": cbURL,
"amount": H{
"refund": refundFee,
"total": totalFee,
"currency": "CNY",
},
},
)
req, err := WxPayV3Sign(
mchID,
keySerial,
key,
req,
)
if err != nil {
return nil, err
}
_, body, errs := req.EndBytes()
if errs != nil {
return nil, errs[0]
}
Log.Debugf("body: %s", body)
var resp StWxV3RefundResp
err = jsoniter.Unmarshal(body, &resp)
if err != nil {
return nil, err
}
if resp.Code != "" {
return nil, fmt.Errorf("refund err: %s", body)
}
return &resp, nil
}
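
// Illustrative sketch (not part of the original file): issuing a partial refund.
// Both amounts are in cents and must reflect the original order total; the refund
// callback URL is an assumption made for the example.
func exampleRefundHalf(keySerial string, key *rsa.PrivateKey, mchID, transactionID string) (*StWxV3RefundResp, error) {
	return WxPayV3Refunds(
		keySerial, key, mchID,
		transactionID,
		GetUUIDStr(),                            // out_refund_no, unique per refund request
		"https://example.com/api/refund/notify", // refund notification URL
		100,                                     // original total, in cents
		50,                                      // amount to refund, in cents
	)
}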
// WxPayV3DecodeRefundsCb decrypts and validates a v3 refund notification
func WxPayV3DecodeRefundsCb(v3Key string, body []byte) (*StWxV3RefundCbContent, error) {
var rawResp StWxV3RefundCb
err := jsoniter.Unmarshal(body, &rawResp)
if err != nil {
return nil, err
}
| if rawResp.EventType != "REFUND.SUCCESS" {
return nil, fmt.Errorf("error event_type: %s", rawResp.EventType)
}
if rawResp.ResourceType != "encrypt-resource" {
return nil, fmt.Errorf("error resource_type: %s", rawResp.ResourceType)
}
originalType := rawResp.Resource.OriginalType
if originalType != "refund" {
return nil, fmt.Errorf("error original_type: %s", originalType)
}
algorithm := rawResp.Resource.Algorithm
if algorithm != "AEAD_AES_256_GCM" {
return nil, fmt.Errorf("error algorithm: %s", algorithm)
}
ciphertext := rawResp.Resource.Ciphertext
associatedData := rawResp.Resource.AssociatedData
nonce := rawResp.Resource.Nonce
plain, err := WxPayV3Decrype( | identifier_body |
|
wxpayv3.go | time.Time `json:"success_time"`
Amount struct {
Total int `json:"total"`
Refund int `json:"refund"`
PayerTotal int `json:"payer_total"`
PayerRefund int `json:"payer_refund"`
} `json:"amount"`
UserReceivedAccount string `json:"user_received_account"`
}
// RsaSign signs signContent with the given RSA private key and hash, returning a base64-encoded signature
func RsaSign(signContent string, privateKey *rsa.PrivateKey, hash crypto.Hash) (string, error) {
shaNew := hash.New()
shaNew.Write([]byte(signContent))
hashed := shaNew.Sum(nil)
signature, err := rsa.SignPKCS1v15(rand.Reader, privateKey, hash, hashed)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(signature), nil
}
// WxPayV3SignStr joins the given fields with trailing newlines and returns the RSA-SHA256 signature of the result
func WxPayV3SignStr(key *rsa.PrivateKey, cols []string) (string, error) {
var buf bytes.Buffer
for _, col := range cols {
buf.WriteString(col)
buf.WriteString("\n")
}
sign, err := RsaSign(buf.String(), key, crypto.SHA256)
if err != nil {
return "", err
}
return sign, nil
}
// WxPayV3Sign signs a WeChat Pay v3 request and attaches the Authorization header
func WxPayV3Sign(mchid, keySerial string, key *rsa.PrivateKey, req *gorequest.SuperAgent) (*gorequest.SuperAgent, error) {
timestamp := time.Now().Unix()
nonce := GetUUIDStr()
uri, err := url.Parse(req.Url)
if err != nil {
return nil, err
}
var bodyBytes []byte
if req.Method == "POST" {
request, err := req.MakeRequest()
if err != nil {
return nil, err
}
bodyReader, err := request.GetBody()
if err != nil {
return nil, err
}
bodyBytes, err = ioutil.ReadAll(bodyReader)
if err != nil {
return nil, err
}
}
sign, err := WxPayV3SignStr(key, []string{
req.Method,
uri.Path,
strconv.FormatInt(timestamp, 10),
nonce,
string(bodyBytes),
})
if err != nil {
return nil, err
}
auth := fmt.Sprintf(
`WECHATPAY2-SHA256-RSA2048 mchid="%s",nonce_str="%s",signature="%s",timestamp="%d",serial_no="%s"`,
mchid,
nonce,
sign,
timestamp,
keySerial,
)
req = req.
Set("Authorization", auth).
Set("Accept", "application/json").
Set("User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50")
return req, nil
}
// WxPayV3Decrype decrypts an AEAD_AES_256_GCM encrypted callback resource
func WxPayV3Decrype(key string, cipherStr, nonce, associatedData string) (string, error) {
keyBytes := []byte(key)
nonceBytes := []byte(nonce)
associatedDataBytes := []byte(associatedData)
ciphertext, err := base64.StdEncoding.DecodeString(cipherStr)
if err != nil {
return "", err
}
block, err := aes.NewCipher(keyBytes)
if err != nil {
return "", err
}
aesgcm, err := cipher.NewGCM(block)
if err != nil {
return "", err
}
plaintext, err := aesgcm.Open(nil, nonceBytes, ciphertext, associatedDataBytes)
if err != nil {
return "", err
}
return string(plaintext), nil
}
// WxPayV3CheckSign verifies the v3 response/callback signature against the platform certificate
func WxPayV3CheckSign(header map[string][]string, body []byte, cerStr string) error {
if len(cerStr) == 0 {
return fmt.Errorf("no cer")
}
timestamp, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Timestamp")
if err != nil {
return err
}
nonce, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Nonce")
if err != nil {
return err
}
signature, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Signature")
if err != nil {
return err
}
checkStr := timestamp + "\n" + nonce + "\n" + string(body) + "\n"
block, _ := pem.Decode([]byte(cerStr))
var cert *x509.Certificate
cert, err = x509.ParseCertificate(block.Bytes)
if err != nil {
return err
}
rsaPublicKey := cert.PublicKey.(*rsa.PublicKey)
oldSign, err := base64.StdEncoding.DecodeString(signature)
if err != nil {
return err
}
hashed := sha256.Sum256([]byte(checkStr))
err = rsa.VerifyPKCS1v15(rsaPublicKey, crypto.SHA256, hashed[:], oldSign)
return err
}
// WxPayV3GetHeaderByKey returns the first value of the given header key
func WxPayV3GetHeaderByKey(header map[string][]string, key string) (string, error) {
v, ok := header[key]
if !ok {
return "", fmt.Errorf("no key %s", key)
}
if len(v) == 0 {
return "", fmt.Errorf("key empty %s", key)
}
return v[0], nil
}
// WxPayV3GetPrepay creates a JSAPI prepay order and returns the signed front-end payment parameters
func WxPayV3GetPrepay(keySerial string, key *rsa.PrivateKey, appID, mchID, openID, payBody, outTradeNo, cbURL string, totalFee int64, expireAt time.Time) (gin.H, string, error) {
req := gorequest.New().
Post("https://api.mch.weixin.qq.com/v3/pay/transactions/jsapi").
Send(
H{
"appid": appID,
"mchid": mchID,
"description": payBody,
"out_trade_no": outTradeNo,
"time_expire": expireAt.Format(time.RFC3339),
"notify_url": cbURL,
"amount": H{
"total": totalFee,
},
"payer": H{
"openid": openID,
},
},
)
req, err := WxPayV3Sign(
mchID,
keySerial,
key,
req,
)
if err != nil {
return nil, "", err
}
_, body, errs := req.EndBytes()
if errs != nil {
return nil, "", errs[0]
}
var prepayResp struct {
PrepayID string `json:"prepay_id"`
}
err = jsoniter.Unmarshal(body, &prepayResp)
if err != nil {
return nil, "", err
}
if len(prepayResp.PrepayID) == 0 {
return nil, "", fmt.Errorf("get prepay id err: %s", body)
}
v, err := WxPayV3SignPrepayid(key, appID, prepayResp.PrepayID)
if err != nil {
return nil, "", err
}
return v, prepayResp.PrepayID, nil
}
// WxPayV3SignPrepayid signs the prepay_id and returns the parameters for the front-end payment call
func WxPayV3SignPrepayid(key *rsa.PrivateKey, appID, prepayid string) (gin.H, error) {
objTimestamp := strconv.FormatInt(time.Now().Unix(), 10)
objNonce := GetUUIDStr()
objCol := fmt.Sprintf("prepay_id=%s", prepayid)
objSign, err := WxPayV3SignStr(
key,
[]string{
appID,
objTimestamp,
objNonce,
objCol,
},
)
if err != nil {
return nil, err
}
v := gin.H{
"timeStamp": objTimestamp,
"nonceStr": objNonce,
"package": objCol,
"signType": "RSA",
"paySign": objSign,
}
return v, nil
}
// WxPayV3DecodePayResp decrypts and validates a v3 payment notification
func WxPayV3DecodePayResp(v3Key string, body []byte, mchid, appid string) (*StWxPayResp, error) {
var rawResp StWxPayRawResp
err := jsoniter.Unmarshal(body, &rawResp)
if err != nil {
return nil, err
}
if rawResp.EventType != "TRANSACTION.SUCCESS" {
return nil, fmt.Errorf("error event_type: %s", rawResp.EventType)
}
if rawResp.ResourceType != "encrypt-resource" {
return nil, fmt.Errorf("error resource_type: %s", rawResp.ResourceType)
}
| originalType := rawResp.Resource.OriginalType
if originalType != "transaction | conditional_block |
|
functions.py | 5), (x, y, 50, 50), width=1)
def print_log():
font = pygame.font.Font(None, 25)
text_coord = 25
for line in all_logs:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 725
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
def update_wall_color(cur_level):
if cur_level == 1:
texture_wall = load_image("image/Brick_Wall_009.jpg")
texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2))
elif cur_level == 2:
texture_wall = load_image("image/stone_wall2.png")
texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2))
else:
texture_wall = load_image("image/wall3.jpg")
texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2))
return texture_wall
texture_floor = load_image("image/dark-brick-wall-texture_1048-7626.jpg")
texture_floor = pygame.transform.scale(texture_floor, (cell_size, cell_size))
hp_bar = load_image("image/hud/frame.png")
hp_bar = pygame.transform.scale(hp_bar, (275, 20))
door = load_image("image/castledoors.png")
door = pygame.transform.scale(door, (cell_size, cell_size))
frame = load_image("image/hud/button_1(frame).png")
frame = pygame.transform.scale(frame, (cell_size, cell_size))
blood_screen = load_image("image/BloodOverlay.png")
blood_screen = pygame.transform.scale(blood_screen, (WIDTH, HEIGHT))
# create the sprite groups that hold all sprites
all_sprites = pygame.sprite.Group()
equipment_sprites = pygame.sprite.Group()
inventory_sprites = pygame.sprite.Group()
character_sprites = pygame.sprite.Group()
def terminate():
pygame.quit()
sys.exit()
def start_screen():
intro_text = ["ะะะะ ะ ะะะะะะะะะขะฌ ะ PYDUNGEON",
"ะะปั ัะพะณะพ, ััะพะฑั ะฒัะฑัะฐัััั ะพั ััะดะฐ,",
"ะะฐะผ ะฟะพะฝะฐะดะพะฑะธััั ะฟัะพะนัะธ ััะธ ััะพะฒะฝั ะบะฐัะฐะบะพะผะฑ ะธ ะฟะพะฑะพัะพัั ะฝะตะฒะธะดะฐะฝะฝัั
ััะดะธั",
"ะัะปะธ ะั ะฟัะธั
, ะฝะฐะถะผะธัะต ะปัะฑัั ะบะฝะพะฟะบั"]
fon = pygame.transform.scale(load_image('image/dungeon_intro.jpeg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def level2_screen():
intro_text = ["ะะซ ะกะะะะะ ะะ ะะะขะ ะะะ ะะซะ ะฃะ ะะะะะฌ ะะะะะะะะะฌะฏ",
"ะฝะพ ััะพ ะตัะต ะฝะต ะบะพะฝะตั...",
"ะะฟะตัะตะดะธ ะะฐั ะถะดัั ะตัะต ะฑะพะปะตะต ัะธะปัะฝัะต ะฟัะพัะธะฒะฝะธะบะธ",
"ะธ ะณะพัะฐะทะดะพ ะผะตะฝััะต ัะฐะฝัะพะฒ ะฝะฐ ะฒัะถะธะฒะฐะฝะธะต"]
fon = pygame.transform.scale(load_image('image/level2_dungeon.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def level3_screen():
intro_text = ["ะะะะะะะะ ะะะฅะะะะะ ะขะะ ะะะะะะ",
"ะ ะตัะต ะฝะธะบัะพ ะฝะต ะฒะพะทะฒัะฐัะฐะปัั",
"ะก ะฟะพัะปะตะดะฝะตะณะพ, ัะฐะผะพะณะพ ะผัะฐัะฝะพะณะพ ะธ ะณะปัะฑะพะบะพะณะพ ััะพะฒะฝั",
"ะกะผะตะปััะฐะบ ะปะธ ัั ะธะปะธ ะฑะตะทัะผะตั?"]
fon = pygame.transform.scale(load_image('image/level3_dungeon.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def gameover_screen():
intro_text = ["ะ ัะปะตะดัััะธะน ัะฐะท ะฟะพะฒะตะทะตั",
"ะฒะพะทะผะพะถะฝะพ..."]
fon = pygame.transform.scale(load_image('image/gameover.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 700
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def victory_screen():
intro_text = ["ะญัะพ ะฟะพะฑะตะดะฐ!",
"ะั ัะผะพะณะปะธ ะฒัะฑัะฐัััั ะธะท ะฟะพะดะทะตะผะตะปัั,",
"ััะพ ะพะฑะตัะฟะตัะธั ะะฐะผ ะฑะพะณะฐัััะฒะพ ะธ ัะปะฐะฒั ะฝะฐ ะฒัั ะถะธะทะฝั"]
fon = pygame.transform.scale(load_image('image/victory.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 700
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def exit_screen():
button_exit = Button(575, 500, 45, 30, "ะะบ")
name = ''
    fon = pygame.transform.scale(load_image('image/exit.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
title = font.render('ะะฐะฟะธัะธ ัะฒะพะต ะธะผั', True, (255, 255, 255))
while True:
sc.blit(fon, (0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
if event.type == pygame.KEYDOWN:
if event.unicode.isalpha():
name += event.unicode
elif event.key == K_BACKSPACE:
name = name[:-1]
elif event.key == K_RETURN:
name = ""
if event.type == pygame.MOUSEBUTTONDOWN:
if button_exit.push_button(event.pos):
gameover(name, killed_monsters)
text = font.render(name, True, (255, 255, 255))
rect = text.get_rect()
rect.center = (600, 400)
button_exit.draw()
sc.blit(text, rect)
sc.blit(title, (500, 250))
pygame.display.flip()
def exchange_equipment_inventory(inventory, hero):
obj = inventory.get_selected_cell()
if obj | s[0]
def draw_white_rect(x, y):
pygame.draw.rect(sc, (255, 255, 25 | identifier_body |
|
functions.py | _size * 2))
elif cur_level == 2:
texture_wall = load_image("image/stone_wall2.png")
texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2))
else:
texture_wall = load_image("image/wall3.jpg")
texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2))
return texture_wall
texture_floor = load_image("image/dark-brick-wall-texture_1048-7626.jpg")
texture_floor = pygame.transform.scale(texture_floor, (cell_size, cell_size))
hp_bar = load_image("image/hud/frame.png")
hp_bar = pygame.transform.scale(hp_bar, (275, 20))
door = load_image("image/castledoors.png")
door = pygame.transform.scale(door, (cell_size, cell_size))
frame = load_image("image/hud/button_1(frame).png")
frame = pygame.transform.scale(frame, (cell_size, cell_size))
blood_screen = load_image("image/BloodOverlay.png")
blood_screen = pygame.transform.scale(blood_screen, (WIDTH, HEIGHT))
# create the sprite groups that hold all sprites
all_sprites = pygame.sprite.Group()
equipment_sprites = pygame.sprite.Group()
inventory_sprites = pygame.sprite.Group()
character_sprites = pygame.sprite.Group()
def terminate():
pygame.quit()
sys.exit()
def start_screen():
intro_text = ["ะะะะ ะ ะะะะะะะะะขะฌ ะ PYDUNGEON",
"ะะปั ัะพะณะพ, ััะพะฑั ะฒัะฑัะฐัััั ะพั ััะดะฐ,",
"ะะฐะผ ะฟะพะฝะฐะดะพะฑะธััั ะฟัะพะนัะธ ััะธ ััะพะฒะฝั ะบะฐัะฐะบะพะผะฑ ะธ ะฟะพะฑะพัะพัั ะฝะตะฒะธะดะฐะฝะฝัั
ััะดะธั",
"ะัะปะธ ะั ะฟัะธั
, ะฝะฐะถะผะธัะต ะปัะฑัั ะบะฝะพะฟะบั"]
fon = pygame.transform.scale(load_image('image/dungeon_intro.jpeg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def level2_screen():
intro_text = ["ะะซ ะกะะะะะ ะะ ะะะขะ ะะะ ะะซะ ะฃะ ะะะะะฌ ะะะะะะะะะฌะฏ",
"ะฝะพ ััะพ ะตัะต ะฝะต ะบะพะฝะตั...",
"ะะฟะตัะตะดะธ ะะฐั ะถะดัั ะตัะต ะฑะพะปะตะต ัะธะปัะฝัะต ะฟัะพัะธะฒะฝะธะบะธ",
"ะธ ะณะพัะฐะทะดะพ ะผะตะฝััะต ัะฐะฝัะพะฒ ะฝะฐ ะฒัะถะธะฒะฐะฝะธะต"]
fon = pygame.transform.scale(load_image('image/level2_dungeon.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def level3_screen():
intro_text = ["ะะะะะะะะ ะะะฅะะะะะ ะขะะ ะะะะะะ",
"ะ ะตัะต ะฝะธะบัะพ ะฝะต ะฒะพะทะฒัะฐัะฐะปัั",
"ะก ะฟะพัะปะตะดะฝะตะณะพ, ัะฐะผะพะณะพ ะผัะฐัะฝะพะณะพ ะธ ะณะปัะฑะพะบะพะณะพ ััะพะฒะฝั",
"ะกะผะตะปััะฐะบ ะปะธ ัั ะธะปะธ ะฑะตะทัะผะตั?"]
fon = pygame.transform.scale(load_image('image/level3_dungeon.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def gameover_screen():
intro_text = ["ะ ัะปะตะดัััะธะน ัะฐะท ะฟะพะฒะตะทะตั",
"ะฒะพะทะผะพะถะฝะพ..."]
fon = pygame.transform.scale(load_image('image/gameover.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 700
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def victory_screen():
intro_text = ["ะญัะพ ะฟะพะฑะตะดะฐ!",
"ะั ัะผะพะณะปะธ ะฒัะฑัะฐัััั ะธะท ะฟะพะดะทะตะผะตะปัั,",
"ััะพ ะพะฑะตัะฟะตัะธั ะะฐะผ ะฑะพะณะฐัััะฒะพ ะธ ัะปะฐะฒั ะฝะฐ ะฒัั ะถะธะทะฝั"]
fon = pygame.transform.scale(load_image('image/victory.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 700
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def exit_screen():
button_exit = Button(575, 500, 45, 30, "ะะบ")
name = ''
    fon = pygame.transform.scale(load_image('image/exit.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
title = font.render('ะะฐะฟะธัะธ ัะฒะพะต ะธะผั', True, (255, 255, 255))
while True:
sc.blit(fon, (0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
if event.type == pygame.KEYDOWN: | name = ""
if event.type == pygame.MOUSEBUTTONDOWN:
if button_exit.push_button(event.pos):
gameover(name, killed_monsters)
text = font.render(name, True, (255, 255, 255))
rect = text.get_rect()
rect.center = (600, 400)
button_exit.draw()
sc.blit(text, rect)
sc.blit(title, (500, 250))
pygame.display.flip()
def exchange_equipment_inventory(inventory, hero):
obj = inventory.get_selected_cell()
if obj.get_type() == "weapon":
old_w = hero.replace_weapon(obj)
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == "Helmet1" or obj.get_name() == 'Helmet2':
old_w = hero.replace_helmet(obj)
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == "Cuiras1" or obj.get_name() == 'Cuiras2':
old_w = hero.replace_armor(obj)
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == "Leg_armor1" or obj.get_name() == 'Leg_armor2':
old_w = hero.replace_leg(obj)
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == "Arm_armor1 | if event.unicode.isalpha():
name += event.unicode
elif event.key == K_BACKSPACE:
name = name[:-1]
elif event.key == K_RETURN: | random_line_split |
functions.py | _size * 2))
elif cur_level == 2:
texture_wall = load_image("image/stone_wall2.png")
texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2))
else:
texture_wall = load_image("image/wall3.jpg")
texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2))
return texture_wall
texture_floor = load_image("image/dark-brick-wall-texture_1048-7626.jpg")
texture_floor = pygame.transform.scale(texture_floor, (cell_size, cell_size))
hp_bar = load_image("image/hud/frame.png")
hp_bar = pygame.transform.scale(hp_bar, (275, 20))
door = load_image("image/castledoors.png")
door = pygame.transform.scale(door, (cell_size, cell_size))
frame = load_image("image/hud/button_1(frame).png")
frame = pygame.transform.scale(frame, (cell_size, cell_size))
blood_screen = load_image("image/BloodOverlay.png")
blood_screen = pygame.transform.scale(blood_screen, (WIDTH, HEIGHT))
# create the sprite groups that hold all sprites
all_sprites = pygame.sprite.Group()
equipment_sprites = pygame.sprite.Group()
inventory_sprites = pygame.sprite.Group()
character_sprites = pygame.sprite.Group()
def terminate():
pygame.quit()
sys.exit()
def start_screen():
intro_text = ["ะะะะ ะ ะะะะะะะะะขะฌ ะ PYDUNGEON",
"ะะปั ัะพะณะพ, ััะพะฑั ะฒัะฑัะฐัััั ะพั ััะดะฐ,",
"ะะฐะผ ะฟะพะฝะฐะดะพะฑะธััั ะฟัะพะนัะธ ััะธ ััะพะฒะฝั ะบะฐัะฐะบะพะผะฑ ะธ ะฟะพะฑะพัะพัั ะฝะตะฒะธะดะฐะฝะฝัั
ััะดะธั",
"ะัะปะธ ะั ะฟัะธั
, ะฝะฐะถะผะธัะต ะปัะฑัั ะบะฝะพะฟะบั"]
fon = pygame.transform.scale(load_image('image/dungeon_intro.jpeg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def level2_screen():
intro_text = ["ะะซ ะกะะะะะ ะะ ะะะขะ ะะะ ะะซะ ะฃะ ะะะะะฌ ะะะะะะะะะฌะฏ",
"ะฝะพ ััะพ ะตัะต ะฝะต ะบะพะฝะตั...",
"ะะฟะตัะตะดะธ ะะฐั ะถะดัั ะตัะต ะฑะพะปะตะต ัะธะปัะฝัะต ะฟัะพัะธะฒะฝะธะบะธ",
"ะธ ะณะพัะฐะทะดะพ ะผะตะฝััะต ัะฐะฝัะพะฒ ะฝะฐ ะฒัะถะธะฒะฐะฝะธะต"]
fon = pygame.transform.scale(load_image('image/level2_dungeon.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def level3_screen():
intro_text = ["ะะะะะะะะ ะะะฅะะะะะ ะขะะ ะะะะะะ",
"ะ ะตัะต ะฝะธะบัะพ ะฝะต ะฒะพะทะฒัะฐัะฐะปัั",
"ะก ะฟะพัะปะตะดะฝะตะณะพ, ัะฐะผะพะณะพ ะผัะฐัะฝะพะณะพ ะธ ะณะปัะฑะพะบะพะณะพ ััะพะฒะฝั",
"ะกะผะตะปััะฐะบ ะปะธ ัั ะธะปะธ ะฑะตะทัะผะตั?"]
fon = pygame.transform.scale(load_image('image/level3_dungeon.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def gameover_screen():
intro_text = ["ะ ัะปะตะดัััะธะน ัะฐะท ะฟะพะฒะตะทะตั",
"ะฒะพะทะผะพะถะฝะพ..."]
fon = pygame.transform.scale(load_image('image/gameover.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 700
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def victory_screen():
intro_text = ["ะญัะพ ะฟะพะฑะตะดะฐ!",
"ะั ัะผะพะณะปะธ ะฒัะฑัะฐัััั ะธะท ะฟะพะดะทะตะผะตะปัั,",
"ััะพ ะพะฑะตัะฟะตัะธั ะะฐะผ ะฑะพะณะฐัััะฒะพ ะธ ัะปะฐะฒั ะฝะฐ ะฒัั ะถะธะทะฝั"]
fon = pygame.transform.scale(load_image('image/victory.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 700
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def exit_screen():
button_exit = Button(575, 500, 45, 30, "ะะบ")
name = ''
    fon = pygame.transform.scale(load_image('image/exit.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
title = font.render('ะะฐะฟะธัะธ ัะฒะพะต ะธะผั', True, (255, 255, 255))
while True:
sc.blit(fon, (0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
if event.type == pygame.KEYDOWN:
if event.unicode.isalpha():
name += event.unicode
elif event.key == K_BACKSPACE:
name = name[:-1]
elif event.key == K_RETURN:
name = ""
if event.type == pygame.MOUSEBUTTONDOWN:
if button_exit.push_button(event.pos):
gameover(name, killed_monsters)
text = font.render(name, True, (255, 255, 255))
rect = text.get_rect()
rect.center = (600, 40 |
button_exit.draw()
sc.blit(text, rect)
sc.blit(title, (500, 250))
pygame.display.flip()
def exchange_equipment_inventory(inventory, hero):
obj = inventory.get_selected_cell()
if obj.get_type() == "weapon":
old_w = hero.replace_weapon(obj)
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == "Helmet1" or obj.get_name() == 'Helmet2':
old_w = hero.replace_helmet(obj)
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == "Cuiras1" or obj.get_name() == 'Cuiras2':
old_w = hero.replace_armor(obj)
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == "Leg_armor1" or obj.get_name() == 'Leg_armor2':
old_w = hero.replace_leg(obj)
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == "Arm_armor | 0)
| conditional_block |
functions.py | * 2))
elif cur_level == 2:
texture_wall = load_image("image/stone_wall2.png")
texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2))
else:
texture_wall = load_image("image/wall3.jpg")
texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2))
return texture_wall
texture_floor = load_image("image/dark-brick-wall-texture_1048-7626.jpg")
texture_floor = pygame.transform.scale(texture_floor, (cell_size, cell_size))
hp_bar = load_image("image/hud/frame.png")
hp_bar = pygame.transform.scale(hp_bar, (275, 20))
door = load_image("image/castledoors.png")
door = pygame.transform.scale(door, (cell_size, cell_size))
frame = load_image("image/hud/button_1(frame).png")
frame = pygame.transform.scale(frame, (cell_size, cell_size))
blood_screen = load_image("image/BloodOverlay.png")
blood_screen = pygame.transform.scale(blood_screen, (WIDTH, HEIGHT))
# create the sprite groups that hold all sprites
all_sprites = pygame.sprite.Group()
equipment_sprites = pygame.sprite.Group()
inventory_sprites = pygame.sprite.Group()
character_sprites = pygame.sprite.Group()
def terminate():
pygame.quit()
sys.exit()
def start_screen():
intro_text = ["ะะะะ ะ ะะะะะะะะะขะฌ ะ PYDUNGEON",
"ะะปั ัะพะณะพ, ััะพะฑั ะฒัะฑัะฐัััั ะพั ััะดะฐ,",
"ะะฐะผ ะฟะพะฝะฐะดะพะฑะธััั ะฟัะพะนัะธ ััะธ ััะพะฒะฝั ะบะฐัะฐะบะพะผะฑ ะธ ะฟะพะฑะพัะพัั ะฝะตะฒะธะดะฐะฝะฝัั
ััะดะธั",
"ะัะปะธ ะั ะฟัะธั
, ะฝะฐะถะผะธัะต ะปัะฑัั ะบะฝะพะฟะบั"]
fon = pygame.transform.scale(load_image('image/dungeon_intro.jpeg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def level2_screen():
intro_text = ["ะะซ ะกะะะะะ ะะ ะะะขะ ะะะ ะะซะ ะฃะ ะะะะะฌ ะะะะะะะะะฌะฏ",
"ะฝะพ ััะพ ะตัะต ะฝะต ะบะพะฝะตั...",
"ะะฟะตัะตะดะธ ะะฐั ะถะดัั ะตัะต ะฑะพะปะตะต ัะธะปัะฝัะต ะฟัะพัะธะฒะฝะธะบะธ",
"ะธ ะณะพัะฐะทะดะพ ะผะตะฝััะต ัะฐะฝัะพะฒ ะฝะฐ ะฒัะถะธะฒะฐะฝะธะต"]
fon = pygame.transform.scale(load_image('image/level2_dungeon.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def level3_screen():
intro_text = ["ะะะะะะะะ ะะะฅะะะะะ ะขะะ ะะะะะะ",
"ะ ะตัะต ะฝะธะบัะพ ะฝะต ะฒะพะทะฒัะฐัะฐะปัั",
"ะก ะฟะพัะปะตะดะฝะตะณะพ, ัะฐะผะพะณะพ ะผัะฐัะฝะพะณะพ ะธ ะณะปัะฑะพะบะพะณะพ ััะพะฒะฝั",
"ะกะผะตะปััะฐะบ ะปะธ ัั ะธะปะธ ะฑะตะทัะผะตั?"]
fon = pygame.transform.scale(load_image('image/level3_dungeon.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def gameover_screen():
intro_text = ["ะ ัะปะตะดัััะธะน ัะฐะท ะฟะพะฒะตะทะตั",
"ะฒะพะทะผะพะถะฝะพ..."]
fon = pygame.transform.scale(load_image('image/gameover.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 700
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def victory_screen():
intro_text = ["ะญัะพ ะฟะพะฑะตะดะฐ!",
"ะั ัะผะพะณะปะธ ะฒัะฑัะฐัััั ะธะท ะฟะพะดะทะตะผะตะปัั,",
"ััะพ ะพะฑะตัะฟะตัะธั ะะฐะผ ะฑะพะณะฐัััะฒะพ ะธ ัะปะฐะฒั ะฝะฐ ะฒัั ะถะธะทะฝั"]
fon = pygame.transform.scale(load_image('image/victory.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 700
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('yellow'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
sc.blit(string_rendered, intro_rect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or \
event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
def exit_screen():
button_exit = Button(575, 500, 45, 30, "ะะบ")
name = ''
    fon = pygame.transform.scale(load_image('image/exit.jpg'), (WIDTH, HEIGHT))
sc.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
title = font.render('ะะฐะฟะธัะธ ัะฒะพะต ะธะผั', True, (255, 255, 255))
while True:
sc.blit(fon, (0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
if event.type == pygame.KEYDOWN:
if event.unicode.isalpha():
name += event.unicode
elif event.key == K_BACKSPACE:
name = name[:-1]
elif event.key == K_RETURN:
name = ""
if event.type == pygame.MOUSEBUTTONDOWN:
if button_exit.push_button(event.pos):
gameover(name, killed_monsters)
text = font.render(name, True, (255, 255, 255))
rect = text.get_rect()
rect.center = (600, 400)
button_exit.draw()
sc.blit(text, rect)
sc.blit(title, (500, 250))
pygame.display.flip()
def exchange_equipment_inventory(inventory, hero):
obj = inventory.get_selected_cell()
if obj.get_type() == "weapon":
old_w = hero.replace_weapon(obj)
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == "Helmet1" or obj.get_name() == 'Helmet2':
old_w = hero.replace_helmet(obj)
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == "Cuiras1" or obj.get_name() == 'Cuiras2':
old_w = hero.replace_armor(obj)
inv | ted_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == "Leg_armor1" or obj.get_name() == 'Leg_armor2':
old_w = hero.replace_leg(obj)
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w
if obj.get_name() == " | entory.board[inventory.selec | identifier_name |
machine.go | != nil {
if !apierrors.IsNotFound(err) {
return nil, err
}
} else {
machine.vmInstance = vm
}
return machine, nil
}
// IsTerminal reports back if the VM is either being requested to terminate or is terminated
// in a way that it will never recover from.
func (m *Machine) IsTerminal() (bool, string, error) {
if m.vmInstance == nil || m.vmiInstance == nil {
// vm/vmi hasn't been created yet
return false, "", nil
}
// VMI is being asked to terminate gracefully due to node drain
if !m.vmiInstance.IsFinal() &&
!m.vmiInstance.IsMigratable() &&
m.vmiInstance.Status.EvacuationNodeName != "" {
// VM's infra node is being drained and VM is not live migratable.
// We need to report a FailureReason so the MachineHealthCheck and
// MachineSet controllers will gracefully take the VM down.
return true, "The Machine's VM pod is marked for eviction due to infra node drain.", nil
}
	// The infrav1.KubevirtVMTerminalLabel is a way for users or automation to mark
// a VM as being in a terminal state that requires remediation. This is used
// by the functional test suite to test remediation and can also be triggered
// by users as a way to manually trigger remediation.
terminalReason, ok := m.vmInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel]
if ok {
return true, fmt.Sprintf("VM's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil
}
// Also check the VMI for this label
terminalReason, ok = m.vmiInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel]
if ok {
return true, fmt.Sprintf("VMI's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil
}
runStrategy, err := m.vmInstance.RunStrategy()
if err != nil {
return false, "", err
}
switch runStrategy {
case kubevirtv1.RunStrategyAlways:
// VM should recover if it is down.
return false, "", nil
case kubevirtv1.RunStrategyManual:
// If VM is manually controlled, we stay out of the loop
return false, "", nil
case kubevirtv1.RunStrategyHalted, kubevirtv1.RunStrategyOnce:
if m.vmiInstance.IsFinal() {
return true, "VMI has reached a permanent finalized state", nil
}
return false, "", nil
case kubevirtv1.RunStrategyRerunOnFailure:
// only recovers when vmi is failed
if m.vmiInstance.Status.Phase == kubevirtv1.Succeeded {
return true, "VMI has reached a permanent finalized state", nil
}
return false, "", nil
}
return false, "", nil
}
// Exists checks if the VM has been provisioned already.
func (m *Machine) Exists() bool {
return m.vmInstance != nil
}
// Create creates a new VM for this machine.
func (m *Machine) Create(ctx gocontext.Context) error {
m.machineContext.Logger.Info(fmt.Sprintf("Creating VM with role '%s'...", nodeRole(m.machineContext)))
virtualMachine := newVirtualMachineFromKubevirtMachine(m.machineContext, m.namespace)
mutateFn := func() (err error) {
if virtualMachine.Labels == nil {
virtualMachine.Labels = map[string]string{}
}
if virtualMachine.Spec.Template.ObjectMeta.Labels == nil {
virtualMachine.Spec.Template.ObjectMeta.Labels = map[string]string{}
}
virtualMachine.Labels[clusterv1.ClusterLabelName] = m.machineContext.Cluster.Name
virtualMachine.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name
virtualMachine.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace
virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name
virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace
return nil
}
if _, err := controllerutil.CreateOrUpdate(ctx, m.client, virtualMachine, mutateFn); err != nil {
return err
}
return nil
}
// hasReadyCondition reports whether the VMI has a Ready condition set to true.
func (m *Machine) hasReadyCondition() bool {
if m.vmiInstance == nil {
return false
}
for _, cond := range m.vmiInstance.Status.Conditions {
if cond.Type == kubevirtv1.VirtualMachineInstanceReady &&
cond.Status == corev1.ConditionTrue {
return true
}
}
return false
}
// Address returns the IP address of the VM.
func (m *Machine) Address() string {
if m.vmiInstance != nil && len(m.vmiInstance.Status.Interfaces) > 0 {
return m.vmiInstance.Status.Interfaces[0].IP
}
return ""
}
// IsReady checks if the VM is ready
func (m *Machine) IsReady() bool {
return m.hasReadyCondition()
}
// SupportsCheckingIsBootstrapped checks if we have a method of checking
// that this bootstrapper has completed.
func (m *Machine) SupportsCheckingIsBootstrapped() bool {
// Right now, we can only check if bootstrapping has
// completed if we are using a bootstrapper that allows
// for us to inject ssh keys into the guest.
if m.sshKeys != nil {
return m.machineContext.HasInjectedCapkSSHKeys(m.sshKeys.PublicKey)
}
return false
}
// IsBootstrapped checks if the VM is bootstrapped with Kubernetes.
func (m *Machine) IsBootstrapped() bool {
// CheckStrategy value is already sanitized by apiserver
switch m.machineContext.KubevirtMachine.Spec.BootstrapCheckSpec.CheckStrategy {
case "none":
// skip bootstrap check and always returns positively
return true
case "":
fallthrough // ssh is default check strategy, fallthrough
case "ssh":
return m.IsBootstrappedWithSSH()
default:
// Since CRD CheckStrategy field is validated by an enum, this case should never be hit
return false
}
}
// IsBootstrappedWithSSH checks if the VM is bootstrapped with Kubernetes using SSH strategy.
func (m *Machine) IsBootstrappedWithSSH() bool {
if !m.IsReady() || m.sshKeys == nil {
return false
}
executor := m.getCommandExecutor(m.Address(), m.sshKeys)
output, err := executor.ExecuteCommand("cat /run/cluster-api/bootstrap-success.complete")
if err != nil || output != "success" {
return false
}
return true
}
// GenerateProviderID generates the KubeVirt provider ID to be used for the NodeRef
func (m *Machine) GenerateProviderID() (string, error) {
if m.vmiInstance == nil {
return "", errors.New("Underlying Kubevirt VM is NOT running")
}
providerID := fmt.Sprintf("kubevirt://%s", m.machineContext.KubevirtMachine.Name)
return providerID, nil
}
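
// Illustrative sketch (not part of the original file): the provider ID format above
// is what is expected in the workload cluster Node's spec.providerID, so anything
// that needs to map a Node back to its KubevirtMachine can strip the prefix.
// Assumes the standard strings package is imported.
func exampleMachineNameFromProviderID(providerID string) string {
	return strings.TrimPrefix(providerID, "kubevirt://")
}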
// Delete deletes the VM for this machine.
func (m *Machine) Delete() error {
namespacedName := types.NamespacedName{Namespace: m.namespace, Name: m.machineContext.KubevirtMachine.Name}
vm := &kubevirtv1.VirtualMachine{}
if err := m.client.Get(m.machineContext.Context, namespacedName, vm); err != nil {
if apierrors.IsNotFound(err) {
m.machineContext.Logger.Info("VM does not exist, nothing to do.")
return nil
}
return errors.Wrapf(err, "failed to retrieve VM to delete")
}
if err := m.client.Delete(gocontext.Background(), vm); err != nil {
return errors.Wrapf(err, "failed to delete VM")
}
return nil
}
func (m *Machine) DrainNodeIfNeeded(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) {
if m.vmiInstance == nil || !m.shouldGracefulDeleteVMI() {
if _, anntExists := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; anntExists {
if err := m.removeGracePeriodAnnotation(); err != nil {
return 100 * time.Millisecond, err
}
}
return 0, nil
}
exceeded, err := m.drainGracePeriodExceeded()
if err != nil {
return 0, err
}
if !exceeded { | return 0, err
}
if retryDuration > 0 {
return retryDuration, nil
}
}
// now, when the node is drained (or vmiDeleteGraceTimeoutDurationSeconds has passed), we can delete the VMI
propagationPolicy := metav1.DeletePropagationForeground
err = m.client.Delete(m | retryDuration, err := m.drainNode(wrkldClstr)
if err != nil { | random_line_split |
machine.go | , "", nil
}
return false, "", nil
}
// Exists checks if the VM has been provisioned already.
func (m *Machine) Exists() bool {
return m.vmInstance != nil
}
// Create creates a new VM for this machine.
func (m *Machine) Create(ctx gocontext.Context) error {
m.machineContext.Logger.Info(fmt.Sprintf("Creating VM with role '%s'...", nodeRole(m.machineContext)))
virtualMachine := newVirtualMachineFromKubevirtMachine(m.machineContext, m.namespace)
mutateFn := func() (err error) {
if virtualMachine.Labels == nil {
virtualMachine.Labels = map[string]string{}
}
if virtualMachine.Spec.Template.ObjectMeta.Labels == nil {
virtualMachine.Spec.Template.ObjectMeta.Labels = map[string]string{}
}
virtualMachine.Labels[clusterv1.ClusterLabelName] = m.machineContext.Cluster.Name
virtualMachine.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name
virtualMachine.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace
virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name
virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace
return nil
}
if _, err := controllerutil.CreateOrUpdate(ctx, m.client, virtualMachine, mutateFn); err != nil {
return err
}
return nil
}
// hasReadyCondition reports whether the VMI has a Ready condition set to true.
func (m *Machine) hasReadyCondition() bool {
if m.vmiInstance == nil {
return false
}
for _, cond := range m.vmiInstance.Status.Conditions {
if cond.Type == kubevirtv1.VirtualMachineInstanceReady &&
cond.Status == corev1.ConditionTrue {
return true
}
}
return false
}
// Address returns the IP address of the VM.
func (m *Machine) Address() string {
if m.vmiInstance != nil && len(m.vmiInstance.Status.Interfaces) > 0 {
return m.vmiInstance.Status.Interfaces[0].IP
}
return ""
}
// IsReady checks if the VM is ready
func (m *Machine) IsReady() bool {
return m.hasReadyCondition()
}
// SupportsCheckingIsBootstrapped checks if we have a method of checking
// that this bootstrapper has completed.
func (m *Machine) SupportsCheckingIsBootstrapped() bool {
// Right now, we can only check if bootstrapping has
// completed if we are using a bootstrapper that allows
// for us to inject ssh keys into the guest.
if m.sshKeys != nil {
return m.machineContext.HasInjectedCapkSSHKeys(m.sshKeys.PublicKey)
}
return false
}
// IsBootstrapped checks if the VM is bootstrapped with Kubernetes.
func (m *Machine) IsBootstrapped() bool {
// CheckStrategy value is already sanitized by apiserver
switch m.machineContext.KubevirtMachine.Spec.BootstrapCheckSpec.CheckStrategy {
case "none":
// skip bootstrap check and always returns positively
return true
case "":
fallthrough // ssh is default check strategy, fallthrough
case "ssh":
return m.IsBootstrappedWithSSH()
default:
// Since CRD CheckStrategy field is validated by an enum, this case should never be hit
return false
}
}
// IsBootstrappedWithSSH checks if the VM is bootstrapped with Kubernetes using SSH strategy.
func (m *Machine) IsBootstrappedWithSSH() bool {
if !m.IsReady() || m.sshKeys == nil {
return false
}
executor := m.getCommandExecutor(m.Address(), m.sshKeys)
output, err := executor.ExecuteCommand("cat /run/cluster-api/bootstrap-success.complete")
if err != nil || output != "success" {
return false
}
return true
}
// GenerateProviderID generates the KubeVirt provider ID to be used for the NodeRef
func (m *Machine) GenerateProviderID() (string, error) {
if m.vmiInstance == nil {
return "", errors.New("Underlying Kubevirt VM is NOT running")
}
providerID := fmt.Sprintf("kubevirt://%s", m.machineContext.KubevirtMachine.Name)
return providerID, nil
}
// Delete deletes the VM for this machine.
func (m *Machine) Delete() error {
namespacedName := types.NamespacedName{Namespace: m.namespace, Name: m.machineContext.KubevirtMachine.Name}
vm := &kubevirtv1.VirtualMachine{}
if err := m.client.Get(m.machineContext.Context, namespacedName, vm); err != nil {
if apierrors.IsNotFound(err) {
m.machineContext.Logger.Info("VM does not exist, nothing to do.")
return nil
}
return errors.Wrapf(err, "failed to retrieve VM to delete")
}
if err := m.client.Delete(gocontext.Background(), vm); err != nil {
return errors.Wrapf(err, "failed to delete VM")
}
return nil
}
func (m *Machine) DrainNodeIfNeeded(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) {
if m.vmiInstance == nil || !m.shouldGracefulDeleteVMI() {
if _, anntExists := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; anntExists {
if err := m.removeGracePeriodAnnotation(); err != nil {
return 100 * time.Millisecond, err
}
}
return 0, nil
}
exceeded, err := m.drainGracePeriodExceeded()
if err != nil {
return 0, err
}
if !exceeded {
retryDuration, err := m.drainNode(wrkldClstr)
if err != nil {
return 0, err
}
if retryDuration > 0 {
return retryDuration, nil
}
}
// now, when the node is drained (or vmiDeleteGraceTimeoutDurationSeconds has passed), we can delete the VMI
propagationPolicy := metav1.DeletePropagationForeground
err = m.client.Delete(m.machineContext, m.vmiInstance, &client.DeleteOptions{PropagationPolicy: &propagationPolicy})
if err != nil {
m.machineContext.Logger.Error(err, "failed to delete VirtualMachineInstance")
return 0, err
}
if err = m.removeGracePeriodAnnotation(); err != nil {
return 100 * time.Millisecond, err
}
// requeue to force reading the VMI again
return time.Second * 10, nil
}
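
// Illustrative sketch (not part of the original file): how a reconcile loop would
// typically consume the (retry duration, error) pair returned by DrainNodeIfNeeded —
// requeue while draining is still in progress and continue once it reports zero.
// Assumes the usual ctrl alias for sigs.k8s.io/controller-runtime.
func exampleReconcileDrain(m *Machine, wc workloadcluster.WorkloadCluster) (ctrl.Result, error) {
	retryAfter, err := m.DrainNodeIfNeeded(wc)
	if err != nil {
		return ctrl.Result{RequeueAfter: retryAfter}, err
	}
	if retryAfter > 0 {
		return ctrl.Result{RequeueAfter: retryAfter}, nil
	}
	return ctrl.Result{}, nil // drained or no drain needed; continue with deletion
}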
const removeGracePeriodAnnotationPatch = `[{"op": "remove", "path": "/metadata/annotations/` + infrav1.VmiDeletionGraceTimeEscape + `"}]`
func (m *Machine) removeGracePeriodAnnotation() error {
patch := client.RawPatch(types.JSONPatchType, []byte(removeGracePeriodAnnotationPatch))
if err := m.client.Patch(m.machineContext, m.machineContext.KubevirtMachine, patch); err != nil {
return fmt.Errorf("failed to remove the %s annotation to the KubeVirtMachine %s; %w", infrav1.VmiDeletionGraceTime, m.machineContext.KubevirtMachine.Name, err)
}
return nil
}
func (m *Machine) shouldGracefulDeleteVMI() bool {
if m.vmiInstance.DeletionTimestamp != nil {
m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is already in deletion process. Nothing to do here")
return false
}
if m.vmiInstance.Spec.EvictionStrategy == nil || *m.vmiInstance.Spec.EvictionStrategy != kubevirtv1.EvictionStrategyExternal {
m.machineContext.Logger.V(4).Info("DrainNode: graceful deletion is not supported for virtualMachineInstance. Nothing to do here")
return false
}
// KubeVirt will set the EvacuationNodeName field in case of guest node eviction. If the field is not set, there is
// nothing to do.
if len(m.vmiInstance.Status.EvacuationNodeName) == 0 {
m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is not marked for deletion. Nothing to do here")
return false
}
return true
}
// Wait vmiDeleteGraceTimeoutDurationSeconds for the node to be drained. If this time has passed, don't wait anymore.
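// Rough flow, as read from the code below: the first call finds no (or a malformed) grace-time
// annotation, stamps one via setVmiDeletionGraceTime (now + vmiDeleteGraceTimeoutDurationSeconds,
// RFC3339) and returns false; later calls simply compare time.Now() against that stored deadline.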
func (m *Machine) drainGracePeriodExceeded() (bool, error) {
if graceTime, found := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; found {
deletionGraceTime, err := time.Parse(time.RFC3339, graceTime)
if err != nil { // wrong format - rewrite
if err = m.setVmiDeletionGraceTime(); err != nil {
return false, err
}
} else {
return time.Now().UTC().After(deletionGraceTime), nil
}
} else {
if err := m.setVmiDeletionGraceTime(); err != nil {
return false, err
}
}
return false, nil
}
func (m *Machine) | setVmiDeletionGraceTime | identifier_name |
|
machine.go | != nil {
if !apierrors.IsNotFound(err) {
return nil, err
}
} else {
machine.vmInstance = vm
}
return machine, nil
}
// IsTerminal reports back whether the VM is either being requested to terminate or has terminated
// in a way that it will never recover from.
func (m *Machine) IsTerminal() (bool, string, error) {
if m.vmInstance == nil || m.vmiInstance == nil {
// vm/vmi hasn't been created yet
return false, "", nil
}
// VMI is being asked to terminate gracefully due to node drain
if !m.vmiInstance.IsFinal() &&
!m.vmiInstance.IsMigratable() &&
m.vmiInstance.Status.EvacuationNodeName != "" {
// VM's infra node is being drained and VM is not live migratable.
// We need to report a FailureReason so the MachineHealthCheck and
// MachineSet controllers will gracefully take the VM down.
return true, "The Machine's VM pod is marked for eviction due to infra node drain.", nil
}
// The infrav1.KubevirtVMTerminalLabel is a way for users or automation to mark
// a VM as being in a terminal state that requires remediation. This is used
// by the functional test suite to test remediation and can also be triggered
// by users as a way to manually trigger remediation.
terminalReason, ok := m.vmInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel]
if ok {
return true, fmt.Sprintf("VM's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil
}
// Also check the VMI for this label
terminalReason, ok = m.vmiInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel]
if ok {
return true, fmt.Sprintf("VMI's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil
}
runStrategy, err := m.vmInstance.RunStrategy()
if err != nil |
switch runStrategy {
case kubevirtv1.RunStrategyAlways:
// VM should recover if it is down.
return false, "", nil
case kubevirtv1.RunStrategyManual:
// If VM is manually controlled, we stay out of the loop
return false, "", nil
case kubevirtv1.RunStrategyHalted, kubevirtv1.RunStrategyOnce:
if m.vmiInstance.IsFinal() {
return true, "VMI has reached a permanent finalized state", nil
}
return false, "", nil
case kubevirtv1.RunStrategyRerunOnFailure:
// with RerunOnFailure the VMI is only restarted when it fails, so a Succeeded VMI is a permanent final state
if m.vmiInstance.Status.Phase == kubevirtv1.Succeeded {
return true, "VMI has reached a permanent finalized state", nil
}
return false, "", nil
}
return false, "", nil
}
// Exists checks if the VM has been provisioned already.
func (m *Machine) Exists() bool {
return m.vmInstance != nil
}
// Create creates a new VM for this machine.
func (m *Machine) Create(ctx gocontext.Context) error {
m.machineContext.Logger.Info(fmt.Sprintf("Creating VM with role '%s'...", nodeRole(m.machineContext)))
virtualMachine := newVirtualMachineFromKubevirtMachine(m.machineContext, m.namespace)
mutateFn := func() (err error) {
if virtualMachine.Labels == nil {
virtualMachine.Labels = map[string]string{}
}
if virtualMachine.Spec.Template.ObjectMeta.Labels == nil {
virtualMachine.Spec.Template.ObjectMeta.Labels = map[string]string{}
}
virtualMachine.Labels[clusterv1.ClusterLabelName] = m.machineContext.Cluster.Name
virtualMachine.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name
virtualMachine.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace
virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name
virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace
return nil
}
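// Note (sketch of the controller-runtime helper's behaviour): controllerutil.CreateOrUpdate fetches the
// VM, applies mutateFn to it, then creates it if it does not exist or updates it if the mutation changed
// anything, so the labels above are reconciled on every call rather than only on initial creation.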
if _, err := controllerutil.CreateOrUpdate(ctx, m.client, virtualMachine, mutateFn); err != nil {
return err
}
return nil
}
// hasReadyCondition returns whether the VMI has a Ready condition set to True.
func (m *Machine) hasReadyCondition() bool {
if m.vmiInstance == nil {
return false
}
for _, cond := range m.vmiInstance.Status.Conditions {
if cond.Type == kubevirtv1.VirtualMachineInstanceReady &&
cond.Status == corev1.ConditionTrue {
return true
}
}
return false
}
// Address returns the IP address of the VM.
func (m *Machine) Address() string {
if m.vmiInstance != nil && len(m.vmiInstance.Status.Interfaces) > 0 {
return m.vmiInstance.Status.Interfaces[0].IP
}
return ""
}
// IsReady checks if the VM is ready
func (m *Machine) IsReady() bool {
return m.hasReadyCondition()
}
// SupportsCheckingIsBootstrapped checks if we have a method of checking
// that this bootstrapper has completed.
func (m *Machine) SupportsCheckingIsBootstrapped() bool {
// Right now, we can only check if bootstrapping has
// completed if we are using a bootstrapper that allows
// for us to inject ssh keys into the guest.
if m.sshKeys != nil {
return m.machineContext.HasInjectedCapkSSHKeys(m.sshKeys.PublicKey)
}
return false
}
// IsBootstrapped checks if the VM is bootstrapped with Kubernetes.
func (m *Machine) IsBootstrapped() bool {
// CheckStrategy value is already sanitized by apiserver
switch m.machineContext.KubevirtMachine.Spec.BootstrapCheckSpec.CheckStrategy {
case "none":
// skip the bootstrap check and always return positively
return true
case "":
fallthrough // ssh is default check strategy, fallthrough
case "ssh":
return m.IsBootstrappedWithSSH()
default:
// Since CRD CheckStrategy field is validated by an enum, this case should never be hit
return false
}
}
// IsBootstrappedWithSSH checks if the VM is bootstrapped with Kubernetes using SSH strategy.
func (m *Machine) IsBootstrappedWithSSH() bool {
if !m.IsReady() || m.sshKeys == nil {
return false
}
executor := m.getCommandExecutor(m.Address(), m.sshKeys)
output, err := executor.ExecuteCommand("cat /run/cluster-api/bootstrap-success.complete")
if err != nil || output != "success" {
return false
}
return true
}
// GenerateProviderID generates the KubeVirt provider ID to be used for the NodeRef
func (m *Machine) GenerateProviderID() (string, error) {
if m.vmiInstance == nil {
return "", errors.New("Underlying Kubevirt VM is NOT running")
}
providerID := fmt.Sprintf("kubevirt://%s", m.machineContext.KubevirtMachine.Name)
return providerID, nil
}
// Delete deletes VM for this machine.
func (m *Machine) Delete() error {
namespacedName := types.NamespacedName{Namespace: m.namespace, Name: m.machineContext.KubevirtMachine.Name}
vm := &kubevirtv1.VirtualMachine{}
if err := m.client.Get(m.machineContext.Context, namespacedName, vm); err != nil {
if apierrors.IsNotFound(err) {
m.machineContext.Logger.Info("VM does not exist, nothing to do.")
return nil
}
return errors.Wrapf(err, "failed to retrieve VM to delete")
}
if err := m.client.Delete(gocontext.Background(), vm); err != nil {
return errors.Wrapf(err, "failed to delete VM")
}
return nil
}
func (m *Machine) DrainNodeIfNeeded(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) {
if m.vmiInstance == nil || !m.shouldGracefulDeleteVMI() {
if _, anntExists := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; anntExists {
if err := m.removeGracePeriodAnnotation(); err != nil {
return 100 * time.Millisecond, err
}
}
return 0, nil
}
exceeded, err := m.drainGracePeriodExceeded()
if err != nil {
return 0, err
}
if !exceeded {
retryDuration, err := m.drainNode(wrkldClstr)
if err != nil {
return 0, err
}
if retryDuration > 0 {
return retryDuration, nil
}
}
// now, when the node is drained (or vmiDeleteGraceTimeoutDurationSeconds has passed), we can delete the VMI
propagationPolicy := metav1.DeletePropagationForeground
err = m.client | {
return false, "", err
} | conditional_block |
machine.go | ] = m.machineContext.Cluster.Name
virtualMachine.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name
virtualMachine.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace
virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name
virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace
return nil
}
if _, err := controllerutil.CreateOrUpdate(ctx, m.client, virtualMachine, mutateFn); err != nil {
return err
}
return nil
}
// hasReadyCondition returns whether the VMI has a Ready condition set to True.
func (m *Machine) hasReadyCondition() bool {
if m.vmiInstance == nil {
return false
}
for _, cond := range m.vmiInstance.Status.Conditions {
if cond.Type == kubevirtv1.VirtualMachineInstanceReady &&
cond.Status == corev1.ConditionTrue {
return true
}
}
return false
}
// Address returns the IP address of the VM.
func (m *Machine) Address() string {
if m.vmiInstance != nil && len(m.vmiInstance.Status.Interfaces) > 0 {
return m.vmiInstance.Status.Interfaces[0].IP
}
return ""
}
// IsReady checks if the VM is ready
func (m *Machine) IsReady() bool {
return m.hasReadyCondition()
}
// SupportsCheckingIsBootstrapped checks if we have a method of checking
// that this bootstrapper has completed.
func (m *Machine) SupportsCheckingIsBootstrapped() bool {
// Right now, we can only check if bootstrapping has
// completed if we are using a bootstrapper that allows
// for us to inject ssh keys into the guest.
if m.sshKeys != nil {
return m.machineContext.HasInjectedCapkSSHKeys(m.sshKeys.PublicKey)
}
return false
}
// IsBootstrapped checks if the VM is bootstrapped with Kubernetes.
func (m *Machine) IsBootstrapped() bool {
// CheckStrategy value is already sanitized by apiserver
switch m.machineContext.KubevirtMachine.Spec.BootstrapCheckSpec.CheckStrategy {
case "none":
// skip the bootstrap check and always return positively
return true
case "":
fallthrough // ssh is default check strategy, fallthrough
case "ssh":
return m.IsBootstrappedWithSSH()
default:
// Since CRD CheckStrategy field is validated by an enum, this case should never be hit
return false
}
}
// IsBootstrappedWithSSH checks if the VM is bootstrapped with Kubernetes using SSH strategy.
func (m *Machine) IsBootstrappedWithSSH() bool {
if !m.IsReady() || m.sshKeys == nil {
return false
}
executor := m.getCommandExecutor(m.Address(), m.sshKeys)
output, err := executor.ExecuteCommand("cat /run/cluster-api/bootstrap-success.complete")
if err != nil || output != "success" {
return false
}
return true
}
// GenerateProviderID generates the KubeVirt provider ID to be used for the NodeRef
func (m *Machine) GenerateProviderID() (string, error) {
if m.vmiInstance == nil {
return "", errors.New("Underlying Kubevirt VM is NOT running")
}
providerID := fmt.Sprintf("kubevirt://%s", m.machineContext.KubevirtMachine.Name)
return providerID, nil
}
// Delete deletes VM for this machine.
func (m *Machine) Delete() error {
namespacedName := types.NamespacedName{Namespace: m.namespace, Name: m.machineContext.KubevirtMachine.Name}
vm := &kubevirtv1.VirtualMachine{}
if err := m.client.Get(m.machineContext.Context, namespacedName, vm); err != nil {
if apierrors.IsNotFound(err) {
m.machineContext.Logger.Info("VM does not exist, nothing to do.")
return nil
}
return errors.Wrapf(err, "failed to retrieve VM to delete")
}
if err := m.client.Delete(gocontext.Background(), vm); err != nil {
return errors.Wrapf(err, "failed to delete VM")
}
return nil
}
func (m *Machine) DrainNodeIfNeeded(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) {
if m.vmiInstance == nil || !m.shouldGracefulDeleteVMI() {
if _, anntExists := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; anntExists {
if err := m.removeGracePeriodAnnotation(); err != nil {
return 100 * time.Millisecond, err
}
}
return 0, nil
}
exceeded, err := m.drainGracePeriodExceeded()
if err != nil {
return 0, err
}
if !exceeded {
retryDuration, err := m.drainNode(wrkldClstr)
if err != nil {
return 0, err
}
if retryDuration > 0 {
return retryDuration, nil
}
}
// now, when the node is drained (or vmiDeleteGraceTimeoutDurationSeconds has passed), we can delete the VMI
propagationPolicy := metav1.DeletePropagationForeground
err = m.client.Delete(m.machineContext, m.vmiInstance, &client.DeleteOptions{PropagationPolicy: &propagationPolicy})
if err != nil {
m.machineContext.Logger.Error(err, "failed to delete VirtualMachineInstance")
return 0, err
}
if err = m.removeGracePeriodAnnotation(); err != nil {
return 100 * time.Millisecond, err
}
// requeue to force reading the VMI again
return time.Second * 10, nil
}
const removeGracePeriodAnnotationPatch = `[{"op": "remove", "path": "/metadata/annotations/` + infrav1.VmiDeletionGraceTimeEscape + `"}]`
func (m *Machine) removeGracePeriodAnnotation() error {
patch := client.RawPatch(types.JSONPatchType, []byte(removeGracePeriodAnnotationPatch))
if err := m.client.Patch(m.machineContext, m.machineContext.KubevirtMachine, patch); err != nil {
return fmt.Errorf("failed to remove the %s annotation to the KubeVirtMachine %s; %w", infrav1.VmiDeletionGraceTime, m.machineContext.KubevirtMachine.Name, err)
}
return nil
}
func (m *Machine) shouldGracefulDeleteVMI() bool {
if m.vmiInstance.DeletionTimestamp != nil {
m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is already in deletion process. Nothing to do here")
return false
}
if m.vmiInstance.Spec.EvictionStrategy == nil || *m.vmiInstance.Spec.EvictionStrategy != kubevirtv1.EvictionStrategyExternal {
m.machineContext.Logger.V(4).Info("DrainNode: graceful deletion is not supported for virtualMachineInstance. Nothing to do here")
return false
}
// KubeVirt will set the EvacuationNodeName field in case of guest node eviction. If the field is not set, there is
// nothing to do.
if len(m.vmiInstance.Status.EvacuationNodeName) == 0 {
m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is not marked for deletion. Nothing to do here")
return false
}
return true
}
// Wait vmiDeleteGraceTimeoutDurationSeconds for the node to be drained. If this time has passed, don't wait anymore.
func (m *Machine) drainGracePeriodExceeded() (bool, error) {
if graceTime, found := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; found {
deletionGraceTime, err := time.Parse(time.RFC3339, graceTime)
if err != nil { // wrong format - rewrite
if err = m.setVmiDeletionGraceTime(); err != nil {
return false, err
}
} else {
return time.Now().UTC().After(deletionGraceTime), nil
}
} else {
if err := m.setVmiDeletionGraceTime(); err != nil {
return false, err
}
}
return false, nil
}
func (m *Machine) setVmiDeletionGraceTime() error | {
m.machineContext.Logger.Info(fmt.Sprintf("setting the %s annotation", infrav1.VmiDeletionGraceTime))
graceTime := time.Now().Add(vmiDeleteGraceTimeoutDurationSeconds * time.Second).UTC().Format(time.RFC3339)
patch := fmt.Sprintf(`{"metadata":{"annotations":{"%s": "%s"}}}`, infrav1.VmiDeletionGraceTime, graceTime)
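// For illustration, with a hypothetical timestamp the rendered merge patch looks roughly like:
// {"metadata":{"annotations":{"cluster.x-k8s.io/vmi-deletion-grace-time": "2021-01-02T15:04:05Z"}}}
// (the annotation key shown is a guess; the real one comes from infrav1.VmiDeletionGraceTime)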
patchRequest := client.RawPatch(types.MergePatchType, []byte(patch))
if err := m.client.Patch(m.machineContext, m.machineContext.KubevirtMachine, patchRequest); err != nil {
return fmt.Errorf("failed to add the %s annotation to the KubeVirtMachine %s; %w", infrav1.VmiDeletionGraceTime, m.machineContext.KubevirtMachine.Name, err)
}
return nil
} | identifier_body |
|
header.go | , value := range header{
if ( srtcolindexInt==key){
sortColName = value
}
}
fmt.Println(sortColName)
tconf["ActionCol"] ="true" // config for action column
tconf["ActionCol_param"] ="ID" // config for parameter of action
tconf["ActionCol_edit"] ="true" // config for edit click
tconf["ActionCol_edit_is_modal"] ="false" // config for edit click
//tconf["ActionCol_edit_url"] ="/timekeeping/overtime_logs/OvertimeLogsHeaderEdit?rights="+rights+"&h_id=" // config for edit click
tconf["ActionCol_edit_url"] ="/administrator/member_role/HEditHandler?rights="+rights+"&h_id=" // config for edit click
tconf["ActionCol_delete"] ="true" // config for delete click
tconf["ActionCol_delete_url"] ="/administrator/member_role/HDeleteHandler?h_id="
tconf["ActionCol_detail"] ="false" // config for details click
tconf["ActionCol_add_child"] ="false" // config for addchild click
tconf["style_size"] ="12"
tconf["style_font"] =""
tconf["format_number_col"] ="Amount"
tconf["dr"]=dr //draw
//getting total records etc
// rights for tables
if( strings.Contains(dec_rights, "HEdit") ){
tconf["ActionCol_edit"] ="true"
}
if( strings.Contains(dec_rights, "HDelete") ){
tconf["ActionCol_delete"] ="true" // config for delete click
}
if( strings.Contains(dec_rights, "HDetails") ){
tconf["ActionCol_detail"] ="true" // config for delete click
}
//end rights for tables
//_,session_user_id := login.Get_account_info(r)
session_user_id := 1 //static here
session_user_id_str := strconv.Itoa(session_user_id)
fmt.Println(session_user_id_str)
/* fmt.Println("exec LBR_LogHdr_List 1, 2, 1, 1, 1, '"+sortColName+"', '', '"+sr+"' " )
fmt.Println(sortColName,str,sr)*/
//fmt.Println(`exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`)
//tconf["sql_total"] = `exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`
//tconf["sql_data"] = `exec LBR_OTHdr_List 0, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`
//tconf["sql_total"] = `exec dailysumhdr_list 1, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'`
//tconf["sql_data"] = `exec dailysumhdr_list 0, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'`
Org_id :=login.Get_session_org_id(r)
str_OrgID :=strconv.Itoa(Org_id)
tconf["sql_total"] = `OrgMemRol_List 1, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'`
tconf["sql_data"] = `OrgMemRol_List 0, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'`
datatables.DatatableretArray(w,tconf)
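// Sketch of intent (the DatatableretArray helper itself is not shown in this snippet): it presumably runs
// the "sql_total" call (first argument 1) for the record count and the "sql_data" call (first argument 0)
// for the current page, then writes a DataTables-style JSON response echoing the "dr" (draw) value.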
}
}
type RetMessage struct {
RetNo int
RetMsg string
}
func DoAdd(branch string, date string, remarks string , username string) (bool ,int) {
//rowret ,err, _,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Add', '`+username+`', 0, '`+trandate+`', `+lbr_assign+`, '`+remarks+`'`,1)
rowret ,err, _,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', 0, 1, `+branch+`, '`+date+`', '`+remarks+`'`,1)
if err != nil {
panic(err.Error())
}
var r RetMessage
if err != nil {
fmt.Println(err.Error())
panic(err.Error())
}
for rowret.Next() {
err = rowret.Scan(&r.RetNo,&r.RetMsg)
if err != nil {
panic(err.Error())
}
r = RetMessage{r.RetNo,r.RetMsg}
}
if( strings.Contains(r.RetMsg, "Success") ){
return true ,r.RetNo
}else{
return false ,r.RetNo
}
}
var local_FuncMap = template.FuncMap{
"Sql_list": func(s_qry string , org_id string, tag_id string) [][]string{
fmt.Println(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id)
retdata := datatables.DataList(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id)
return retdata
},
}
func HAddHandler(w http.ResponseWriter, r *http.Request) {
login.Session_renew(w,r)
username, ses_uID := login.Get_account_info(r)
//username := "static"
//ses_uID := 1
str_ses_uID :=strconv.Itoa(ses_uID)
rights :=r.URL.Query().Get("rights")
fmt.Println(str_ses_uID)
//fmt.Fprint(w,rights)
if r.Method =="GET" {
Org_id :=login.Get_session_org_id(r)
str_OrgID :=strconv.Itoa(Org_id)
tconf := make(map[string]string)
//tconf["parentID"] =r.URL.Query().Get("parentID")
tconf["username"] =username
tconf["org_id"] = str_OrgID | tconf["test_js"] = `alert("from webserver")`
arr_sysrole := datatables.DataList(`sysrole_get 2`)
type Data struct {
Rights string
Conf map[string]string
Arr_Sysrole [][]string
}
tmpl := template.New("Hadd.html").Funcs(local_FuncMap)
var err error
if tmpl, err = tmpl.ParseFiles("admin/member_role/Hadd.html"); err != nil {
fmt.Println(err)
}
err1 := tmpl.Execute(w,&Data{rights , tconf ,arr_sysrole})
if err1 != nil {
http.Error(w, err1.Error(), http.StatusInternalServerError)
}
}else {
r.ParseForm()
add_status, lastinsertedID := DoAdd(r.Form["branch"][0],r.Form["date"][0],r.Form["remarks"][0],username)
if(add_status){
str_lastinsertedID :=strconv.Itoa(lastinsertedID)
profile := Profile{"Data Successfully added ",true ,str_lastinsertedID}
js, err := json.Marshal(profile)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Println("Data Successfully added")
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}else{
profile := Profile{"Oppsss something went wrong",false,""}
js, err := json.Marshal(profile)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Println("Oppsss something went wrong")
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}
}
}
func HAddTagHandler(w http.ResponseWriter, r *http.Request) {
if r.Method =="POST" {
r.ParseForm()
item_id := r.Form["item_id"][0]
username, _ := login.Get_account_info(r)
Org_id :=login.Get_session_org_id(r)
str_OrgID :=strconv.Itoa(Org_id)
var returnData[] string
for key ,_ := range r.Form["tag"] {
tag := r.Form["tag"][key]
value_input := r.Form["value_input"][key]
remarks := r.Form["remarks"][key]
if(value_input!=""){
arr_data_itemtag := datatables.Data_row(`SIS_itemtags_save 'Update', '`+username+`',`+str_OrgID+`,`+item_id+`, `+tag+`, '`+value_input+`', '`+remarks+`'` )
fmt.Println(arr_data_itemtag)
returnData | random_line_split |
|
header.go | .New("Hadd.html").Funcs(local_FuncMap)
var err error
if tmpl, err = tmpl.ParseFiles("admin/member_role/Hadd.html"); err != nil {
fmt.Println(err)
}
err1 := tmpl.Execute(w,&Data{rights , tconf ,arr_sysrole})
if err1 != nil {
http.Error(w, err1.Error(), http.StatusInternalServerError)
}
}else {
r.ParseForm()
add_status, lastinsertedID := DoAdd(r.Form["branch"][0],r.Form["date"][0],r.Form["remarks"][0],username)
if(add_status){
str_lastinsertedID :=strconv.Itoa(lastinsertedID)
profile := Profile{"Data Successfully added ",true ,str_lastinsertedID}
js, err := json.Marshal(profile)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Println("Data Successfully added")
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}else{
profile := Profile{"Oppsss something went wrong",false,""}
js, err := json.Marshal(profile)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Println("Oppsss something went wrong")
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}
}
}
func HAddTagHandler(w http.ResponseWriter, r *http.Request) {
if r.Method =="POST" {
r.ParseForm()
item_id := r.Form["item_id"][0]
username, _ := login.Get_account_info(r)
Org_id :=login.Get_session_org_id(r)
str_OrgID :=strconv.Itoa(Org_id)
var returnData[] string
for key ,_ := range r.Form["tag"] {
tag := r.Form["tag"][key]
value_input := r.Form["value_input"][key]
remarks := r.Form["remarks"][key]
if(value_input!=""){
arr_data_itemtag := datatables.Data_row(`SIS_itemtags_save 'Update', '`+username+`',`+str_OrgID+`,`+item_id+`, `+tag+`, '`+value_input+`', '`+remarks+`'` )
fmt.Println(arr_data_itemtag)
returnData = append(returnData, arr_data_itemtag[0])
returnData = append(returnData, arr_data_itemtag[1])
if(strings.Contains(arr_data_itemtag[1] , `Error`)){
returnData = append(returnData, `HasError`)
}
}
}
js, err := json.Marshal(returnData)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}
}
//edit here
type LBR_OTHdr struct{
ID int
Status string
Trandate interface{}
Lbr_assign int
Remarks interface{}
}
type Dailysumhdr_get struct{
ID int
Branch interface{}
Docdate interface{}
Remarks interface{}
}
func LBR_OTHdr_Get_id( Hdr_id string ) Dailysumhdr_get {
//db_raw ,err, _,_ := config.Ap_sql(`LBR_OTHdr_Get 1 ,`+Hdr_id,1)
db_raw ,err, _,_ := config.Ap_sql(`dailysumhdr_get 1,`+Hdr_id,1)
if err != nil {
panic(err.Error())
}
var r Dailysumhdr_get
for db_raw.Next() {
err = db_raw.Scan(&r.ID, &r.Branch,&r.Docdate,&r.Remarks)
if err != nil {
panic(err.Error())
}
}
return r
}
func HEditHandler(w http.ResponseWriter, r *http.Request) {
login.Session_renew(w,r)
rights :=r.URL.Query().Get("rights")
Org_id :=login.Get_session_org_id(r)
str_OrgID :=strconv.Itoa(Org_id)
//rights :="rights"
if r.Method =="GET" {
username, _ := login.Get_account_info(r)
tconf := make(map[string]string)
tconf["h_id"] =r.URL.Query().Get("h_id")
tconf["rights"]=rights
tconf["username"] = username
tconf["org_id"] = str_OrgID
//tconf["data"] = datatables.DataList(sql)
//tconf["Assign_ID"] = strconv.Itoa( LBR_LogHdr_Get_id(tconf["h_id"]).Lbr_assign )
//_,session_user_id := login.Get_account_info(r)
session_user_id := 1 //static here
session_user_id_str := strconv.Itoa(session_user_id)
fmt.Println(session_user_id_str)
type Data struct {
Rights string
Conf map[string]string
}
//arr_data := datatables.Data_row(`exec branch_get 1, 1,`+ tconf["h_id"] )
arr_data := datatables.Data_row(`select orgMem.id orgmemID ,orgMem.member MemberID , orgMem.status, orgMem.remarks , lastname, firstname,middlename,Username,txdate
from member inner join orgMem on orgMem.member=member.id
where orgMem.id= `+tconf["h_id"] )
fmt.Println(arr_data)
tconf["orgmemID"] = arr_data[0]
tconf["memberID"] = arr_data[1]
tconf["status"] = arr_data[2]
tconf["remarks"] = arr_data[3]
tconf["lastname"] = arr_data[4]
tconf["firstname"] = arr_data[5]
tconf["middlename"] = arr_data[6]
tconf["username"] = arr_data[7]
tconf["txdate"] = arr_data[8]
//arr_data_itemclass := datatables.DataList(`select id,dbo.SIS_Itemclass_Name(1, 3, id) [itemclass_name] from sis_itemclass` )
//arr_data_supplier := datatables.DataList(`select id,dbo.SIS_Supplier_Name(1, 3, id) [supplier_name] from SIS_Supplier` )
tmpl := template.New("Hedit.html").Funcs(local_FuncMap)
var err error
if tmpl, err = tmpl.ParseFiles("admin/member_role/Hedit.html"); err != nil {
fmt.Println(err)
}
err1 := tmpl.Execute( w,&Data{rights,tconf} )
if err1 != nil {
http.Error(w, err1.Error(), http.StatusInternalServerError)
}
}else{
//session_username := `static`
r.ParseForm()
//_ ,err, ex_stat,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Edit', '`+session_username+`', `+r.Form["h_id"][0] +`, '`+r.Form["trandate"][0]+`', `+r.Form["lbr_assign"][0] +`, '`+r.Form["remarks"][0]+`'`,3)
//_ ,err, ex_stat,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Edit', '`+session_username+`', `+r.Form["h_id"][0] +`, '`+r.Form["trandate"][0]+`', `+r.Form["lbr_assign"][0] +`, '`+r.Form["remarks"][0]+`'`,3)
_ ,err, ex_stat,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', `+r.Form["h_id"][0] +` , 1, `+r.Form["branch"][0]+`, '`+r.Form["date"][0]+`', '`+r.Form["remarks"][0]+`'`,3)
//exec LBR_LogHdr_Save 'Edit', 'Ian', 3, '11 Jul 2016', 1, 'logfile abc', 'device abc', 'remarks abc'
if err != nil {
fmt.Println(err.Error())
panic(err.Error())
}
//lastinsertedID, _ := res.LastInsertId()
if ex_stat == true{
profile := Profile{"Data Successfully Update ",true ,""}
js, err := json.Marshal(profile)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Println("Data Successfully Edited")
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}else{
profile := Profile{"Oppsss something went wrong",false,""}
js, err := json.Marshal(profile)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Println("Oppsss something went wrong")
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}
}
}
func | HDeleteHandler | identifier_name |
|
header.go | ){
tconf["ActionCol_delete"] ="true" // config for delete click
}
if( strings.Contains(dec_rights, "HDetails") ){
tconf["ActionCol_detail"] ="true" // config for delete click
}
//end rights for tables
//_,session_user_id := login.Get_account_info(r)
session_user_id := 1 //static here
session_user_id_str := strconv.Itoa(session_user_id)
fmt.Println(session_user_id_str)
/* fmt.Println("exec LBR_LogHdr_List 1, 2, 1, 1, 1, '"+sortColName+"', '', '"+sr+"' " )
fmt.Println(sortColName,str,sr)*/
//fmt.Println(`exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`)
//tconf["sql_total"] = `exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`
//tconf["sql_data"] = `exec LBR_OTHdr_List 0, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`
//tconf["sql_total"] = `exec dailysumhdr_list 1, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'`
//tconf["sql_data"] = `exec dailysumhdr_list 0, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'`
Org_id :=login.Get_session_org_id(r)
str_OrgID :=strconv.Itoa(Org_id)
tconf["sql_total"] = `OrgMemRol_List 1, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'`
tconf["sql_data"] = `OrgMemRol_List 0, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'`
datatables.DatatableretArray(w,tconf)
}
}
type RetMessage struct {
RetNo int
RetMsg string
}
func DoAdd(branch string, date string, remarks string , username string) (bool ,int) {
//rowret ,err, _,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Add', '`+username+`', 0, '`+trandate+`', `+lbr_assign+`, '`+remarks+`'`,1)
rowret ,err, _,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', 0, 1, `+branch+`, '`+date+`', '`+remarks+`'`,1)
if err != nil {
panic(err.Error())
}
var r RetMessage
if err != nil {
fmt.Println(err.Error())
panic(err.Error())
}
for rowret.Next() {
err = rowret.Scan(&r.RetNo,&r.RetMsg)
if err != nil {
panic(err.Error())
}
r = RetMessage{r.RetNo,r.RetMsg}
}
if( strings.Contains(r.RetMsg, "Success") ){
return true ,r.RetNo
}else{
return false ,r.RetNo
}
}
var local_FuncMap = template.FuncMap{
"Sql_list": func(s_qry string , org_id string, tag_id string) [][]string{
fmt.Println(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id)
retdata := datatables.DataList(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id)
return retdata
},
}
func HAddHandler(w http.ResponseWriter, r *http.Request) {
login.Session_renew(w,r)
username, ses_uID := login.Get_account_info(r)
//username := "static"
//ses_uID := 1
str_ses_uID :=strconv.Itoa(ses_uID)
rights :=r.URL.Query().Get("rights")
fmt.Println(str_ses_uID)
//fmt.Fprint(w,rights)
if r.Method =="GET" {
Org_id :=login.Get_session_org_id(r)
str_OrgID :=strconv.Itoa(Org_id)
tconf := make(map[string]string)
//tconf["parentID"] =r.URL.Query().Get("parentID")
tconf["username"] =username
tconf["org_id"] = str_OrgID
tconf["test_js"] = `alert("from webserver")`
arr_sysrole := datatables.DataList(`sysrole_get 2`)
type Data struct {
Rights string
Conf map[string]string
Arr_Sysrole [][]string
}
tmpl := template.New("Hadd.html").Funcs(local_FuncMap)
var err error
if tmpl, err = tmpl.ParseFiles("admin/member_role/Hadd.html"); err != nil {
fmt.Println(err)
}
err1 := tmpl.Execute(w,&Data{rights , tconf ,arr_sysrole})
if err1 != nil {
http.Error(w, err1.Error(), http.StatusInternalServerError)
}
}else {
r.ParseForm()
add_status, lastinsertedID := DoAdd(r.Form["branch"][0],r.Form["date"][0],r.Form["remarks"][0],username)
if(add_status){
str_lastinsertedID :=strconv.Itoa(lastinsertedID)
profile := Profile{"Data Successfully added ",true ,str_lastinsertedID}
js, err := json.Marshal(profile)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Println("Data Successfully added")
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}else{
profile := Profile{"Oppsss something went wrong",false,""}
js, err := json.Marshal(profile)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Println("Oppsss something went wrong")
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}
}
}
func HAddTagHandler(w http.ResponseWriter, r *http.Request) {
if r.Method =="POST" {
r.ParseForm()
item_id := r.Form["item_id"][0]
username, _ := login.Get_account_info(r)
Org_id :=login.Get_session_org_id(r)
str_OrgID :=strconv.Itoa(Org_id)
var returnData[] string
for key ,_ := range r.Form["tag"] {
tag := r.Form["tag"][key]
value_input := r.Form["value_input"][key]
remarks := r.Form["remarks"][key]
if(value_input!=""){
arr_data_itemtag := datatables.Data_row(`SIS_itemtags_save 'Update', '`+username+`',`+str_OrgID+`,`+item_id+`, `+tag+`, '`+value_input+`', '`+remarks+`'` )
fmt.Println(arr_data_itemtag)
returnData = append(returnData, arr_data_itemtag[0])
returnData = append(returnData, arr_data_itemtag[1])
if(strings.Contains(arr_data_itemtag[1] , `Error`)){
returnData = append(returnData, `HasError`)
}
}
}
js, err := json.Marshal(returnData)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}
}
//edit here
type LBR_OTHdr struct{
ID int
Status string
Trandate interface{}
Lbr_assign int
Remarks interface{}
}
type Dailysumhdr_get struct{
ID int
Branch interface{}
Docdate interface{}
Remarks interface{}
}
func LBR_OTHdr_Get_id( Hdr_id string ) Dailysumhdr_get | {
//db_raw ,err, _,_ := config.Ap_sql(`LBR_OTHdr_Get 1 ,`+Hdr_id,1)
db_raw ,err, _,_ := config.Ap_sql(`dailysumhdr_get 1,`+Hdr_id,1)
if err != nil {
panic(err.Error())
}
var r Dailysumhdr_get
for db_raw.Next() {
err = db_raw.Scan(&r.ID, &r.Branch,&r.Docdate,&r.Remarks)
if err != nil {
panic(err.Error())
}
}
return r
} | identifier_body |
|
header.go | Col_param"] ="ID" // config for parameter of action
tconf["ActionCol_edit"] ="true" // config for edit click
tconf["ActionCol_edit_is_modal"] ="false" // config for edit click
//tconf["ActionCol_edit_url"] ="/timekeeping/overtime_logs/OvertimeLogsHeaderEdit?rights="+rights+"&h_id=" // config for edit click
tconf["ActionCol_edit_url"] ="/administrator/member_role/HEditHandler?rights="+rights+"&h_id=" // config for edit click
tconf["ActionCol_delete"] ="true" // config for delete click
tconf["ActionCol_delete_url"] ="/administrator/member_role/HDeleteHandler?h_id="
tconf["ActionCol_detail"] ="false" // config for details click
tconf["ActionCol_add_child"] ="false" // config for addchild click
tconf["style_size"] ="12"
tconf["style_font"] =""
tconf["format_number_col"] ="Amount"
tconf["dr"]=dr //draw
//getting total records etc
// rights for tables
if( strings.Contains(dec_rights, "HEdit") ){
tconf["ActionCol_edit"] ="true"
}
if( strings.Contains(dec_rights, "HDelete") ){
tconf["ActionCol_delete"] ="true" // config for delete click
}
if( strings.Contains(dec_rights, "HDetails") ){
tconf["ActionCol_detail"] ="true" // config for delete click
}
//end rights for tables
//_,session_user_id := login.Get_account_info(r)
session_user_id := 1 //static here
session_user_id_str := strconv.Itoa(session_user_id)
fmt.Println(session_user_id_str)
/* fmt.Println("exec LBR_LogHdr_List 1, 2, 1, 1, 1, '"+sortColName+"', '', '"+sr+"' " )
fmt.Println(sortColName,str,sr)*/
//fmt.Println(`exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`)
//tconf["sql_total"] = `exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`
//tconf["sql_data"] = `exec LBR_OTHdr_List 0, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`
//tconf["sql_total"] = `exec dailysumhdr_list 1, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'`
//tconf["sql_data"] = `exec dailysumhdr_list 0, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'`
Org_id :=login.Get_session_org_id(r)
str_OrgID :=strconv.Itoa(Org_id)
tconf["sql_total"] = `OrgMemRol_List 1, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'`
tconf["sql_data"] = `OrgMemRol_List 0, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'`
datatables.DatatableretArray(w,tconf)
}
}
type RetMessage struct {
RetNo int
RetMsg string
}
func DoAdd(branch string, date string, remarks string , username string) (bool ,int) {
//rowret ,err, _,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Add', '`+username+`', 0, '`+trandate+`', `+lbr_assign+`, '`+remarks+`'`,1)
rowret ,err, _,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', 0, 1, `+branch+`, '`+date+`', '`+remarks+`'`,1)
if err != nil {
panic(err.Error())
}
var r RetMessage
if err != nil {
fmt.Println(err.Error())
panic(err.Error())
}
for rowret.Next() {
err = rowret.Scan(&r.RetNo,&r.RetMsg)
if err != nil {
panic(err.Error())
}
r = RetMessage{r.RetNo,r.RetMsg}
}
if( strings.Contains(r.RetMsg, "Success") ){
return true ,r.RetNo
}else{
return false ,r.RetNo
}
}
var local_FuncMap = template.FuncMap{
"Sql_list": func(s_qry string , org_id string, tag_id string) [][]string{
fmt.Println(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id)
retdata := datatables.DataList(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id)
return retdata
},
}
func HAddHandler(w http.ResponseWriter, r *http.Request) {
login.Session_renew(w,r)
username, ses_uID := login.Get_account_info(r)
//username := "static"
//ses_uID := 1
str_ses_uID :=strconv.Itoa(ses_uID)
rights :=r.URL.Query().Get("rights")
fmt.Println(str_ses_uID)
//fmt.Fprint(w,rights)
if r.Method =="GET" {
Org_id :=login.Get_session_org_id(r)
str_OrgID :=strconv.Itoa(Org_id)
tconf := make(map[string]string)
//tconf["parentID"] =r.URL.Query().Get("parentID")
tconf["username"] =username
tconf["org_id"] = str_OrgID
tconf["test_js"] = `alert("from webserver")`
arr_sysrole := datatables.DataList(`sysrole_get 2`)
type Data struct {
Rights string
Conf map[string]string
Arr_Sysrole [][]string
}
tmpl := template.New("Hadd.html").Funcs(local_FuncMap)
var err error
if tmpl, err = tmpl.ParseFiles("admin/member_role/Hadd.html"); err != nil {
fmt.Println(err)
}
err1 := tmpl.Execute(w,&Data{rights , tconf ,arr_sysrole})
if err1 != nil {
http.Error(w, err1.Error(), http.StatusInternalServerError)
}
}else {
r.ParseForm()
add_status, lastinsertedID := DoAdd(r.Form["branch"][0],r.Form["date"][0],r.Form["remarks"][0],username)
if(add_status){
str_lastinsertedID :=strconv.Itoa(lastinsertedID)
profile := Profile{"Data Successfully added ",true ,str_lastinsertedID}
js, err := json.Marshal(profile)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Println("Data Successfully added")
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}else{
profile := Profile{"Oppsss something went wrong",false,""}
js, err := json.Marshal(profile)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Println("Oppsss something went wrong")
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}
}
}
func HAddTagHandler(w http.ResponseWriter, r *http.Request) {
if r.Method =="POST" | {
r.ParseForm()
item_id := r.Form["item_id"][0]
username, _ := login.Get_account_info(r)
Org_id :=login.Get_session_org_id(r)
str_OrgID :=strconv.Itoa(Org_id)
var returnData[] string
for key ,_ := range r.Form["tag"] {
tag := r.Form["tag"][key]
value_input := r.Form["value_input"][key]
remarks := r.Form["remarks"][key]
if(value_input!=""){
arr_data_itemtag := datatables.Data_row(`SIS_itemtags_save 'Update', '`+username+`',`+str_OrgID+`,`+item_id+`, `+tag+`, '`+value_input+`', '`+remarks+`'` )
fmt.Println(arr_data_itemtag)
returnData = append(returnData, arr_data_itemtag[0])
returnData = append(returnData, arr_data_itemtag[1])
if(strings.Contains(arr_data_itemtag[1] , `Error`)){
returnData = append(returnData, `HasError`) | conditional_block |
|
main.rs | fmt::Result {self.as_never()}
}
impl ErrorTrait for Never {}
}
#[macro_use]
mod rect;
mod network;
mod absm;
pub struct Setup {
///Map from input device coordinates to output client coordinates.
pub mapping: Mapping,
///Specify a minimum and a maximum on the final client coordinates.
pub clip: Rect<i32>,
///Specify a range of pressures.
///Events with a pressure outside this range are ignored.
pub pressure: [f32; 2],
///Specify a range of sizes, similarly to `pressure`.
pub size: [f32; 2],
}
impl Setup {
fn new(info: &ServerInfo,config: &Config)->Setup {
//Target area is set immutably by the config
let target=config.target;
//Start off with source area as the entire device screen
//Source area is more mutable than target area
let mut source=Rect{min: Pair([0.0; 2]),max: info.server_screen_res};
println!("device screen area: {}",source);
//Correct any device rotations
if config.correct_device_orientation {
if source.aspect()!=target.aspect() {
//Source screen should be rotated 90° counterclockwise to correct orientation
source.rotate_negative();
println!("rotated 90ยฐ counterclockwise to correct device orientation");
}else{
println!("device orientation is aligned with client orientation");
}
}else{
println!("device orientation correction is disabled");
}
//Apply config device source area proportions
let mut source=Rect{
min: source.map(|int| int as f32).denormalizer().apply(config.source.min),
max: source.map(|int| int as f32).denormalizer().apply(config.source.max),
};
//Correct orientation if source and target don't have matching aspects
if config.correct_orientation {
if source.aspect()!=target.aspect() {
source.rotate_negative();
println!("rotated 90ยฐ counterclockwise to correct orientation mismatch");
}else{
println!("final orientation matches target orientation");
}
}else{
println!("final orientation correction is disabled");
}
//Shrink a source axis to match target aspect ratio
if config.keep_aspect_ratio {
let shrink=|source: &mut Rect<f32>,shrink_axis: Axis| {
let fixed_axis=shrink_axis.swap();
//Get the target size of the shrink axis
let target=target.virtual_size(shrink_axis) as f32*source.virtual_size(fixed_axis)
/ target.virtual_size(fixed_axis) as f32;
source.resize_virtual_axis(shrink_axis,target);
};
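// Worked example (assuming aspect_ratio() means width/height): with a 1920x1080 target and a
// 1000x1000 source, the target aspect (1.78) is greater than the source aspect (1.0), so the Y
// axis is shrunk to 1080 * 1000 / 1920 = 562.5, giving a 1000x562.5 source that matches the target.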
match target.map(|int| int as f32).aspect_ratio().partial_cmp(&source.aspect_ratio()).unwrap() {
Ordering::Greater=>{
//Shrink vertically to match aspect ratio
let old=source.virtual_size(Axis::Y);
shrink(&mut source,Axis::Y);
println!(
"shrank source area vertically from {} to {} to match target aspect ratio",
old,source.virtual_size(Axis::Y)
);
},
Ordering::Less=>{
//Shrink horizontally to match aspect ratio
let old=source.virtual_size(Axis::X);
shrink(&mut source,Axis::X);
println!(
"shrank source area horizontally from {} to {} to match target aspect ratio",
old,source.virtual_size(Axis::X)
);
},
Ordering::Equal=>{
println!("source aspect ratio matches target aspect ratio");
},
}
}else{
println!("aspect ratio correction is disabled");
}
println!("mapping source area {} to target area {}",source,target);
let pressure=[
config.pressure_range[0].unwrap_or(-std::f32::INFINITY),
config.pressure_range[1].unwrap_or(std::f32::INFINITY),
];
let size=[
config.size_range[0].unwrap_or(-std::f32::INFINITY),
config.size_range[1].unwrap_or(std::f32::INFINITY),
];
println!("clipping target to {}",config.clip);
println!("only allowing touches with pressures inside {:?} and sizes inside {:?}",pressure,size);
Setup{
mapping: source.normalizer().chain(&target.map(|int| int as f32).denormalizer()),
clip: config.clip,
pressure,size,
}
}
fn consume(&mut self,ev: MouseMove) {
if ev.pressure<self.pressure[0] || ev.pressure>self.pressure[1] {return}
if ev.size<self.size[0] || ev.size>self.size[1] {return}
let pos=self.mapping.apply(ev.pos);
let adjusted=pair!(i=> (pos[i] as i32).max(self.clip.min[i]).min(self.clip.max[i]));
MouseCursor.move_abs(adjusted[Axis::X],adjusted[Axis::Y]);
}
}
#[derive(Deserialize,Serialize)]
#[serde(default)]
pub struct Config {
///The target area to be mapped, in screen pixels.
pub target: Rect<i32>,
///The source area to be mapped, in normalized coordinates from `0.0` to `1.0`.
pub source: Rect<f32>,
///After all transformations, clip mouse positions to this rectangle.
pub clip: Rect<i32>,
///If the device screen is rotated, rotate it back to compensate.
pub correct_device_orientation: bool,
///If after all transformations the source area is rotated, rotate it back to match target
///orientation (landscape or portrait).
pub correct_orientation: bool,
///If the source area does not have the same aspect ratio as the target area, shrink it a bit
///in a single axis to fit.
pub keep_aspect_ratio: bool,
///Only allow touches within this pressure range to go through.
pub pressure_range: [Option<f32>; 2],
///Only allow touches within this size range to go through.
pub size_range: [Option<f32>; 2],
///Connect to this remote.
pub remote: Remote,
///When ADB port forwarding, map this port on the device.
pub android_usb_port: u16, | impl Default for Config {
fn default()->Config {
let screen_res=get_screen_resolution();
Config{
target: Rect{min: pair!(_=>0),max: screen_res},
source: Rect{min: pair!(_=>0.05),max: pair!(_=>0.95)},
clip: Rect{min: pair!(_=>0),max: screen_res},
correct_device_orientation: true,
correct_orientation: true,
keep_aspect_ratio: true,
pressure_range: [None; 2],
size_range: [None; 2],
remote: Remote::Tcp("localhost".into(),8517),
android_usb_port: 8517,
android_attempt_usb_connection: true,
}
}
}
impl Config {
fn load_path(cfg_path: &str)->Config {
println!("loading config file at '{}'",cfg_path);
match File::open(&cfg_path) {
Err(err)=>{
println!("failed to open config at '{}', using defaults:\n {}",cfg_path,err);
let config=Config::default();
match File::create(&cfg_path) {
Err(err)=>{
println!("failed to create config file on '{}':\n {}",cfg_path,err);
},
Ok(mut file)=>{
let cfg=ron::ser::to_string_pretty(&config,Default::default()).expect("error serializing default config");
file.write_all(cfg.as_bytes()).expect("failed to write config file");
println!("created default config file on '{}'",cfg_path);
},
}
config
},
Ok(file)=>{
let config=ron::de::from_reader(file).expect("malformed configuration file");
println!("loaded config file '{}'",cfg_path);
config
},
}
}
}
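// Usage sketch: the config path comes from the first command-line argument and defaults to
// "config.txt" (see main() below), e.g. `./<binary> my-config.txt` with a hypothetical binary name;
// a missing file is recreated with the defaults serialized as RON.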
#[derive(Deserialize)]
struct MouseMove {
pos: Pair<f32>,
pressure: f32,
size: f32,
}
fn get_screen_resolution()->Pair<i32> {
let screenshot=screenshot::get_screenshot(0).expect("failed to get screen dimensions");
Pair([screenshot.width() as i32,screenshot.height() as i32])
}
fn try_adb_forward<P: AsRef<Path>>(path: P,config: &Config)->Result<()> {
use std::process::{Command};
let local_port=match config.remote {
Remote::Tcp(_,port)=>port,
_ => {
println!("not connecting through tcp, skipping adb port forwarding");
return Ok(())
},
};
println!("attempting to adb port forward using executable on '{}'",path.as_ref().display());
let out=Command::new(path.as_ref())
.arg("forward")
.arg(format!("tcp:{}",local_port))
.arg(format!("tcp:{}",config.android_usb_port))
.output();
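// For reference, this is the same as running the adb CLI by hand, e.g. `adb forward tcp:8517 tcp:8517`
// (the ports shown are the defaults; the actual values are taken from the config above).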
match out {
Ok(out)=>{
if out.status.success() {
println!(" adb exit code indicates success");
Ok(())
}else{
| ///Whether to attempt to do ADB port forwarding automatically.
///The android device needs to have `USB Debugging` enabled.
pub android_attempt_usb_connection: bool,
} | random_line_split |
main.rs | orientation");
}else{
println!("device orientation is aligned with client orientation");
}
}else{
println!("device orientation correction is disabled");
}
//Apply config device source area proportions
let mut source=Rect{
min: source.map(|int| int as f32).denormalizer().apply(config.source.min),
max: source.map(|int| int as f32).denormalizer().apply(config.source.max),
};
//Correct orientation if source and target don't have matching aspects
if config.correct_orientation {
if source.aspect()!=target.aspect() {
source.rotate_negative();
println!("rotated 90ยฐ counterclockwise to correct orientation mismatch");
}else{
println!("final orientation matches target orientation");
}
}else{
println!("final orientation correction is disabled");
}
//Shrink a source axis to match target aspect ratio
if config.keep_aspect_ratio {
let shrink=|source: &mut Rect<f32>,shrink_axis: Axis| {
let fixed_axis=shrink_axis.swap();
//Get the target size of the shrink axis
let target=target.virtual_size(shrink_axis) as f32*source.virtual_size(fixed_axis)
/ target.virtual_size(fixed_axis) as f32;
source.resize_virtual_axis(shrink_axis,target);
};
match target.map(|int| int as f32).aspect_ratio().partial_cmp(&source.aspect_ratio()).unwrap() {
Ordering::Greater=>{
//Shrink vertically to match aspect ratio
let old=source.virtual_size(Axis::Y);
shrink(&mut source,Axis::Y);
println!(
"shrank source area vertically from {} to {} to match target aspect ratio",
old,source.virtual_size(Axis::Y)
);
},
Ordering::Less=>{
//Shrink horizontally to match aspect ratio
let old=source.virtual_size(Axis::X);
shrink(&mut source,Axis::X);
println!(
"shrank source area horizontally from {} to {} to match target aspect ratio",
old,source.virtual_size(Axis::X)
);
},
Ordering::Equal=>{
println!("source aspect ratio matches target aspect ratio");
},
}
}else{
println!("aspect ratio correction is disabled");
}
println!("mapping source area {} to target area {}",source,target);
let pressure=[
config.pressure_range[0].unwrap_or(-std::f32::INFINITY),
config.pressure_range[1].unwrap_or(std::f32::INFINITY),
];
let size=[
config.size_range[0].unwrap_or(-std::f32::INFINITY),
config.size_range[1].unwrap_or(std::f32::INFINITY),
];
println!("clipping target to {}",config.clip);
println!("only allowing touches with pressures inside {:?} and sizes inside {:?}",pressure,size);
Setup{
mapping: source.normalizer().chain(&target.map(|int| int as f32).denormalizer()),
clip: config.clip,
pressure,size,
}
}
fn consume(&mut self,ev: MouseMove) {
if ev.pressure<self.pressure[0] || ev.pressure>self.pressure[1] {return}
if ev.size<self.size[0] || ev.size>self.size[1] {return}
let pos=self.mapping.apply(ev.pos);
let adjusted=pair!(i=> (pos[i] as i32).max(self.clip.min[i]).min(self.clip.max[i]));
MouseCursor.move_abs(adjusted[Axis::X],adjusted[Axis::Y]);
}
}
#[derive(Deserialize,Serialize)]
#[serde(default)]
pub struct Config {
///The target area to be mapped, in screen pixels.
pub target: Rect<i32>,
///The source area to be mapped, in normalized coordinates from `0.0` to `1.0`.
pub source: Rect<f32>,
///After all transformations, clip mouse positions to this rectangle.
pub clip: Rect<i32>,
///If the device screen is rotated, rotate it back to compensate.
pub correct_device_orientation: bool,
///If after all transformations the source area is rotated, rotate it back to match target
///orientation (landscape or portrait).
pub correct_orientation: bool,
///If the source area does not have the same aspect ratio as the target area, shrink it a bit
///in a single axis to fit.
pub keep_aspect_ratio: bool,
///Only allow touches within this pressure range to go through.
pub pressure_range: [Option<f32>; 2],
///Only allow touches within this size range to go through.
pub size_range: [Option<f32>; 2],
///Connect to this remote.
pub remote: Remote,
///When ADB port forwarding, map this port on the device.
pub android_usb_port: u16,
///Whether to attempt to do ADB port forwarding automatically.
///The android device needs to have `USB Debugging` enabled.
pub android_attempt_usb_connection: bool,
}
impl Default for Config {
fn default()->Config {
let screen_res=get_screen_resolution();
Config{
target: Rect{min: pair!(_=>0),max: screen_res},
source: Rect{min: pair!(_=>0.05),max: pair!(_=>0.95)},
clip: Rect{min: pair!(_=>0),max: screen_res},
correct_device_orientation: true,
correct_orientation: true,
keep_aspect_ratio: true,
pressure_range: [None; 2],
size_range: [None; 2],
remote: Remote::Tcp("localhost".into(),8517),
android_usb_port: 8517,
android_attempt_usb_connection: true,
}
}
}
impl Config {
fn load_path(cfg_path: &str)->Config {
println!("loading config file at '{}'",cfg_path);
match File::open(&cfg_path) {
Err(err)=>{
println!("failed to open config at '{}', using defaults:\n {}",cfg_path,err);
let config=Config::default();
match File::create(&cfg_path) {
Err(err)=>{
println!("failed to create config file on '{}':\n {}",cfg_path,err);
},
Ok(mut file)=>{
let cfg=ron::ser::to_string_pretty(&config,Default::default()).expect("error serializing default config");
file.write_all(cfg.as_bytes()).expect("failed to write config file");
println!("created default config file on '{}'",cfg_path);
},
}
config
},
Ok(file)=>{
let config=ron::de::from_reader(file).expect("malformed configuration file");
println!("loaded config file '{}'",cfg_path);
config
},
}
}
}
#[derive(Deserialize)]
struct MouseMove {
pos: Pair<f32>,
pressure: f32,
size: f32,
}
fn get_screen_resolution()->Pair<i32> {
let screenshot=screenshot::get_screenshot(0).expect("failed to get screen dimensions");
Pair([screenshot.width() as i32,screenshot.height() as i32])
}
fn try_adb_forward<P: AsRef<Path>>(path: P,config: &Config)->Result<()> {
use std::process::{Command};
let local_port=match config.remote {
Remote::Tcp(_,port)=>port,
_ => {
println!("not connecting through tcp, skipping adb port forwarding");
return Ok(())
},
};
println!("attempting to adb port forward using executable on '{}'",path.as_ref().display());
let out=Command::new(path.as_ref())
.arg("forward")
.arg(format!("tcp:{}",local_port))
.arg(format!("tcp:{}",config.android_usb_port))
.output();
match out {
Ok(out)=>{
if out.status.success() {
println!(" adb exit code indicates success");
Ok(())
}else{
println!(" adb exited with error exit code {:?}",out.status.code());
let lines=|out| for line in String::from_utf8_lossy(out).trim().lines() {
println!(" {}",line.trim());
};
println!(" adb output:");
lines(&out.stdout);
println!(" adb error output:");
lines(&out.stderr);
println!(" device might be disconnected or usb debugging disabled");
Err("error exit code".into())
}
},
Err(err)=>{
println!(
" failed to run command: {}",
err
);
Err("failed to run command".into())
},
}
}
fn main() {
| //Parse arguments
let exec_path;
let cfg_path;
{
let mut args=env::args();
exec_path=args.next().expect("first argument should always be executable path!");
cfg_path=args.next().unwrap_or_else(|| String::from("config.txt"));
}
//Load configuration
let config=Config::load_path(&cfg_path);
//Try port forwarding using adb
if config.android_attempt_usb_connection {
let ok=try_adb_forward(&Path::new(&exec_path).with_file_name("adb"),&config)
.or_else(|_err| try_adb_forward("adb",&config));
match ok {
Ok(())=>println!(
"opened communication tunnel to android device"
), | identifier_body |
|
main.rs | fmt::Result {self.as_never()}
}
impl ErrorTrait for Never {}
}
#[macro_use]
mod rect;
mod network;
mod absm;
pub struct Setup {
///Map from input device coordinates to output client coordinates.
pub mapping: Mapping,
///Specify a minimum and a maximum on the final client coordinates.
pub clip: Rect<i32>,
///Specify a range of pressures.
///Events with a pressure outside this range are ignored.
pub pressure: [f32; 2],
///Specify a range of sizes, similarly to `pressure`.
pub size: [f32; 2],
}
impl Setup {
fn new(info: &ServerInfo,config: &Config)->Setup {
//Target area is set immutably by the config
let target=config.target;
//Start off with source area as the entire device screen
//Source area is more mutable than target area
let mut source=Rect{min: Pair([0.0; 2]),max: info.server_screen_res};
println!("device screen area: {}",source);
//Correct any device rotations
if config.correct_device_orientation {
if source.aspect()!=target.aspect() {
//Source screen should be rotated 90° counterclockwise to correct orientation
source.rotate_negative();
println!("rotated 90ยฐ counterclockwise to correct device orientation");
}else{
println!("device orientation is aligned with client orientation");
}
}else{
println!("device orientation correction is disabled");
}
//Apply config device source area proportions
let mut source=Rect{
min: source.map(|int| int as f32).denormalizer().apply(config.source.min),
max: source.map(|int| int as f32).denormalizer().apply(config.source.max),
};
//Correct orientation if source and target don't have matching aspects
if config.correct_orientation {
if source.aspect()!=target.aspect() {
source.rotate_negative();
println!("rotated 90ยฐ counterclockwise to correct orientation mismatch");
}else{
println!("final orientation matches target orientation");
}
}else{
println!("final orientation correction is disabled");
}
//Shrink a source axis to match target aspect ratio
if config.keep_aspect_ratio {
let shrink=|source: &mut Rect<f32>,shrink_axis: Axis| {
let fixed_axis=shrink_axis.swap();
//Get the target size of the shrink axis
let target=target.virtual_size(shrink_axis) as f32*source.virtual_size(fixed_axis)
/ target.virtual_size(fixed_axis) as f32;
source.resize_virtual_axis(shrink_axis,target);
};
match target.map(|int| int as f32).aspect_ratio().partial_cmp(&source.aspect_ratio()).unwrap() {
Ordering::Greater=>{
//Shrink vertically to match aspect ratio
let old=source.virtual_size(Axis::Y);
shrink(&mut source,Axis::Y);
println!(
"shrank source area vertically from {} to {} to match target aspect ratio",
old,source.virtual_size(Axis::Y)
);
},
Ordering::Less=>{
//Shrink horizontally to match aspect ratio
let old=source.virtual_size(Axis::X);
shrink(&mut source,Axis::X);
println!(
"shrank source area horizontally from {} to {} to match target aspect ratio",
old,source.virtual_size(Axis::X)
);
},
Ordering::Equal=>{
println!("source aspect ratio matches target aspect ratio");
},
}
}else{
println!("aspect ratio correction is disabled");
}
println!("mapping source area {} to target area {}",source,target);
let pressure=[
config.pressure_range[0].unwrap_or(-std::f32::INFINITY),
config.pressure_range[1].unwrap_or(std::f32::INFINITY),
];
let size=[
config.size_range[0].unwrap_or(-std::f32::INFINITY),
config.size_range[1].unwrap_or(std::f32::INFINITY),
];
println!("clipping target to {}",config.clip);
println!("only allowing touches with pressures inside {:?} and sizes inside {:?}",pressure,size);
Setup{
mapping: source.normalizer().chain(&target.map(|int| int as f32).denormalizer()),
clip: config.clip,
pressure,size,
}
}
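///Map one touch event to client coordinates: drop it if pressure or size is out of range, apply the mapping, clamp to the clip rectangle and move the cursor.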
fn consume(&mut self,ev: MouseMove) {
if ev.pressure<self.pressure[0] || ev.pressure>self.pressure[1] {return}
if ev.size<self.size[0] || ev.size>self.size[1] {return}
let pos=self.mapping.apply(ev.pos);
let adjusted=pair!(i=> (pos[i] as i32).max(self.clip.min[i]).min(self.clip.max[i]));
MouseCursor.move_abs(adjusted[Axis::X],adjusted[Axis::Y]);
}
}
#[derive(Deserialize,Serialize)]
#[serde(default)]
pub struct Config {
///The target area to be mapped, in screen pixels.
pub target: Rect<i32>,
///The source area to be mapped, in normalized coordinates from `0.0` to `1.0`.
pub source: Rect<f32>,
///After all transformations, clip mouse positions to this rectangle.
pub clip: Rect<i32>,
///If the device screen is rotated, rotate it back to compensate.
pub correct_device_orientation: bool,
///If after all transformations the source area is rotated, rotate it back to match target
///orientation (landscape or portrait).
pub correct_orientation: bool,
///If the source area does not have the same aspect ratio as the target area, shrink it a bit
///in a single axis to fit.
pub keep_aspect_ratio: bool,
///Only allow touches within this pressure range to go through.
pub pressure_range: [Option<f32>; 2],
///Only allow touches within this size range to go through.
pub size_range: [Option<f32>; 2],
///Connect to this remote.
pub remote: Remote,
///When ADB port forwarding, map this port on the device.
pub android_usb_port: u16,
///Whether to attempt to do ADB port forwarding automatically.
///The android device needs to have `USB Debugging` enabled.
pub android_attempt_usb_connection: bool,
}
impl Default for Config {
fn default()->Config {
let screen_res=get_screen_resolution();
Config{
target: Rect{min: pair!(_=>0),max: screen_res},
source: Rect{min: pair!(_=>0.05),max: pair!(_=>0.95)},
clip: Rect{min: pair!(_=>0),max: screen_res},
correct_device_orientation: true,
correct_orientation: true,
keep_aspect_ratio: true,
pressure_range: [None; 2],
size_range: [None; 2],
remote: Remote::Tcp("localhost".into(),8517),
android_usb_port: 8517,
android_attempt_usb_connection: true,
}
}
}
impl Config {
fn load_path(cfg_path: &str)->Config {
println!("loading config file at '{}'",cfg_path);
match File::open(&cfg_path) {
Err(err)=>{
println!("failed to open config at '{}', using defaults:\n {}",cfg_path,err);
let config=Config::default();
match File::create(&cfg_path) {
Err(err)=>{
println!("failed to create config file on '{}':\n {}",cfg_path,err);
},
Ok(mut file)=>{
let cfg=ron::ser::to_string_pretty(&config,Default::default()).expect("error serializing default config");
file.write_all(cfg.as_bytes()).expect("failed to write config file");
println!("created default config file on '{}'",cfg_path);
},
}
config
},
Ok(file)=>{
let config=ron::de::from_reader(file).expect("malformed configuration file");
println!("loaded config file '{}'",cfg_path);
config
},
}
}
}
#[derive(Deserialize)]
struct MouseMove {
pos: Pair<f32>,
pressure: f32,
size: f32,
}
fn get_screen_resolution()->Pair<i32> {
let screenshot=screenshot::get_screenshot(0).expect("failed to get screen dimensions");
Pair([screenshot.width() as i32,screenshot.height() as i32])
}
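//Use the given adb executable to forward the local TCP port of `config.remote` to `android_usb_port` on the device; skipped for non-TCP remotes.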
fn try | AsRef<Path>>(path: P,config: &Config)->Result<()> {
use std::process::{Command};
let local_port=match config.remote {
Remote::Tcp(_,port)=>port,
_ => {
println!("not connecting through tcp, skipping adb port forwarding");
return Ok(())
},
};
println!("attempting to adb port forward using executable on '{}'",path.as_ref().display());
let out=Command::new(path.as_ref())
.arg("forward")
.arg(format!("tcp:{}",local_port))
.arg(format!("tcp:{}",config.android_usb_port))
.output();
match out {
Ok(out)=>{
if out.status.success() {
println!(" adb exit code indicates success");
Ok(())
}else{
| _adb_forward<P: | identifier_name |
offset-monitor.rs | }
}
fn run(cfg: Config) -> Result<()> {
let mut client = KafkaClient::new(cfg.brokers.clone());
client.set_group_offset_storage(cfg.offset_storage);
try!(client.load_metadata_all());
// ~ if no topic specified, print all available and be done.
if cfg.topic.is_empty() {
let ts = client.topics();
let num_topics = ts.len();
if num_topics == 0 {
bail!("no topics available");
}
let mut names: Vec<&str> = Vec::with_capacity(ts.len());
names.extend(ts.names());
names.sort();
let mut buf = BufWriter::with_capacity(1024, stdout());
for name in names {
let _ = write!(buf, "topic: {}\n", name);
}
bail!("choose a topic");
}
// ~ otherwise let's loop over the topic partition offsets
let num_partitions = match client.topics().partitions(&cfg.topic) {
None => bail!(format!("no such topic: {}", &cfg.topic)),
Some(partitions) => partitions.len(),
};
let mut state = State::new(num_partitions, cfg.commited_not_consumed);
let mut printer = Printer::new(stdout(), &cfg);
try!(printer.print_head(num_partitions));
// ~ initialize the state
let mut first_time = true;
loop {
let t = time::now();
try!(state.update_partitions(&mut client, &cfg.topic, &cfg.group));
if first_time {
state.curr_to_prev();
first_time = false;
}
try!(printer.print_offsets(&t, &state.offsets));
thread::sleep(cfg.period);
}
}
#[derive(Copy, Clone)]
struct Partition {
prev_latest: i64,
curr_latest: i64,
curr_lag: i64,
}
impl Default for Partition {
fn default() -> Self {
Partition {
prev_latest: -1,
curr_latest: -1,
curr_lag: -1,
}
}
}
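//Tracks previous/current latest offsets and consumer lag for every partition of the monitored topic.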
struct State {
offsets: Vec<Partition>,
lag_decr: i64,
}
impl State {
fn new(num_partitions: usize, commited_not_consumed: bool) -> State {
State {
offsets: vec![Default::default(); num_partitions],
lag_decr: if commited_not_consumed { 0 } else { 1 },
}
}
fn update_partitions(
&mut self,
client: &mut KafkaClient,
topic: &str,
group: &str,
) -> Result<()> {
// ~ get the latest topic offsets
let latests = try!(client.fetch_topic_offsets(topic, FetchOffset::Latest));
for l in latests {
let off = self.offsets.get_mut(l.partition as usize).expect(
"[topic offset] non-existent partition",
);
off.prev_latest = off.curr_latest;
off.curr_latest = l.offset;
}
if !group.is_empty() {
// ~ get the current group offsets
let groups = try!(client.fetch_group_topic_offsets(group, topic));
for g in groups {
let off = self.offsets.get_mut(g.partition as usize).expect(
"[group offset] non-existent partition",
);
// ~ it's quite likely that we fetched group offsets
// which are a bit ahead of the topic's latest offset
// since we issued the fetch-latest-offset request
// earlier than the request for the group offsets
off.curr_lag = cmp::max(0, off.curr_latest - g.offset - self.lag_decr);
}
}
Ok(())
}
fn curr_to_prev(&mut self) {
for o in &mut self.offsets {
o.prev_latest = o.curr_latest;
}
}
}
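//Formats and writes the header row and one line of offsets (optionally growth and lag) per polling interval.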
struct Printer<W> {
out: W,
timefmt: String,
fmt_buf: String,
out_buf: String,
time_width: usize,
offset_width: usize,
diff_width: usize,
lag_width: usize,
print_diff: bool,
print_lag: bool,
print_summary: bool,
}
impl<W: Write> Printer<W> {
fn new(out: W, cfg: &Config) -> Printer<W> {
Printer {
out: out,
timefmt: "%H:%M:%S".into(),
fmt_buf: String::with_capacity(30),
out_buf: String::with_capacity(160),
time_width: 10,
offset_width: 11,
diff_width: 8,
lag_width: 6,
print_diff: cfg.diff,
print_lag: !cfg.group.is_empty(),
print_summary: cfg.summary,
}
}
fn print_head(&mut self, num_partitions: usize) -> Result<()> {
self.out_buf.clear();
{
// ~ format
use std::fmt::Write;
let _ = write!(self.out_buf, "{1:<0$}", self.time_width, "time");
if self.print_summary {
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, "topic");
if self.print_diff {
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth");
}
if self.print_lag {
let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)");
}
} else {
for i in 0..num_partitions {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "p-{}", i);
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, self.fmt_buf);
if self.print_diff {
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth");
}
if self.print_lag {
let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)");
}
}
}
self.out_buf.push('\n');
}
{
// ~ print
try!(self.out.write_all(self.out_buf.as_bytes()));
Ok(())
}
}
fn print_offsets(&mut self, time: &time::Tm, partitions: &[Partition]) -> Result<()> {
self.out_buf.clear();
{
// ~ format
use std::fmt::Write;
self.fmt_buf.clear();
let _ =
write!(self.fmt_buf, "{}", time.strftime(&self.timefmt).expect("invalid timefmt"));
let _ = write!(self.out_buf, "{1:<0$}", self.time_width, self.fmt_buf);
if self.print_summary {
let mut prev_latest = 0;
let mut curr_latest = 0;
let mut curr_lag = 0;
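// Sum the per-partition values; -1 marks an unknown value and forces the corresponding total to -1.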
for p in partitions {
macro_rules! cond_add {
($v:ident) => {
if $v != -1 {
if p.$v < 0 { $v = -1; } else { $v += p.$v; }
}
}
};
cond_add!(prev_latest);
cond_add!(curr_latest);
cond_add!(curr_lag);
}
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, curr_latest);
if self.print_diff {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "{:+}", curr_latest - prev_latest);
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf);
}
if self.print_lag { | let _ = write!(self.fmt_buf, "({})", curr_lag);
let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf);
}
} else {
for p in partitions {
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, p.curr_latest);
if self.print_diff {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "{:+}", p.curr_latest - p.prev_latest);
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf);
}
if self.print_lag {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "({})", p.curr_lag);
let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf);
}
}
}
}
self.out_buf.push('\n');
try!(self.out.write_all(self.out_buf.as_bytes()));
Ok(())
}
}
// --------------------------------------------------------------------
struct Config {
brokers: Vec<String>,
topic: String,
group: String,
offset_storage: GroupOffsetStorage,
period: stdtime::Duration,
commited_not_consumed: bool,
summary: bool,
diff: bool,
}
impl Config {
fn from_cmdline() -> Result<Config> {
let args: Vec<String> = env::args().collect();
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "Print this help screen");
opts.optopt("", "brokers", "Specify kafka bootstrap | self.fmt_buf.clear(); | random_line_split |
offset-monitor.rs | }
}
fn run(cfg: Config) -> Result<()> {
let mut client = KafkaClient::new(cfg.brokers.clone());
client.set_group_offset_storage(cfg.offset_storage);
try!(client.load_metadata_all());
// ~ if no topic specified, print all available and be done.
if cfg.topic.is_empty() {
let ts = client.topics();
let num_topics = ts.len();
if num_topics == 0 {
bail!("no topics available");
}
let mut names: Vec<&str> = Vec::with_capacity(ts.len());
names.extend(ts.names());
names.sort();
let mut buf = BufWriter::with_capacity(1024, stdout());
for name in names {
let _ = write!(buf, "topic: {}\n", name);
}
bail!("choose a topic");
}
// ~ otherwise let's loop over the topic partition offsets
let num_partitions = match client.topics().partitions(&cfg.topic) {
None => bail!(format!("no such topic: {}", &cfg.topic)),
Some(partitions) => partitions.len(),
};
let mut state = State::new(num_partitions, cfg.commited_not_consumed);
let mut printer = Printer::new(stdout(), &cfg);
try!(printer.print_head(num_partitions));
// ~ initialize the state
let mut first_time = true;
loop {
let t = time::now();
try!(state.update_partitions(&mut client, &cfg.topic, &cfg.group));
if first_time {
state.curr_to_prev();
first_time = false;
}
try!(printer.print_offsets(&t, &state.offsets));
thread::sleep(cfg.period);
}
}
#[derive(Copy, Clone)]
struct | {
prev_latest: i64,
curr_latest: i64,
curr_lag: i64,
}
impl Default for Partition {
fn default() -> Self {
Partition {
prev_latest: -1,
curr_latest: -1,
curr_lag: -1,
}
}
}
struct State {
offsets: Vec<Partition>,
lag_decr: i64,
}
impl State {
fn new(num_partitions: usize, commited_not_consumed: bool) -> State {
State {
offsets: vec![Default::default(); num_partitions],
lag_decr: if commited_not_consumed { 0 } else { 1 },
}
}
fn update_partitions(
&mut self,
client: &mut KafkaClient,
topic: &str,
group: &str,
) -> Result<()> {
// ~ get the latest topic offsets
let latests = try!(client.fetch_topic_offsets(topic, FetchOffset::Latest));
for l in latests {
let off = self.offsets.get_mut(l.partition as usize).expect(
"[topic offset] non-existent partition",
);
off.prev_latest = off.curr_latest;
off.curr_latest = l.offset;
}
if !group.is_empty() {
// ~ get the current group offsets
let groups = try!(client.fetch_group_topic_offsets(group, topic));
for g in groups {
let off = self.offsets.get_mut(g.partition as usize).expect(
"[group offset] non-existent partition",
);
// ~ it's quite likely that we fetched group offsets
// which are a bit ahead of the topic's latest offset
// since we issued the fetch-latest-offset request
// earlier than the request for the group offsets
off.curr_lag = cmp::max(0, off.curr_latest - g.offset - self.lag_decr);
}
}
Ok(())
}
fn curr_to_prev(&mut self) {
for o in &mut self.offsets {
o.prev_latest = o.curr_latest;
}
}
}
struct Printer<W> {
out: W,
timefmt: String,
fmt_buf: String,
out_buf: String,
time_width: usize,
offset_width: usize,
diff_width: usize,
lag_width: usize,
print_diff: bool,
print_lag: bool,
print_summary: bool,
}
impl<W: Write> Printer<W> {
fn new(out: W, cfg: &Config) -> Printer<W> {
Printer {
out: out,
timefmt: "%H:%M:%S".into(),
fmt_buf: String::with_capacity(30),
out_buf: String::with_capacity(160),
time_width: 10,
offset_width: 11,
diff_width: 8,
lag_width: 6,
print_diff: cfg.diff,
print_lag: !cfg.group.is_empty(),
print_summary: cfg.summary,
}
}
fn print_head(&mut self, num_partitions: usize) -> Result<()> {
self.out_buf.clear();
{
// ~ format
use std::fmt::Write;
let _ = write!(self.out_buf, "{1:<0$}", self.time_width, "time");
if self.print_summary {
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, "topic");
if self.print_diff {
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth");
}
if self.print_lag {
let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)");
}
} else {
for i in 0..num_partitions {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "p-{}", i);
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, self.fmt_buf);
if self.print_diff {
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth");
}
if self.print_lag {
let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)");
}
}
}
self.out_buf.push('\n');
}
{
// ~ print
try!(self.out.write_all(self.out_buf.as_bytes()));
Ok(())
}
}
fn print_offsets(&mut self, time: &time::Tm, partitions: &[Partition]) -> Result<()> {
self.out_buf.clear();
{
// ~ format
use std::fmt::Write;
self.fmt_buf.clear();
let _ =
write!(self.fmt_buf, "{}", time.strftime(&self.timefmt).expect("invalid timefmt"));
let _ = write!(self.out_buf, "{1:<0$}", self.time_width, self.fmt_buf);
if self.print_summary {
let mut prev_latest = 0;
let mut curr_latest = 0;
let mut curr_lag = 0;
for p in partitions {
macro_rules! cond_add {
($v:ident) => {
if $v != -1 {
if p.$v < 0 { $v = -1; } else { $v += p.$v; }
}
}
};
cond_add!(prev_latest);
cond_add!(curr_latest);
cond_add!(curr_lag);
}
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, curr_latest);
if self.print_diff {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "{:+}", curr_latest - prev_latest);
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf);
}
if self.print_lag {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "({})", curr_lag);
let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf);
}
} else {
for p in partitions {
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, p.curr_latest);
if self.print_diff {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "{:+}", p.curr_latest - p.prev_latest);
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf);
}
if self.print_lag {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "({})", p.curr_lag);
let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf);
}
}
}
}
self.out_buf.push('\n');
try!(self.out.write_all(self.out_buf.as_bytes()));
Ok(())
}
}
// --------------------------------------------------------------------
struct Config {
brokers: Vec<String>,
topic: String,
group: String,
offset_storage: GroupOffsetStorage,
period: stdtime::Duration,
commited_not_consumed: bool,
summary: bool,
diff: bool,
}
impl Config {
fn from_cmdline() -> Result<Config> {
let args: Vec<String> = env::args().collect();
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "Print this help screen");
opts.optopt("", "brokers", "Specify kafka bootstrap | Partition | identifier_name |
offset-monitor.rs | }
}
fn run(cfg: Config) -> Result<()> {
let mut client = KafkaClient::new(cfg.brokers.clone());
client.set_group_offset_storage(cfg.offset_storage);
try!(client.load_metadata_all());
// ~ if no topic specified, print all available and be done.
if cfg.topic.is_empty() {
let ts = client.topics();
let num_topics = ts.len();
if num_topics == 0 {
bail!("no topics available");
}
let mut names: Vec<&str> = Vec::with_capacity(ts.len());
names.extend(ts.names());
names.sort();
let mut buf = BufWriter::with_capacity(1024, stdout());
for name in names {
let _ = write!(buf, "topic: {}\n", name);
}
bail!("choose a topic");
}
// ~ otherwise let's loop over the topic partition offsets
let num_partitions = match client.topics().partitions(&cfg.topic) {
None => bail!(format!("no such topic: {}", &cfg.topic)),
Some(partitions) => partitions.len(),
};
let mut state = State::new(num_partitions, cfg.commited_not_consumed);
let mut printer = Printer::new(stdout(), &cfg);
try!(printer.print_head(num_partitions));
// ~ initialize the state
let mut first_time = true;
loop {
let t = time::now();
try!(state.update_partitions(&mut client, &cfg.topic, &cfg.group));
if first_time {
state.curr_to_prev();
first_time = false;
}
try!(printer.print_offsets(&t, &state.offsets));
thread::sleep(cfg.period);
}
}
#[derive(Copy, Clone)]
struct Partition {
prev_latest: i64,
curr_latest: i64,
curr_lag: i64,
}
impl Default for Partition {
fn default() -> Self {
Partition {
prev_latest: -1,
curr_latest: -1,
curr_lag: -1,
}
}
}
struct State {
offsets: Vec<Partition>,
lag_decr: i64,
}
impl State {
fn new(num_partitions: usize, commited_not_consumed: bool) -> State {
State {
offsets: vec![Default::default(); num_partitions],
lag_decr: if commited_not_consumed { 0 } else { 1 },
}
}
fn update_partitions(
&mut self,
client: &mut KafkaClient,
topic: &str,
group: &str,
) -> Result<()> {
// ~ get the latest topic offsets
let latests = try!(client.fetch_topic_offsets(topic, FetchOffset::Latest));
for l in latests {
let off = self.offsets.get_mut(l.partition as usize).expect(
"[topic offset] non-existent partition",
);
off.prev_latest = off.curr_latest;
off.curr_latest = l.offset;
}
if !group.is_empty() {
// ~ get the current group offsets
let groups = try!(client.fetch_group_topic_offsets(group, topic));
for g in groups {
let off = self.offsets.get_mut(g.partition as usize).expect(
"[group offset] non-existent partition",
);
// ~ it's quite likely that we fetched group offsets
// which are a bit ahead of the topic's latest offset
// since we issued the fetch-latest-offset request
// earlier than the request for the group offsets
off.curr_lag = cmp::max(0, off.curr_latest - g.offset - self.lag_decr);
}
}
Ok(())
}
fn curr_to_prev(&mut self) {
for o in &mut self.offsets {
o.prev_latest = o.curr_latest;
}
}
}
struct Printer<W> {
out: W,
timefmt: String,
fmt_buf: String,
out_buf: String,
time_width: usize,
offset_width: usize,
diff_width: usize,
lag_width: usize,
print_diff: bool,
print_lag: bool,
print_summary: bool,
}
impl<W: Write> Printer<W> {
fn new(out: W, cfg: &Config) -> Printer<W> |
fn print_head(&mut self, num_partitions: usize) -> Result<()> {
self.out_buf.clear();
{
// ~ format
use std::fmt::Write;
let _ = write!(self.out_buf, "{1:<0$}", self.time_width, "time");
if self.print_summary {
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, "topic");
if self.print_diff {
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth");
}
if self.print_lag {
let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)");
}
} else {
for i in 0..num_partitions {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "p-{}", i);
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, self.fmt_buf);
if self.print_diff {
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth");
}
if self.print_lag {
let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)");
}
}
}
self.out_buf.push('\n');
}
{
// ~ print
try!(self.out.write_all(self.out_buf.as_bytes()));
Ok(())
}
}
fn print_offsets(&mut self, time: &time::Tm, partitions: &[Partition]) -> Result<()> {
self.out_buf.clear();
{
// ~ format
use std::fmt::Write;
self.fmt_buf.clear();
let _ =
write!(self.fmt_buf, "{}", time.strftime(&self.timefmt).expect("invalid timefmt"));
let _ = write!(self.out_buf, "{1:<0$}", self.time_width, self.fmt_buf);
if self.print_summary {
let mut prev_latest = 0;
let mut curr_latest = 0;
let mut curr_lag = 0;
for p in partitions {
macro_rules! cond_add {
($v:ident) => {
if $v != -1 {
if p.$v < 0 { $v = -1; } else { $v += p.$v; }
}
}
};
cond_add!(prev_latest);
cond_add!(curr_latest);
cond_add!(curr_lag);
}
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, curr_latest);
if self.print_diff {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "{:+}", curr_latest - prev_latest);
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf);
}
if self.print_lag {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "({})", curr_lag);
let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf);
}
} else {
for p in partitions {
let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, p.curr_latest);
if self.print_diff {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "{:+}", p.curr_latest - p.prev_latest);
let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf);
}
if self.print_lag {
self.fmt_buf.clear();
let _ = write!(self.fmt_buf, "({})", p.curr_lag);
let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf);
}
}
}
}
self.out_buf.push('\n');
try!(self.out.write_all(self.out_buf.as_bytes()));
Ok(())
}
}
// --------------------------------------------------------------------
struct Config {
brokers: Vec<String>,
topic: String,
group: String,
offset_storage: GroupOffsetStorage,
period: stdtime::Duration,
commited_not_consumed: bool,
summary: bool,
diff: bool,
}
impl Config {
fn from_cmdline() -> Result<Config> {
let args: Vec<String> = env::args().collect();
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "Print this help screen");
opts.optopt("", "brokers", "Specify kafka | {
Printer {
out: out,
timefmt: "%H:%M:%S".into(),
fmt_buf: String::with_capacity(30),
out_buf: String::with_capacity(160),
time_width: 10,
offset_width: 11,
diff_width: 8,
lag_width: 6,
print_diff: cfg.diff,
print_lag: !cfg.group.is_empty(),
print_summary: cfg.summary,
}
} | identifier_body |
export_animation.py | for i in range(2):
if isinstance(pos[i], (int, float)):
pos[i] = pos[i] - half_size[i]
pos[i] = int(coreapi.global_scale * pos[i])
clip = clip.set_position(pos)
else:
clip = clip.set_position(pos)
if scale[0] != 1.0 or scale[1] != 1.0:
clip = clip.resize((int(clip.w * scale[0]), int(clip.h * scale[1])))
return clip
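# Resolve auto-extended clip durations from the next clip's start time and add a fadeout when consecutive clips are not back-to-back.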
def _update_clip_duration(track):
def is_connected(prev_clip, cur_clip):
return math.isclose(
prev_clip.start + prev_clip.duration, cur_clip.start, rel_tol=1e-3,
)
prev_clip_info = None
for clip_info in track:
if prev_clip_info is not None:
if prev_clip_info.auto_extend:
prev_clip_info.duration = clip_info.start - prev_clip_info.start
prev_clip_info.auto_extend = False
assert prev_clip_info.duration > 0
# Apply fadeout to previous clip if it's not connected with
# current clip.
if prev_clip_info.crossfade > 0 and not is_connected(
prev_clip_info, clip_info
):
prev_clip_info.fadeout = prev_clip_info.crossfade
prev_clip_info = clip_info
# Update last clip duration
if prev_clip_info is not None:
if prev_clip_info.auto_extend:
duration = prev_clip_info.duration
# Extend the last video clip to match the voice track
if "re" in coreapi.pos_dict:
duration = max(duration, coreapi.pos_dict["re"] - clip_info.start)
prev_clip_info.duration = duration
prev_clip_info.auto_extend = False
if prev_clip_info.crossfade > 0:
prev_clip_info.fadeout = prev_clip_info.crossfade
def _export_video(*, resolution, audio_only):
resolution = [int(x * coreapi.global_scale) for x in resolution]
audio_clips = []
# Update clip duration for each track
for track in datastruct.video_tracks.values():
_update_clip_duration(track)
# TODO: post-process video track clips
# Update MoviePy clip object in each track.
video_clips = []
for track_name, track in datastruct.video_tracks.items():
for i, clip_info in enumerate(track):
assert clip_info.mpy_clip is not None
assert clip_info.duration is not None
# Unlink audio clip from video clip (adjust audio duration)
if clip_info.no_audio:
clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None)
elif clip_info.mpy_clip.audio is not None:
audio_clip = clip_info.mpy_clip.audio
clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None)
# Audio timing
# TODO: audio subclip
if clip_info.subclip is not None:
duration = clip_info.subclip[1] - clip_info.subclip[0]
audio_clip = audio_clip.subclip(
clip_info.subclip[0], clip_info.subclip[1]
)
else:
duration = clip_info.duration
duration = min(duration, audio_clip.duration)
audio_clip = audio_clip.set_duration(duration)
audio_clip = audio_clip.set_start(clip_info.start)
# Adjust volume
if clip_info.norm:
audio_clip = audio_clip.fx(
# pylint: disable=maybe-no-member
afx.audio_normalize
)
if clip_info.vol is not None:
if isinstance(clip_info.vol, (int, float)):
audio_clip = audio_clip.fx(
# pylint: disable=maybe-no-member
afx.volumex,
clip_info.vol,
)
else:
audio_clip = _adjust_mpy_audio_clip_volume(
audio_clip, clip_info.vol
)
audio_clips.append(audio_clip)
# If the next clip has crossfade enabled
crossfade_duration = track[i + 1].crossfade if (i < len(track) - 1) else 0
if crossfade_duration:
# clip_info.fadeout = crossfade_duration # Fadeout current clip
clip_info.duration += crossfade_duration
clip_info.mpy_clip = _update_mpy_clip(clip_info.mpy_clip, **vars(clip_info))
# Deal with video fade in / out / crossfade
if clip_info.fadein:
assert isinstance(clip_info.fadein, (int, float))
# TODO: crossfadein and crossfadeout are very slow in moviepy
if track_name != "vid":
clip_info.mpy_clip = clip_info.mpy_clip.crossfadein(
clip_info.fadein
)
else:
clip_info.mpy_clip = clip_info.mpy_clip.fx(
# pylint: disable=maybe-no-member
vfx.fadein,
clip_info.fadein,
)
elif (
clip_info.crossfade > 0
): # crossfade and fadein should not happen at the same time
video_clips.append(
clip_info.mpy_clip.set_duration(clip_info.crossfade)
.crossfadein(clip_info.crossfade)
.set_start(clip_info.start)
)
clip_info.mpy_clip = clip_info.mpy_clip.subclip(clip_info.crossfade)
clip_info.start += clip_info.crossfade
if clip_info.fadeout:
assert isinstance(clip_info.fadeout, (int, float))
if track_name != "vid":
# pylint: disable=maybe-no-member
clip_info.mpy_clip = clip_info.mpy_clip.crossfadeout(
clip_info.fadeout
)
else:
clip_info.mpy_clip = clip_info.mpy_clip.fx(
# pylint: disable=maybe-no-member
vfx.fadeout,
clip_info.fadeout,
)
video_clips.append(clip_info.mpy_clip.set_start(clip_info.start))
if len(video_clips) == 0:
video_clips.append(ColorClip((200, 200), color=(0, 1, 0)).set_duration(2))
# raise Exception("no video clips??")
final_clip = CompositeVideoClip(video_clips, size=resolution)
# Resize here is too late, does not speed up the video encoding at all.
# final_clip = final_clip.resize(width=480)
# Deal with audio clips
for _, track in datastruct.audio_tracks.items():
clips = []
for clip_info in track.clips:
if clip_info.loop:
# HACK: reload the clip.
#
# still don't know why using loaded mpy_clip directly will cause
# "IndexError: index -200001 is out of bounds for axis 0 with
# size 0"...
clip = AudioFileClip(clip_info.file, buffersize=400000)
else:
clip = clip_info.mpy_clip
if clip_info.subclip is not None:
clip = clip.subclip(clip_info.subclip[0], clip_info.subclip[1])
duration = clip_info.duration
if duration is not None:
|
if clip_info.start is not None:
clip = clip.set_start(clip_info.start)
# Adjust volume by keypoints
if len(clip_info.vol_keypoints) > 0:
clip = _adjust_mpy_audio_clip_volume(clip, clip_info.vol_keypoints)
clips.append(clip)
if len(clips) > 0:
clip = CompositeAudioClip(clips)
audio_clips.append(clip)
if final_clip.audio:
audio_clips.append(final_clip.audio)
if len(audio_clips) > 0:
final_audio_clip = CompositeAudioClip(audio_clips)
# XXX: Workaround for exception: 'CompositeAudioClip' object has no attribute 'fps'.
# See: https://github.com/Zulko/moviepy/issues/863
# final_audio_clip.fps = 44100
final_clip = final_clip.set_audio(final_audio_clip)
# final_clip.show(10.5, interactive=True)
os.makedirs("tmp/out", exist_ok=True)
if audio_only:
final_audio_clip.fps = 44100
final_audio_clip.write_audiofile("%s.mp3" % out_filename)
open_with("%s.mp3" % out_filename, program_id=0)
else:
final_clip.write_videofile(
"%s.mp4" % out_filename,
temp_audiofile="%s.mp3" % out_filename,
remove_temp=False,
codec="libx264",
threads=8,
fps=coreapi.FPS,
ffmpeg_params=["-crf", "19"],
)
subprocess.Popen(
["mpv", "--force-window", "--geometry=1920x1080", f"{out_filename}.mp4"],
close_fds=True,
)
def _adjust_mpy_audio_clip_volume(clip, vol | if clip_info.loop:
# pylint: disable=maybe-no-member
clip = clip.fx(afx.audio_loop, duration=duration)
else:
duration = min(duration, clip.duration)
if clip_info.subclip:
duration = min(
duration, clip_info.subclip[1] - clip_info.subclip[0]
)
clip = clip.set_duration(duration) | conditional_block |
export_animation.py | _info.fadeout = prev_clip_info.crossfade
def _export_video(*, resolution, audio_only):
resolution = [int(x * coreapi.global_scale) for x in resolution]
audio_clips = []
# Update clip duration for each track
for track in datastruct.video_tracks.values():
_update_clip_duration(track)
# TODO: post-process video track clips
# Update MoviePy clip object in each track.
video_clips = []
for track_name, track in datastruct.video_tracks.items():
for i, clip_info in enumerate(track):
assert clip_info.mpy_clip is not None
assert clip_info.duration is not None
# Unlink audio clip from video clip (adjust audio duration)
if clip_info.no_audio:
clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None)
elif clip_info.mpy_clip.audio is not None:
audio_clip = clip_info.mpy_clip.audio
clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None)
# Audio timing
# TODO: audio subclip
if clip_info.subclip is not None:
duration = clip_info.subclip[1] - clip_info.subclip[0]
audio_clip = audio_clip.subclip(
clip_info.subclip[0], clip_info.subclip[1]
)
else:
duration = clip_info.duration
duration = min(duration, audio_clip.duration)
audio_clip = audio_clip.set_duration(duration)
audio_clip = audio_clip.set_start(clip_info.start)
# Adjust volume
if clip_info.norm:
audio_clip = audio_clip.fx(
# pylint: disable=maybe-no-member
afx.audio_normalize
)
if clip_info.vol is not None:
if isinstance(clip_info.vol, (int, float)):
audio_clip = audio_clip.fx(
# pylint: disable=maybe-no-member
afx.volumex,
clip_info.vol,
)
else:
audio_clip = _adjust_mpy_audio_clip_volume(
audio_clip, clip_info.vol
)
audio_clips.append(audio_clip)
# If the next clip has crossfade enabled
crossfade_duration = track[i + 1].crossfade if (i < len(track) - 1) else 0
if crossfade_duration:
# clip_info.fadeout = crossfade_duration # Fadeout current clip
clip_info.duration += crossfade_duration
clip_info.mpy_clip = _update_mpy_clip(clip_info.mpy_clip, **vars(clip_info))
# Deal with video fade in / out / crossfade
if clip_info.fadein:
assert isinstance(clip_info.fadein, (int, float))
# TODO: crossfadein and crossfadeout are very slow in moviepy
if track_name != "vid":
clip_info.mpy_clip = clip_info.mpy_clip.crossfadein(
clip_info.fadein
)
else:
clip_info.mpy_clip = clip_info.mpy_clip.fx(
# pylint: disable=maybe-no-member
vfx.fadein,
clip_info.fadein,
)
elif (
clip_info.crossfade > 0
): # crossfade and fadein should not happen at the same time
video_clips.append(
clip_info.mpy_clip.set_duration(clip_info.crossfade)
.crossfadein(clip_info.crossfade)
.set_start(clip_info.start)
)
clip_info.mpy_clip = clip_info.mpy_clip.subclip(clip_info.crossfade)
clip_info.start += clip_info.crossfade
if clip_info.fadeout:
assert isinstance(clip_info.fadeout, (int, float))
if track_name != "vid":
# pylint: disable=maybe-no-member
clip_info.mpy_clip = clip_info.mpy_clip.crossfadeout(
clip_info.fadeout
)
else:
clip_info.mpy_clip = clip_info.mpy_clip.fx(
# pylint: disable=maybe-no-member
vfx.fadeout,
clip_info.fadeout,
)
video_clips.append(clip_info.mpy_clip.set_start(clip_info.start))
if len(video_clips) == 0:
video_clips.append(ColorClip((200, 200), color=(0, 1, 0)).set_duration(2))
# raise Exception("no video clips??")
final_clip = CompositeVideoClip(video_clips, size=resolution)
# Resize here is too late, does not speed up the video encoding at all.
# final_clip = final_clip.resize(width=480)
# Deal with audio clips
for _, track in datastruct.audio_tracks.items():
clips = []
for clip_info in track.clips:
if clip_info.loop:
# HACK: reload the clip.
#
# still don't know why using loaded mpy_clip directly will cause
# "IndexError: index -200001 is out of bounds for axis 0 with
# size 0"...
clip = AudioFileClip(clip_info.file, buffersize=400000)
else:
clip = clip_info.mpy_clip
if clip_info.subclip is not None:
clip = clip.subclip(clip_info.subclip[0], clip_info.subclip[1])
duration = clip_info.duration
if duration is not None:
if clip_info.loop:
# pylint: disable=maybe-no-member
clip = clip.fx(afx.audio_loop, duration=duration)
else:
duration = min(duration, clip.duration)
if clip_info.subclip:
duration = min(
duration, clip_info.subclip[1] - clip_info.subclip[0]
)
clip = clip.set_duration(duration)
if clip_info.start is not None:
clip = clip.set_start(clip_info.start)
# Adjust volume by keypoints
if len(clip_info.vol_keypoints) > 0:
clip = _adjust_mpy_audio_clip_volume(clip, clip_info.vol_keypoints)
clips.append(clip)
if len(clips) > 0:
clip = CompositeAudioClip(clips)
audio_clips.append(clip)
if final_clip.audio:
audio_clips.append(final_clip.audio)
if len(audio_clips) > 0:
final_audio_clip = CompositeAudioClip(audio_clips)
# XXX: Workaround for exception: 'CompositeAudioClip' object has no attribute 'fps'.
# See: https://github.com/Zulko/moviepy/issues/863
# final_audio_clip.fps = 44100
final_clip = final_clip.set_audio(final_audio_clip)
# final_clip.show(10.5, interactive=True)
os.makedirs("tmp/out", exist_ok=True)
if audio_only:
final_audio_clip.fps = 44100
final_audio_clip.write_audiofile("%s.mp3" % out_filename)
open_with("%s.mp3" % out_filename, program_id=0)
else:
final_clip.write_videofile(
"%s.mp4" % out_filename,
temp_audiofile="%s.mp3" % out_filename,
remove_temp=False,
codec="libx264",
threads=8,
fps=coreapi.FPS,
ffmpeg_params=["-crf", "19"],
)
subprocess.Popen(
["mpv", "--force-window", "--geometry=1920x1080", f"{out_filename}.mp4"],
close_fds=True,
)
def _adjust_mpy_audio_clip_volume(clip, vol_keypoints):
xp = []
fp = []
print("vol_keypoints:", vol_keypoints)
for (p, vol) in vol_keypoints:
if isinstance(vol, (int, float)):
xp.append(p)
fp.append(vol)
else:
raise Exception("unsupported bgm parameter type:" % type(vol))
def volume_adjust(gf, t):
factor = np.interp(t, xp, fp)
factor = np.vstack([factor, factor]).T
return factor * gf(t)
return clip.fl(volume_adjust)
# def _export_srt():
# with open("out.srt", "w", encoding="utf-8") as f:
# f.write("\n".join(_srt_lines))
def _convert_to_readable_time(seconds):
seconds = int(seconds)
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
if hour > 0:
return "%d:%02d:%02d" % (hour, minutes, seconds)
else:
return "%02d:%02d" % (minutes, seconds)
def _write_timestamp(t, section_name):
os.makedirs(os.path.dirname(out_filename), exist_ok=True)
if not hasattr(_write_timestamp, "f"):
_write_timestamp.f = open("%s.txt" % out_filename, "w", encoding="utf-8")
_write_timestamp.f.write("%s (%s)\n" % (section_name, _convert_to_readable_time(t)))
_write_timestamp.f.flush()
@core.api
def include(file):
with open(file, "r", encoding="utf-8") as f: | s = f.read()
cwd = os.getcwd() | random_line_split |
|
export_animation.py | audio subclip
if clip_info.subclip is not None:
duration = clip_info.subclip[1] - clip_info.subclip[0]
audio_clip = audio_clip.subclip(
clip_info.subclip[0], clip_info.subclip[1]
)
else:
duration = clip_info.duration
duration = min(duration, audio_clip.duration)
audio_clip = audio_clip.set_duration(duration)
audio_clip = audio_clip.set_start(clip_info.start)
# Adjust volume
if clip_info.norm:
audio_clip = audio_clip.fx(
# pylint: disable=maybe-no-member
afx.audio_normalize
)
if clip_info.vol is not None:
if isinstance(clip_info.vol, (int, float)):
audio_clip = audio_clip.fx(
# pylint: disable=maybe-no-member
afx.volumex,
clip_info.vol,
)
else:
audio_clip = _adjust_mpy_audio_clip_volume(
audio_clip, clip_info.vol
)
audio_clips.append(audio_clip)
# If the next clip has crossfade enabled
crossfade_duration = track[i + 1].crossfade if (i < len(track) - 1) else 0
if crossfade_duration:
# clip_info.fadeout = crossfade_duration # Fadeout current clip
clip_info.duration += crossfade_duration
clip_info.mpy_clip = _update_mpy_clip(clip_info.mpy_clip, **vars(clip_info))
# Deal with video fade in / out / crossfade
if clip_info.fadein:
assert isinstance(clip_info.fadein, (int, float))
# TODO: crossfadein and crossfadeout are very slow in moviepy
if track_name != "vid":
clip_info.mpy_clip = clip_info.mpy_clip.crossfadein(
clip_info.fadein
)
else:
clip_info.mpy_clip = clip_info.mpy_clip.fx(
# pylint: disable=maybe-no-member
vfx.fadein,
clip_info.fadein,
)
elif (
clip_info.crossfade > 0
): # crossfade and fadein should not happen at the same time
video_clips.append(
clip_info.mpy_clip.set_duration(clip_info.crossfade)
.crossfadein(clip_info.crossfade)
.set_start(clip_info.start)
)
clip_info.mpy_clip = clip_info.mpy_clip.subclip(clip_info.crossfade)
clip_info.start += clip_info.crossfade
if clip_info.fadeout:
assert isinstance(clip_info.fadeout, (int, float))
if track_name != "vid":
# pylint: disable=maybe-no-member
clip_info.mpy_clip = clip_info.mpy_clip.crossfadeout(
clip_info.fadeout
)
else:
clip_info.mpy_clip = clip_info.mpy_clip.fx(
# pylint: disable=maybe-no-member
vfx.fadeout,
clip_info.fadeout,
)
video_clips.append(clip_info.mpy_clip.set_start(clip_info.start))
if len(video_clips) == 0:
video_clips.append(ColorClip((200, 200), color=(0, 1, 0)).set_duration(2))
# raise Exception("no video clips??")
final_clip = CompositeVideoClip(video_clips, size=resolution)
# Resize here is too late, does not speed up the video encoding at all.
# final_clip = final_clip.resize(width=480)
# Deal with audio clips
for _, track in datastruct.audio_tracks.items():
clips = []
for clip_info in track.clips:
if clip_info.loop:
# HACK: reload the clip.
#
# still don't know why using loaded mpy_clip directly will cause
# "IndexError: index -200001 is out of bounds for axis 0 with
# size 0"...
clip = AudioFileClip(clip_info.file, buffersize=400000)
else:
clip = clip_info.mpy_clip
if clip_info.subclip is not None:
clip = clip.subclip(clip_info.subclip[0], clip_info.subclip[1])
duration = clip_info.duration
if duration is not None:
if clip_info.loop:
# pylint: disable=maybe-no-member
clip = clip.fx(afx.audio_loop, duration=duration)
else:
duration = min(duration, clip.duration)
if clip_info.subclip:
duration = min(
duration, clip_info.subclip[1] - clip_info.subclip[0]
)
clip = clip.set_duration(duration)
if clip_info.start is not None:
clip = clip.set_start(clip_info.start)
# Adjust volume by keypoints
if len(clip_info.vol_keypoints) > 0:
clip = _adjust_mpy_audio_clip_volume(clip, clip_info.vol_keypoints)
clips.append(clip)
if len(clips) > 0:
clip = CompositeAudioClip(clips)
audio_clips.append(clip)
if final_clip.audio:
audio_clips.append(final_clip.audio)
if len(audio_clips) > 0:
final_audio_clip = CompositeAudioClip(audio_clips)
# XXX: Workaround for exception: 'CompositeAudioClip' object has no attribute 'fps'.
# See: https://github.com/Zulko/moviepy/issues/863
# final_audio_clip.fps = 44100
final_clip = final_clip.set_audio(final_audio_clip)
# final_clip.show(10.5, interactive=True)
os.makedirs("tmp/out", exist_ok=True)
if audio_only:
final_audio_clip.fps = 44100
final_audio_clip.write_audiofile("%s.mp3" % out_filename)
open_with("%s.mp3" % out_filename, program_id=0)
else:
final_clip.write_videofile(
"%s.mp4" % out_filename,
temp_audiofile="%s.mp3" % out_filename,
remove_temp=False,
codec="libx264",
threads=8,
fps=coreapi.FPS,
ffmpeg_params=["-crf", "19"],
)
subprocess.Popen(
["mpv", "--force-window", "--geometry=1920x1080", f"{out_filename}.mp4"],
close_fds=True,
)
def _adjust_mpy_audio_clip_volume(clip, vol_keypoints):
xp = []
fp = []
print("vol_keypoints:", vol_keypoints)
for (p, vol) in vol_keypoints:
if isinstance(vol, (int, float)):
xp.append(p)
fp.append(vol)
else:
raise Exception("unsupported bgm parameter type:" % type(vol))
def volume_adjust(gf, t):
factor = np.interp(t, xp, fp)
factor = np.vstack([factor, factor]).T
return factor * gf(t)
return clip.fl(volume_adjust)
# def _export_srt():
# with open("out.srt", "w", encoding="utf-8") as f:
# f.write("\n".join(_srt_lines))
def _convert_to_readable_time(seconds):
seconds = int(seconds)
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
if hour > 0:
return "%d:%02d:%02d" % (hour, minutes, seconds)
else:
return "%02d:%02d" % (minutes, seconds)
def _write_timestamp(t, section_name):
os.makedirs(os.path.dirname(out_filename), exist_ok=True)
if not hasattr(_write_timestamp, "f"):
_write_timestamp.f = open("%s.txt" % out_filename, "w", encoding="utf-8")
_write_timestamp.f.write("%s (%s)\n" % (section_name, _convert_to_readable_time(t)))
_write_timestamp.f.flush()
@core.api
def include(file):
with open(file, "r", encoding="utf-8") as f:
s = f.read()
cwd = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(file)))
_parse_text(s)
os.chdir(cwd)
def _remove_unused_recordings(s):
| used_recordings = set()
unused_recordings = []
apis = {"record": (lambda f, **kargs: used_recordings.add(f))}
_parse_text(s, apis=apis)
files = [f for f in glob.glob("record/*") if os.path.isfile(f)]
files = [f.replace("\\", "/") for f in files]
for f in files:
if f not in used_recordings:
unused_recordings.append(f)
print2("Used : %d" % len(used_recordings), color="green")
print2("Unused : %d" % len(unused_recordings), color="red")
assert len(used_recordings) + len(unused_recordings) == len(files)
print("Press y to clean up: ", end="", flush=True)
if getch() == "y":
for f in unused_recordings:
try: | identifier_body |
|
export_animation.py | .subclip[0]
audio_clip = audio_clip.subclip(
clip_info.subclip[0], clip_info.subclip[1]
)
else:
duration = clip_info.duration
duration = min(duration, audio_clip.duration)
audio_clip = audio_clip.set_duration(duration)
audio_clip = audio_clip.set_start(clip_info.start)
# Adjust volume
if clip_info.norm:
audio_clip = audio_clip.fx(
# pylint: disable=maybe-no-member
afx.audio_normalize
)
if clip_info.vol is not None:
if isinstance(clip_info.vol, (int, float)):
audio_clip = audio_clip.fx(
# pylint: disable=maybe-no-member
afx.volumex,
clip_info.vol,
)
else:
audio_clip = _adjust_mpy_audio_clip_volume(
audio_clip, clip_info.vol
)
audio_clips.append(audio_clip)
# If the next clip has crossfade enabled
crossfade_duration = track[i + 1].crossfade if (i < len(track) - 1) else 0
if crossfade_duration:
# clip_info.fadeout = crossfade_duration # Fadeout current clip
clip_info.duration += crossfade_duration
clip_info.mpy_clip = _update_mpy_clip(clip_info.mpy_clip, **vars(clip_info))
# Deal with video fade in / out / crossfade
if clip_info.fadein:
assert isinstance(clip_info.fadein, (int, float))
# TODO: crossfadein and crossfadeout are very slow in moviepy
if track_name != "vid":
clip_info.mpy_clip = clip_info.mpy_clip.crossfadein(
clip_info.fadein
)
else:
clip_info.mpy_clip = clip_info.mpy_clip.fx(
# pylint: disable=maybe-no-member
vfx.fadein,
clip_info.fadein,
)
elif (
clip_info.crossfade > 0
): # crossfade and fadein should not happen at the same time
video_clips.append(
clip_info.mpy_clip.set_duration(clip_info.crossfade)
.crossfadein(clip_info.crossfade)
.set_start(clip_info.start)
)
clip_info.mpy_clip = clip_info.mpy_clip.subclip(clip_info.crossfade)
clip_info.start += clip_info.crossfade
if clip_info.fadeout:
assert isinstance(clip_info.fadeout, (int, float))
if track_name != "vid":
# pylint: disable=maybe-no-member
clip_info.mpy_clip = clip_info.mpy_clip.crossfadeout(
clip_info.fadeout
)
else:
clip_info.mpy_clip = clip_info.mpy_clip.fx(
# pylint: disable=maybe-no-member
vfx.fadeout,
clip_info.fadeout,
)
video_clips.append(clip_info.mpy_clip.set_start(clip_info.start))
if len(video_clips) == 0:
video_clips.append(ColorClip((200, 200), color=(0, 1, 0)).set_duration(2))
# raise Exception("no video clips??")
final_clip = CompositeVideoClip(video_clips, size=resolution)
# Resize here is too late, does not speed up the video encoding at all.
# final_clip = final_clip.resize(width=480)
# Deal with audio clips
for _, track in datastruct.audio_tracks.items():
clips = []
for clip_info in track.clips:
if clip_info.loop:
# HACK: reload the clip.
#
# still don't know why using loaded mpy_clip directly will cause
# "IndexError: index -200001 is out of bounds for axis 0 with
# size 0"...
clip = AudioFileClip(clip_info.file, buffersize=400000)
else:
clip = clip_info.mpy_clip
if clip_info.subclip is not None:
clip = clip.subclip(clip_info.subclip[0], clip_info.subclip[1])
duration = clip_info.duration
if duration is not None:
if clip_info.loop:
# pylint: disable=maybe-no-member
clip = clip.fx(afx.audio_loop, duration=duration)
else:
duration = min(duration, clip.duration)
if clip_info.subclip:
duration = min(
duration, clip_info.subclip[1] - clip_info.subclip[0]
)
clip = clip.set_duration(duration)
if clip_info.start is not None:
clip = clip.set_start(clip_info.start)
# Adjust volume by keypoints
if len(clip_info.vol_keypoints) > 0:
clip = _adjust_mpy_audio_clip_volume(clip, clip_info.vol_keypoints)
clips.append(clip)
if len(clips) > 0:
clip = CompositeAudioClip(clips)
audio_clips.append(clip)
if final_clip.audio:
audio_clips.append(final_clip.audio)
if len(audio_clips) > 0:
final_audio_clip = CompositeAudioClip(audio_clips)
# XXX: Workaround for exception: 'CompositeAudioClip' object has no attribute 'fps'.
# See: https://github.com/Zulko/moviepy/issues/863
# final_audio_clip.fps = 44100
final_clip = final_clip.set_audio(final_audio_clip)
# final_clip.show(10.5, interactive=True)
os.makedirs("tmp/out", exist_ok=True)
if audio_only:
final_audio_clip.fps = 44100
final_audio_clip.write_audiofile("%s.mp3" % out_filename)
open_with("%s.mp3" % out_filename, program_id=0)
else:
final_clip.write_videofile(
"%s.mp4" % out_filename,
temp_audiofile="%s.mp3" % out_filename,
remove_temp=False,
codec="libx264",
threads=8,
fps=coreapi.FPS,
ffmpeg_params=["-crf", "19"],
)
subprocess.Popen(
["mpv", "--force-window", "--geometry=1920x1080", f"{out_filename}.mp4"],
close_fds=True,
)
def _adjust_mpy_audio_clip_volume(clip, vol_keypoints):
xp = []
fp = []
print("vol_keypoints:", vol_keypoints)
for (p, vol) in vol_keypoints:
if isinstance(vol, (int, float)):
xp.append(p)
fp.append(vol)
else:
raise Exception("unsupported bgm parameter type:" % type(vol))
def volume_adjust(gf, t):
factor = np.interp(t, xp, fp)
factor = np.vstack([factor, factor]).T
return factor * gf(t)
return clip.fl(volume_adjust)
# def _export_srt():
# with open("out.srt", "w", encoding="utf-8") as f:
# f.write("\n".join(_srt_lines))
def _convert_to_readable_time(seconds):
seconds = int(seconds)
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
if hour > 0:
return "%d:%02d:%02d" % (hour, minutes, seconds)
else:
return "%02d:%02d" % (minutes, seconds)
def _write_timestamp(t, section_name):
os.makedirs(os.path.dirname(out_filename), exist_ok=True)
if not hasattr(_write_timestamp, "f"):
_write_timestamp.f = open("%s.txt" % out_filename, "w", encoding="utf-8")
_write_timestamp.f.write("%s (%s)\n" % (section_name, _convert_to_readable_time(t)))
_write_timestamp.f.flush()
@core.api
def include(file):
with open(file, "r", encoding="utf-8") as f:
s = f.read()
cwd = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(file)))
_parse_text(s)
os.chdir(cwd)
def _remove_unused_recordings(s):
used_recordings = set()
unused_recordings = []
apis = {"record": (lambda f, **kargs: used_recordings.add(f))}
_parse_text(s, apis=apis)
files = [f for f in glob.glob("record/*") if os.path.isfile(f)]
files = [f.replace("\\", "/") for f in files]
for f in files:
if f not in used_recordings:
unused_recordings.append(f)
print2("Used : %d" % len(used_recordings), color="green")
print2("Unused : %d" % len(unused_recordings), color="red")
assert len(used_recordings) + len(unused_recordings) == len(files)
print("Press y to clean up: ", end="", flush=True)
if getch() == "y":
for f in unused_recordings:
try:
os.remove(f)
except:
print("WARNING: failed to remove: %s" % f)
def | _parse_text | identifier_name |
|
mock.rs | >;
type OnKilledAccount = ();
type OnNewAccount = ();
type OnSetCode = ();
type Origin = Origin;
type PalletInfo = PalletInfo;
type SS58Prefix = SS58Prefix;
type SystemWeightInfo = ();
type Version = ();
}
parameter_types! {
pub const ExistentialDeposit: u128 = 1;
}
impl pallet_balances::Config for Test {
type AccountStore = System;
type Balance = Balance;
type DustRemoval = ();
type Event = Event;
type ExistentialDeposit = ExistentialDeposit;
type MaxLocks = ();
type MaxReserves = ();
type ReserveIdentifier = [u8; 4];
type WeightInfo = ();
}
parameter_types! {
pub const MinimumPeriod: u64 = 1;
}
impl pallet_timestamp::Config for Test {
type MinimumPeriod = MinimumPeriod;
type Moment = u64;
type OnTimestampSet = Aura;
type WeightInfo = ();
}
parameter_types! {
pub const MaxAuthorities: u32 = 100_000;
}
impl pallet_aura::Config for Test {
type AuthorityId = sp_consensus_aura::sr25519::AuthorityId;
type DisabledValidators = ();
type MaxAuthorities = MaxAuthorities;
}
sp_runtime::impl_opaque_keys! {
pub struct MockSessionKeys {
// a key for aura authoring
pub aura: UintAuthorityId,
}
}
impl From<UintAuthorityId> for MockSessionKeys {
fn from(aura: sp_runtime::testing::UintAuthorityId) -> Self {
Self { aura }
}
}
parameter_types! {
pub static SessionHandlerCollators: Vec<u64> = vec![];
pub static SessionChangeBlock: u64 = 0;
}
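// Test session handler that records the current collator set and the block of the last session change for assertions.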
pub struct TestSessionHandler;
impl pallet_session::SessionHandler<u64> for TestSessionHandler {
const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID];
fn on_genesis_session<Ks: OpaqueKeys>(keys: &[(u64, Ks)]) {
SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>())
}
fn on_new_session<Ks: OpaqueKeys>(_: bool, keys: &[(u64, Ks)], _: &[(u64, Ks)]) {
SessionChangeBlock::set(System::block_number());
dbg!(keys.len());
SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>())
}
fn on_before_session_ending() {}
fn on_disabled(_: u32) {}
}
impl pallet_session::Config for Test {
type Event = Event;
type Keys = MockSessionKeys;
type NextSessionRotation = Stake;
type SessionHandler = TestSessionHandler;
type SessionManager = Stake;
type ShouldEndSession = Stake;
type ValidatorId = <Self as frame_system::Config>::AccountId;
// We don't have separate stash and controller accounts, so no conversion is needed.
type ValidatorIdOf = crate::IdentityCollator;
type WeightInfo = ();
}
parameter_types! {
pub const MinBlocksPerRound: u32 = 3;
pub const BlocksPerRound: u32 = 5;
pub const LeaveCandidatesDelay: u32 = 2;
pub const LeaveNominatorsDelay: u32 = 2;
pub const RevokeNominationDelay: u32 = 2;
pub const RewardPaymentDelay: u32 = 2;
pub const MinSelectedCandidates: u32 = 5;
pub const MaxNominatorsPerCollator: u32 = 4;
pub const MaxCollatorsPerNominator: u32 = 4;
pub const DefaultCollatorCommission: Perbill = Perbill::from_percent(20);
pub const DefaultParachainBondReservePercent: Percent = Percent::from_percent(30);
pub const MinCollatorStk: u128 = 10;
pub const MinNominatorStk: u128 = 5;
pub const MinNomination: u128 = 3;
}
impl Config for Test {
type BlocksPerRound = BlocksPerRound;
type Currency = Balances;
type DefaultCollatorCommission = DefaultCollatorCommission;
type DefaultParachainBondReservePercent = DefaultParachainBondReservePercent;
type Event = Event;
type LeaveCandidatesDelay = LeaveCandidatesDelay;
type LeaveNominatorsDelay = LeaveNominatorsDelay;
type MaxCollatorsPerNominator = MaxCollatorsPerNominator;
type MaxNominatorsPerCollator = MaxNominatorsPerCollator;
type MinBlocksPerRound = MinBlocksPerRound;
type MinCollatorCandidateStk = MinCollatorStk;
type MinCollatorStk = MinCollatorStk;
type MinNomination = MinNomination;
type MinNominatorStk = MinNominatorStk;
type MinSelectedCandidates = MinSelectedCandidates;
type MonetaryGovernanceOrigin = frame_system::EnsureRoot<AccountId>;
type RevokeNominationDelay = RevokeNominationDelay;
type RewardPaymentDelay = RewardPaymentDelay;
type WeightInfo = ();
}
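/// Builds test externalities with the configured balances, collator candidates, nominations, inflation and session keys.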
pub(crate) struct ExtBuilder {
// endowed accounts with balances
balances: Vec<(AccountId, Balance)>,
// [collator, amount]
collators: Vec<(AccountId, Balance)>,
// [nominator, collator, nomination_amount]
nominations: Vec<(AccountId, AccountId, Balance)>,
// inflation config
inflation: InflationInfo<Balance>,
}
impl Default for ExtBuilder {
fn default() -> ExtBuilder {
ExtBuilder {
balances: vec![],
nominations: vec![],
collators: vec![],
inflation: InflationInfo {
expect: Range {
min: 700,
ideal: 700,
max: 700,
},
// not used
annual: Range {
min: Perbill::from_percent(50),
ideal: Perbill::from_percent(50),
max: Perbill::from_percent(50),
},
// unrealistically high parameterization, only for testing
round: Range {
min: Perbill::from_percent(5),
ideal: Perbill::from_percent(5),
max: Perbill::from_percent(5),
},
},
}
}
}
impl ExtBuilder {
pub(crate) fn with_balances(mut self, balances: Vec<(AccountId, Balance)>) -> Self {
self.balances = balances;
self
}
pub(crate) fn with_candidates(mut self, collators: Vec<(AccountId, Balance)>) -> Self {
self.collators = collators;
self
}
pub(crate) fn with_nominations(mut self, nominations: Vec<(AccountId, AccountId, Balance)>) -> Self {
self.nominations = nominations;
self
}
#[allow(dead_code)]
pub(crate) fn with_inflation(mut self, inflation: InflationInfo<Balance>) -> Self {
self.inflation = inflation;
self
}
pub(crate) fn build(self) -> sp_io::TestExternalities {
let mut t = frame_system::GenesisConfig::default()
.build_storage::<Test>()
.expect("Frame system builds valid default genesis config");
pallet_balances::GenesisConfig::<Test> {
balances: self.balances,
}
.assimilate_storage(&mut t)
.expect("Pallet balances storage can be assimilated");
stake::GenesisConfig::<Test> {
candidates: self.collators,
nominations: self.nominations,
inflation_config: self.inflation,
}
.assimilate_storage(&mut t)
.expect("Parachain Staking's storage can be assimilated");
let validators = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let keys = validators
.iter()
.map(|i| {
(*i, *i, MockSessionKeys {
aura: UintAuthorityId(*i),
})
})
.collect::<Vec<_>>();
pallet_session::GenesisConfig::<Test> { keys }
.assimilate_storage(&mut t)
.expect("Pallet session storage can be assimilated");
let mut ext = sp_io::TestExternalities::new(t);
ext.execute_with(|| System::set_block_number(1));
ext
}
}
pub(crate) fn roll_to(n: u64) {
while System::block_number() < n {
Balances::on_finalize(System::block_number());
Stake::on_finalize(System::block_number());
Session::on_finalize(System::block_number());
Aura::on_finalize(System::block_number());
System::on_finalize(System::block_number());
System::set_block_number(System::block_number() + 1);
System::on_initialize(System::block_number());
Timestamp::on_initialize(System::block_number());
Balances::on_initialize(System::block_number());
Stake::on_initialize(System::block_number());
Session::on_initialize(System::block_number());
Aura::on_initialize(System::block_number());
}
}
pub(crate) fn last_event() -> Event | {
System::events().pop().expect("Event expected").event
} | identifier_body |
|
mock.rs | type Block = frame_system::mocking::MockBlock<Test>;
// Configure a mock runtime to test the pallet.
construct_runtime!(
pub enum Test where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
Stake: stake::{Pallet, Call, Storage, Config<T>, Event<T>},
Session: pallet_session::{Pallet, Call, Storage, Event, Config<T>},
Aura: pallet_aura::{Pallet, Storage, Config<T>},
}
);
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::one();
pub const SS58Prefix: u8 = 42;
}
impl frame_system::Config for Test {
type AccountData = pallet_balances::AccountData<Balance>;
type AccountId = AccountId;
type BaseCallFilter = Everything;
type BlockHashCount = BlockHashCount;
type BlockLength = ();
type BlockNumber = BlockNumber;
type BlockWeights = ();
type Call = Call;
type DbWeight = ();
type Event = Event;
type Hash = H256;
type Hashing = BlakeTwo256;
type Header = Header;
type Index = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type OnKilledAccount = ();
type OnNewAccount = ();
type OnSetCode = ();
type Origin = Origin;
type PalletInfo = PalletInfo;
type SS58Prefix = SS58Prefix;
type SystemWeightInfo = ();
type Version = ();
}
parameter_types! {
pub const ExistentialDeposit: u128 = 1;
}
impl pallet_balances::Config for Test {
type AccountStore = System;
type Balance = Balance;
type DustRemoval = ();
type Event = Event;
type ExistentialDeposit = ExistentialDeposit;
type MaxLocks = ();
type MaxReserves = ();
type ReserveIdentifier = [u8; 4];
type WeightInfo = ();
}
parameter_types! {
pub const MinimumPeriod: u64 = 1;
}
impl pallet_timestamp::Config for Test {
type MinimumPeriod = MinimumPeriod;
type Moment = u64;
type OnTimestampSet = Aura;
type WeightInfo = ();
}
parameter_types! {
pub const MaxAuthorities: u32 = 100_000;
}
impl pallet_aura::Config for Test {
type AuthorityId = sp_consensus_aura::sr25519::AuthorityId;
type DisabledValidators = ();
type MaxAuthorities = MaxAuthorities;
}
sp_runtime::impl_opaque_keys! {
pub struct MockSessionKeys {
// a key for aura authoring
pub aura: UintAuthorityId,
}
}
impl From<UintAuthorityId> for MockSessionKeys {
fn from(aura: sp_runtime::testing::UintAuthorityId) -> Self {
Self { aura }
}
}
parameter_types! {
pub static SessionHandlerCollators: Vec<u64> = vec![];
pub static SessionChangeBlock: u64 = 0;
}
pub struct TestSessionHandler;
impl pallet_session::SessionHandler<u64> for TestSessionHandler {
const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID];
fn on_genesis_session<Ks: OpaqueKeys>(keys: &[(u64, Ks)]) {
SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>())
}
fn on_new_session<Ks: OpaqueKeys>(_: bool, keys: &[(u64, Ks)], _: &[(u64, Ks)]) {
SessionChangeBlock::set(System::block_number());
dbg!(keys.len());
SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>())
}
fn on_before_session_ending() {}
fn on_disabled(_: u32) {}
}
impl pallet_session::Config for Test {
type Event = Event;
type Keys = MockSessionKeys;
type NextSessionRotation = Stake;
type SessionHandler = TestSessionHandler;
type SessionManager = Stake;
type ShouldEndSession = Stake;
type ValidatorId = <Self as frame_system::Config>::AccountId;
// we don't have stash and controller, thus we don't need the convert as well.
type ValidatorIdOf = crate::IdentityCollator;
type WeightInfo = ();
}
parameter_types! {
pub const MinBlocksPerRound: u32 = 3;
pub const BlocksPerRound: u32 = 5;
pub const LeaveCandidatesDelay: u32 = 2;
pub const LeaveNominatorsDelay: u32 = 2;
pub const RevokeNominationDelay: u32 = 2;
pub const RewardPaymentDelay: u32 = 2;
pub const MinSelectedCandidates: u32 = 5;
pub const MaxNominatorsPerCollator: u32 = 4;
pub const MaxCollatorsPerNominator: u32 = 4;
pub const DefaultCollatorCommission: Perbill = Perbill::from_percent(20);
pub const DefaultParachainBondReservePercent: Percent = Percent::from_percent(30);
pub const MinCollatorStk: u128 = 10;
pub const MinNominatorStk: u128 = 5;
pub const MinNomination: u128 = 3;
}
impl Config for Test {
type BlocksPerRound = BlocksPerRound;
type Currency = Balances;
type DefaultCollatorCommission = DefaultCollatorCommission;
type DefaultParachainBondReservePercent = DefaultParachainBondReservePercent;
type Event = Event;
type LeaveCandidatesDelay = LeaveCandidatesDelay;
type LeaveNominatorsDelay = LeaveNominatorsDelay;
type MaxCollatorsPerNominator = MaxCollatorsPerNominator;
type MaxNominatorsPerCollator = MaxNominatorsPerCollator;
type MinBlocksPerRound = MinBlocksPerRound;
type MinCollatorCandidateStk = MinCollatorStk;
type MinCollatorStk = MinCollatorStk;
type MinNomination = MinNomination;
type MinNominatorStk = MinNominatorStk;
type MinSelectedCandidates = MinSelectedCandidates;
type MonetaryGovernanceOrigin = frame_system::EnsureRoot<AccountId>;
type RevokeNominationDelay = RevokeNominationDelay;
type RewardPaymentDelay = RewardPaymentDelay;
type WeightInfo = ();
}
pub(crate) struct ExtBuilder {
// endowed accounts with balances
balances: Vec<(AccountId, Balance)>,
// [collator, amount]
collators: Vec<(AccountId, Balance)>,
// [nominator, collator, nomination_amount]
nominations: Vec<(AccountId, AccountId, Balance)>,
// inflation config
inflation: InflationInfo<Balance>,
}
impl Default for ExtBuilder {
fn default() -> ExtBuilder {
ExtBuilder {
balances: vec![],
nominations: vec![],
collators: vec![],
inflation: InflationInfo {
expect: Range {
min: 700,
ideal: 700,
max: 700,
},
// not used
annual: Range {
min: Perbill::from_percent(50),
ideal: Perbill::from_percent(50),
max: Perbill::from_percent(50),
},
// unrealistically high parameterization, only for testing
round: Range {
min: Perbill::from_percent(5),
ideal: Perbill::from_percent(5),
max: Perbill::from_percent(5),
},
},
}
}
}
impl ExtBuilder {
pub(crate) fn with_balances(mut self, balances: Vec<(AccountId, Balance)>) -> Self {
self.balances = balances;
self
}
pub(crate) fn with_candidates(mut self, collators: Vec<(AccountId, Balance)>) -> Self {
self.collators = collators;
self
}
pub(crate) fn with_nominations(mut self, nominations: Vec<(AccountId, AccountId, Balance)>) -> Self {
self.nominations = nominations;
self
}
#[allow(dead_code)]
pub(crate) fn with_inflation(mut self, inflation: InflationInfo<Balance>) -> Self {
self.inflation = inflation;
self
}
pub(crate) fn build(self) -> sp_io::TestExternalities {
let mut t = frame_system::GenesisConfig::default()
.build_storage::<Test>()
.expect("Frame system builds valid default genesis config");
pallet_balances::GenesisConfig::<Test> {
balances: self.balances,
}
.assimilate_storage(&mut t)
.expect("Pallet balances storage can be assimilated");
stake::GenesisConfig::< | pub type Balance = u128;
pub type BlockNumber = u64;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>; | random_line_split |
|
mock.rs | Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::one();
pub const SS58Prefix: u8 = 42;
}
impl frame_system::Config for Test {
type AccountData = pallet_balances::AccountData<Balance>;
type AccountId = AccountId;
type BaseCallFilter = Everything;
type BlockHashCount = BlockHashCount;
type BlockLength = ();
type BlockNumber = BlockNumber;
type BlockWeights = ();
type Call = Call;
type DbWeight = ();
type Event = Event;
type Hash = H256;
type Hashing = BlakeTwo256;
type Header = Header;
type Index = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type OnKilledAccount = ();
type OnNewAccount = ();
type OnSetCode = ();
type Origin = Origin;
type PalletInfo = PalletInfo;
type SS58Prefix = SS58Prefix;
type SystemWeightInfo = ();
type Version = ();
}
parameter_types! {
pub const ExistentialDeposit: u128 = 1;
}
impl pallet_balances::Config for Test {
type AccountStore = System;
type Balance = Balance;
type DustRemoval = ();
type Event = Event;
type ExistentialDeposit = ExistentialDeposit;
type MaxLocks = ();
type MaxReserves = ();
type ReserveIdentifier = [u8; 4];
type WeightInfo = ();
}
parameter_types! {
pub const MinimumPeriod: u64 = 1;
}
impl pallet_timestamp::Config for Test {
type MinimumPeriod = MinimumPeriod;
type Moment = u64;
type OnTimestampSet = Aura;
type WeightInfo = ();
}
parameter_types! {
pub const MaxAuthorities: u32 = 100_000;
}
impl pallet_aura::Config for Test {
type AuthorityId = sp_consensus_aura::sr25519::AuthorityId;
type DisabledValidators = ();
type MaxAuthorities = MaxAuthorities;
}
sp_runtime::impl_opaque_keys! {
pub struct MockSessionKeys {
// a key for aura authoring
pub aura: UintAuthorityId,
}
}
impl From<UintAuthorityId> for MockSessionKeys {
fn from(aura: sp_runtime::testing::UintAuthorityId) -> Self {
Self { aura }
}
}
parameter_types! {
pub static SessionHandlerCollators: Vec<u64> = vec![];
pub static SessionChangeBlock: u64 = 0;
}
pub struct TestSessionHandler;
impl pallet_session::SessionHandler<u64> for TestSessionHandler {
const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID];
fn on_genesis_session<Ks: OpaqueKeys>(keys: &[(u64, Ks)]) {
SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>())
}
fn | <Ks: OpaqueKeys>(_: bool, keys: &[(u64, Ks)], _: &[(u64, Ks)]) {
SessionChangeBlock::set(System::block_number());
dbg!(keys.len());
SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>())
}
fn on_before_session_ending() {}
fn on_disabled(_: u32) {}
}
impl pallet_session::Config for Test {
type Event = Event;
type Keys = MockSessionKeys;
type NextSessionRotation = Stake;
type SessionHandler = TestSessionHandler;
type SessionManager = Stake;
type ShouldEndSession = Stake;
type ValidatorId = <Self as frame_system::Config>::AccountId;
// we don't have stash and controller, thus we don't need the convert as well.
type ValidatorIdOf = crate::IdentityCollator;
type WeightInfo = ();
}
parameter_types! {
pub const MinBlocksPerRound: u32 = 3;
pub const BlocksPerRound: u32 = 5;
pub const LeaveCandidatesDelay: u32 = 2;
pub const LeaveNominatorsDelay: u32 = 2;
pub const RevokeNominationDelay: u32 = 2;
pub const RewardPaymentDelay: u32 = 2;
pub const MinSelectedCandidates: u32 = 5;
pub const MaxNominatorsPerCollator: u32 = 4;
pub const MaxCollatorsPerNominator: u32 = 4;
pub const DefaultCollatorCommission: Perbill = Perbill::from_percent(20);
pub const DefaultParachainBondReservePercent: Percent = Percent::from_percent(30);
pub const MinCollatorStk: u128 = 10;
pub const MinNominatorStk: u128 = 5;
pub const MinNomination: u128 = 3;
}
impl Config for Test {
type BlocksPerRound = BlocksPerRound;
type Currency = Balances;
type DefaultCollatorCommission = DefaultCollatorCommission;
type DefaultParachainBondReservePercent = DefaultParachainBondReservePercent;
type Event = Event;
type LeaveCandidatesDelay = LeaveCandidatesDelay;
type LeaveNominatorsDelay = LeaveNominatorsDelay;
type MaxCollatorsPerNominator = MaxCollatorsPerNominator;
type MaxNominatorsPerCollator = MaxNominatorsPerCollator;
type MinBlocksPerRound = MinBlocksPerRound;
type MinCollatorCandidateStk = MinCollatorStk;
type MinCollatorStk = MinCollatorStk;
type MinNomination = MinNomination;
type MinNominatorStk = MinNominatorStk;
type MinSelectedCandidates = MinSelectedCandidates;
type MonetaryGovernanceOrigin = frame_system::EnsureRoot<AccountId>;
type RevokeNominationDelay = RevokeNominationDelay;
type RewardPaymentDelay = RewardPaymentDelay;
type WeightInfo = ();
}
pub(crate) struct ExtBuilder {
// endowed accounts with balances
balances: Vec<(AccountId, Balance)>,
// [collator, amount]
collators: Vec<(AccountId, Balance)>,
// [nominator, collator, nomination_amount]
nominations: Vec<(AccountId, AccountId, Balance)>,
// inflation config
inflation: InflationInfo<Balance>,
}
impl Default for ExtBuilder {
fn default() -> ExtBuilder {
ExtBuilder {
balances: vec![],
nominations: vec![],
collators: vec![],
inflation: InflationInfo {
expect: Range {
min: 700,
ideal: 700,
max: 700,
},
// not used
annual: Range {
min: Perbill::from_percent(50),
ideal: Perbill::from_percent(50),
max: Perbill::from_percent(50),
},
// unrealistically high parameterization, only for testing
round: Range {
min: Perbill::from_percent(5),
ideal: Perbill::from_percent(5),
max: Perbill::from_percent(5),
},
},
}
}
}
impl ExtBuilder {
pub(crate) fn with_balances(mut self, balances: Vec<(AccountId, Balance)>) -> Self {
self.balances = balances;
self
}
pub(crate) fn with_candidates(mut self, collators: Vec<(AccountId, Balance)>) -> Self {
self.collators = collators;
self
}
pub(crate) fn with_nominations(mut self, nominations: Vec<(AccountId, AccountId, Balance)>) -> Self {
self.nominations = nominations;
self
}
#[allow(dead_code)]
pub(crate) fn with_inflation(mut self, inflation: InflationInfo<Balance>) -> Self {
self.inflation = inflation;
self
}
pub(crate) fn build(self) -> sp_io::TestExternalities {
let mut t = frame_system::GenesisConfig::default()
.build_storage::<Test>()
.expect("Frame system builds valid default genesis config");
pallet_balances::GenesisConfig::<Test> {
balances: self.balances,
}
.assimilate_storage(&mut t)
.expect("Pallet balances storage can be assimilated");
stake::GenesisConfig::<Test> {
candidates: self.collators,
nominations: self.nominations,
inflation_config: self.inflation,
}
.assimilate_storage(&mut t)
.expect("Parachain Staking's storage can be assimilated");
let validators = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let keys = validators
.iter()
.map(|i| {
(*i, *i, MockSessionKeys {
aura: UintAuthorityId(*i),
})
})
.collect::<Vec<_>>();
pallet_session::GenesisConfig::<Test> { keys }
.assimilate_storage(&mut t)
.expect("Pallet session storage can be assimilated");
let mut ext = sp_io::TestExternalities::new(t);
ext.execute_with(|| System::set_block_number(1));
ext
}
}
pub(crate) fn roll_to(n: u64) {
while System | on_new_session | identifier_name |
mock.rs | {
pub const ExistentialDeposit: u128 = 1;
}
impl pallet_balances::Config for Test {
type AccountStore = System;
type Balance = Balance;
type DustRemoval = ();
type Event = Event;
type ExistentialDeposit = ExistentialDeposit;
type MaxLocks = ();
type MaxReserves = ();
type ReserveIdentifier = [u8; 4];
type WeightInfo = ();
}
parameter_types! {
pub const MinimumPeriod: u64 = 1;
}
impl pallet_timestamp::Config for Test {
type MinimumPeriod = MinimumPeriod;
type Moment = u64;
type OnTimestampSet = Aura;
type WeightInfo = ();
}
parameter_types! {
pub const MaxAuthorities: u32 = 100_000;
}
impl pallet_aura::Config for Test {
type AuthorityId = sp_consensus_aura::sr25519::AuthorityId;
type DisabledValidators = ();
type MaxAuthorities = MaxAuthorities;
}
sp_runtime::impl_opaque_keys! {
pub struct MockSessionKeys {
// a key for aura authoring
pub aura: UintAuthorityId,
}
}
impl From<UintAuthorityId> for MockSessionKeys {
fn from(aura: sp_runtime::testing::UintAuthorityId) -> Self {
Self { aura }
}
}
parameter_types! {
pub static SessionHandlerCollators: Vec<u64> = vec![];
pub static SessionChangeBlock: u64 = 0;
}
pub struct TestSessionHandler;
impl pallet_session::SessionHandler<u64> for TestSessionHandler {
const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID];
fn on_genesis_session<Ks: OpaqueKeys>(keys: &[(u64, Ks)]) {
SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>())
}
fn on_new_session<Ks: OpaqueKeys>(_: bool, keys: &[(u64, Ks)], _: &[(u64, Ks)]) {
SessionChangeBlock::set(System::block_number());
dbg!(keys.len());
SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>())
}
fn on_before_session_ending() {}
fn on_disabled(_: u32) {}
}
impl pallet_session::Config for Test {
type Event = Event;
type Keys = MockSessionKeys;
type NextSessionRotation = Stake;
type SessionHandler = TestSessionHandler;
type SessionManager = Stake;
type ShouldEndSession = Stake;
type ValidatorId = <Self as frame_system::Config>::AccountId;
// we don't have stash and controller, thus we don't need the convert as well.
type ValidatorIdOf = crate::IdentityCollator;
type WeightInfo = ();
}
parameter_types! {
pub const MinBlocksPerRound: u32 = 3;
pub const BlocksPerRound: u32 = 5;
pub const LeaveCandidatesDelay: u32 = 2;
pub const LeaveNominatorsDelay: u32 = 2;
pub const RevokeNominationDelay: u32 = 2;
pub const RewardPaymentDelay: u32 = 2;
pub const MinSelectedCandidates: u32 = 5;
pub const MaxNominatorsPerCollator: u32 = 4;
pub const MaxCollatorsPerNominator: u32 = 4;
pub const DefaultCollatorCommission: Perbill = Perbill::from_percent(20);
pub const DefaultParachainBondReservePercent: Percent = Percent::from_percent(30);
pub const MinCollatorStk: u128 = 10;
pub const MinNominatorStk: u128 = 5;
pub const MinNomination: u128 = 3;
}
impl Config for Test {
type BlocksPerRound = BlocksPerRound;
type Currency = Balances;
type DefaultCollatorCommission = DefaultCollatorCommission;
type DefaultParachainBondReservePercent = DefaultParachainBondReservePercent;
type Event = Event;
type LeaveCandidatesDelay = LeaveCandidatesDelay;
type LeaveNominatorsDelay = LeaveNominatorsDelay;
type MaxCollatorsPerNominator = MaxCollatorsPerNominator;
type MaxNominatorsPerCollator = MaxNominatorsPerCollator;
type MinBlocksPerRound = MinBlocksPerRound;
type MinCollatorCandidateStk = MinCollatorStk;
type MinCollatorStk = MinCollatorStk;
type MinNomination = MinNomination;
type MinNominatorStk = MinNominatorStk;
type MinSelectedCandidates = MinSelectedCandidates;
type MonetaryGovernanceOrigin = frame_system::EnsureRoot<AccountId>;
type RevokeNominationDelay = RevokeNominationDelay;
type RewardPaymentDelay = RewardPaymentDelay;
type WeightInfo = ();
}
pub(crate) struct ExtBuilder {
// endowed accounts with balances
balances: Vec<(AccountId, Balance)>,
// [collator, amount]
collators: Vec<(AccountId, Balance)>,
// [nominator, collator, nomination_amount]
nominations: Vec<(AccountId, AccountId, Balance)>,
// inflation config
inflation: InflationInfo<Balance>,
}
impl Default for ExtBuilder {
fn default() -> ExtBuilder {
ExtBuilder {
balances: vec![],
nominations: vec![],
collators: vec![],
inflation: InflationInfo {
expect: Range {
min: 700,
ideal: 700,
max: 700,
},
// not used
annual: Range {
min: Perbill::from_percent(50),
ideal: Perbill::from_percent(50),
max: Perbill::from_percent(50),
},
// unrealistically high parameterization, only for testing
round: Range {
min: Perbill::from_percent(5),
ideal: Perbill::from_percent(5),
max: Perbill::from_percent(5),
},
},
}
}
}
impl ExtBuilder {
pub(crate) fn with_balances(mut self, balances: Vec<(AccountId, Balance)>) -> Self {
self.balances = balances;
self
}
pub(crate) fn with_candidates(mut self, collators: Vec<(AccountId, Balance)>) -> Self {
self.collators = collators;
self
}
pub(crate) fn with_nominations(mut self, nominations: Vec<(AccountId, AccountId, Balance)>) -> Self {
self.nominations = nominations;
self
}
#[allow(dead_code)]
pub(crate) fn with_inflation(mut self, inflation: InflationInfo<Balance>) -> Self {
self.inflation = inflation;
self
}
pub(crate) fn build(self) -> sp_io::TestExternalities {
let mut t = frame_system::GenesisConfig::default()
.build_storage::<Test>()
.expect("Frame system builds valid default genesis config");
pallet_balances::GenesisConfig::<Test> {
balances: self.balances,
}
.assimilate_storage(&mut t)
.expect("Pallet balances storage can be assimilated");
stake::GenesisConfig::<Test> {
candidates: self.collators,
nominations: self.nominations,
inflation_config: self.inflation,
}
.assimilate_storage(&mut t)
.expect("Parachain Staking's storage can be assimilated");
let validators = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let keys = validators
.iter()
.map(|i| {
(*i, *i, MockSessionKeys {
aura: UintAuthorityId(*i),
})
})
.collect::<Vec<_>>();
pallet_session::GenesisConfig::<Test> { keys }
.assimilate_storage(&mut t)
.expect("Pallet session storage can be assimilated");
let mut ext = sp_io::TestExternalities::new(t);
ext.execute_with(|| System::set_block_number(1));
ext
}
}
pub(crate) fn roll_to(n: u64) {
while System::block_number() < n {
Balances::on_finalize(System::block_number());
Stake::on_finalize(System::block_number());
Session::on_finalize(System::block_number());
Aura::on_finalize(System::block_number());
System::on_finalize(System::block_number());
System::set_block_number(System::block_number() + 1);
System::on_initialize(System::block_number());
Timestamp::on_initialize(System::block_number());
Balances::on_initialize(System::block_number());
Stake::on_initialize(System::block_number());
Session::on_initialize(System::block_number());
Aura::on_initialize(System::block_number());
}
}
pub(crate) fn last_event() -> Event {
System::events().pop().expect("Event expected").event
}
pub(crate) fn events() -> Vec<pallet::Event<Test>> {
System::events()
.into_iter()
.map(|r| r.event)
.filter_map(|e| if let Event::Stake(inner) = e { Some(inner) } else | { None } | conditional_block |
|
test_agent.py | .append(uid)
continue
pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid]
if len(pred_list) == 0:
continue
dcg = 0.0
hit_num = 0.0
for i in range(len(pred_list)):
if pred_list[i] in rel_set:
dcg += 1. / (log(i + 2) / log(2))
hit_num += 1
# idcg
idcg = 0.0
for i in range(min(len(rel_set), len(pred_list))):
idcg += 1. / (log(i + 2) / log(2))
ndcg = dcg / idcg
recall = hit_num / len(rel_set)
precision = hit_num / len(pred_list)
hit = 1.0 if hit_num > 0.0 else 0.0
ndcgs.append(ndcg)
recalls.append(recall)
precisions.append(precision)
hits.append(hit)
fairness.append(calculate_fairness(pred_list, brand_dict))
avg_precision = np.mean(precisions) * 100
avg_recall = np.mean(recalls) * 100
avg_ndcg = np.mean(ndcgs) * 100
avg_hit = np.mean(hits) * 100
avg_fairness = np.mean(fairness)
print('NDCG={:.3f} | Recall={:.3f} | HR={:.3f} | Precision={:.3f} | Fairness={:.3f} | Invalid users={}'.format(
avg_ndcg, avg_recall, avg_hit, avg_precision, avg_fairness, len(invalid_users)))
def batch_beam_search(env, model, uids, device, topk=[25, 5, 1]):
def _batch_acts_to_masks(batch_acts):
batch_masks = []
for acts in batch_acts:
num_acts = len(acts)
act_mask = np.zeros(model.act_dim, dtype=np.uint8)
act_mask[:num_acts] = 1
batch_masks.append(act_mask)
return np.vstack(batch_masks)
state_pool = env.reset(uids) # numpy of [bs, dim]
path_pool = env._batch_path # list of list, size=bs
probs_pool = [[] for _ in uids]
model.eval()
for hop in range(3):
state_tensor = torch.FloatTensor(state_pool).to(device)
acts_pool = env._batch_get_actions(path_pool, False) # list of list, size=bs
actmask_pool = _batch_acts_to_masks(acts_pool) # numpy of [bs, dim]
actmask_tensor = torch.ByteTensor(actmask_pool).to(device)
probs, _ = model((state_tensor, actmask_tensor)) # Tensor of [bs, act_dim]
probs = probs + actmask_tensor.float() # In order to differ from masked actions
topk_probs, topk_idxs = torch.topk(probs, topk[hop], dim=1) # LongTensor of [bs, k]
topk_idxs = topk_idxs.detach().cpu().numpy()
topk_probs = topk_probs.detach().cpu().numpy()
new_path_pool, new_probs_pool = [], []
for row in range(topk_idxs.shape[0]):
path = path_pool[row]
probs = probs_pool[row]
for idx, p in zip(topk_idxs[row], topk_probs[row]):
if idx >= len(acts_pool[row]): # act idx is invalid
continue
relation, next_node_id = acts_pool[row][idx] # (relation, next_node_id)
if relation == SELF_LOOP:
next_node_type = path[-1][1]
else:
|
new_path = path + [(relation, next_node_type, next_node_id)]
new_path_pool.append(new_path)
new_probs_pool.append(probs + [p])
path_pool = new_path_pool
probs_pool = new_probs_pool
if hop < 2:
state_pool = env._batch_get_state(path_pool)
return path_pool, probs_pool
def predict_paths(policy_file, path_file, args):
print('Predicting paths...')
env = BatchKGEnvironment(args.dataset, args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history)
pretrain_sd = torch.load(policy_file)
model = ActorCritic(env.state_dim, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
model_sd = model.state_dict()
model_sd.update(pretrain_sd)
model.load_state_dict(model_sd)
test_labels = load_labels(args.dataset, 'test')
test_uids = list(test_labels.keys())
batch_size = 16
start_idx = 0
all_paths, all_probs = [], []
pbar = tqdm(total=len(test_uids))
while start_idx < len(test_uids):
end_idx = min(start_idx + batch_size, len(test_uids))
batch_uids = test_uids[start_idx:end_idx]
paths, probs = batch_beam_search(env, model, batch_uids, args.device, topk=args.topk)
all_paths.extend(paths)
all_probs.extend(probs)
start_idx = end_idx
pbar.update(batch_size)
predicts = {'paths': all_paths, 'probs': all_probs}
pickle.dump(predicts, open(path_file, 'wb'))
def evaluate_paths(path_file, train_labels, test_labels, num_recommendations, args):
embeds = load_embed(args.dataset)
user_embeds = embeds[USER]
purchase_embeds = embeds[PURCHASE][0]
product_embeds = embeds[PRODUCT]
scores = np.dot(user_embeds + purchase_embeds, product_embeds.T)
# 1) Get all valid paths for each user, compute path score and path probability.
results = pickle.load(open(path_file, 'rb'))
pred_paths = {uid: {} for uid in test_labels}
for path, probs in zip(results['paths'], results['probs']):
if path[-1][1] != PRODUCT:
continue
uid = path[0][2]
if uid not in pred_paths:
continue
pid = path[-1][2]
if pid not in pred_paths[uid]:
pred_paths[uid][pid] = []
path_score = scores[uid][pid]
path_prob = reduce(lambda x, y: x * y, probs)
pred_paths[uid][pid].append((path_score, path_prob, path))
# 2) Pick best path for each user-product pair, also remove pid if it is in train set.
best_pred_paths = {}
for uid in pred_paths:
train_pids = set(train_labels[uid])
best_pred_paths[uid] = []
for pid in pred_paths[uid]:
if pid in train_pids:
continue
# Get the path with highest probability
sorted_path = sorted(pred_paths[uid][pid], key=lambda x: x[1], reverse=True)
best_pred_paths[uid].append(sorted_path[0])
# 3) Compute top 10 recommended products for each user.
sort_by = 'score'
pred_labels = {}
for uid in best_pred_paths:
if sort_by == 'score':
sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[0], x[1]), reverse=True)
elif sort_by == 'prob':
sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[1], x[0]), reverse=True)
topk_pids = [p[-1][2] for _, _, p in sorted_path[:num_recommendations]] # from largest to smallest
# add up to 10 pids if not enough
if args.add_products and len(topk_pids) < num_recommendations:
train_pids = set(train_labels[uid])
cand_pids = np.argsort(scores[uid])
for cand_pid in cand_pids[::-1]:
if cand_pid in train_pids or cand_pid in topk_pids:
continue
topk_pids.append(cand_pid)
if len(topk_pids) >= num_recommendations:
break
# end of add
pred_labels[uid] = topk_pids[::-1] # change order to from smallest to largest!
return pred_labels
def test(args):
policy_file = args.log_dir + '/policy_model_epoch_{}.ckpt'.format(args.epochs)
path_file = args.log_dir + '/policy_paths_epoch{}.pkl'.format(args.epochs)
train_labels = load_labels(args.dataset, 'train')
test_labels = load_labels(args.dataset, 'test')
if args.run_path:
predict_paths(policy_file, path_file, args)
if args.run_eval:
pred_labels = evaluate_paths(path_file, train_labels, test_labels, args.num_recommendations, args)
evaluate(pred_labels, test_labels, args.num_recommendations, args.brand_dict)
if __name__ == '__main__':
boolean = lambda x: (str(x).lower() == 'true')
parser = argparse.ArgumentParser()
parser.add_argument('--dataset | next_node_type = KG_RELATION[path[-1][1]][relation] | conditional_block |
test_agent.py | .append(uid)
continue
pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid]
if len(pred_list) == 0:
continue
dcg = 0.0
hit_num = 0.0
for i in range(len(pred_list)):
if pred_list[i] in rel_set:
dcg += 1. / (log(i + 2) / log(2))
hit_num += 1
# idcg
idcg = 0.0
for i in range(min(len(rel_set), len(pred_list))):
idcg += 1. / (log(i + 2) / log(2))
ndcg = dcg / idcg
recall = hit_num / len(rel_set)
precision = hit_num / len(pred_list)
hit = 1.0 if hit_num > 0.0 else 0.0
ndcgs.append(ndcg)
recalls.append(recall)
precisions.append(precision)
hits.append(hit)
fairness.append(calculate_fairness(pred_list, brand_dict))
avg_precision = np.mean(precisions) * 100
avg_recall = np.mean(recalls) * 100
avg_ndcg = np.mean(ndcgs) * 100
avg_hit = np.mean(hits) * 100
avg_fairness = np.mean(fairness)
print('NDCG={:.3f} | Recall={:.3f} | HR={:.3f} | Precision={:.3f} | Fairness={:.3f} | Invalid users={}'.format(
avg_ndcg, avg_recall, avg_hit, avg_precision, avg_fairness, len(invalid_users)))
def batch_beam_search(env, model, uids, device, topk=[25, 5, 1]):
def _batch_acts_to_masks(batch_acts):
batch_masks = []
for acts in batch_acts:
num_acts = len(acts)
act_mask = np.zeros(model.act_dim, dtype=np.uint8)
act_mask[:num_acts] = 1
batch_masks.append(act_mask)
return np.vstack(batch_masks)
state_pool = env.reset(uids) # numpy of [bs, dim]
path_pool = env._batch_path # list of list, size=bs
probs_pool = [[] for _ in uids]
model.eval()
for hop in range(3):
state_tensor = torch.FloatTensor(state_pool).to(device)
acts_pool = env._batch_get_actions(path_pool, False) # list of list, size=bs
actmask_pool = _batch_acts_to_masks(acts_pool) # numpy of [bs, dim]
actmask_tensor = torch.ByteTensor(actmask_pool).to(device)
probs, _ = model((state_tensor, actmask_tensor)) # Tensor of [bs, act_dim]
probs = probs + actmask_tensor.float() # In order to differ from masked actions
topk_probs, topk_idxs = torch.topk(probs, topk[hop], dim=1) # LongTensor of [bs, k]
topk_idxs = topk_idxs.detach().cpu().numpy()
topk_probs = topk_probs.detach().cpu().numpy()
new_path_pool, new_probs_pool = [], []
for row in range(topk_idxs.shape[0]):
path = path_pool[row]
probs = probs_pool[row]
for idx, p in zip(topk_idxs[row], topk_probs[row]):
if idx >= len(acts_pool[row]): # act idx is invalid
continue
relation, next_node_id = acts_pool[row][idx] # (relation, next_node_id)
if relation == SELF_LOOP:
next_node_type = path[-1][1]
else:
next_node_type = KG_RELATION[path[-1][1]][relation]
new_path = path + [(relation, next_node_type, next_node_id)]
new_path_pool.append(new_path)
new_probs_pool.append(probs + [p])
path_pool = new_path_pool
probs_pool = new_probs_pool
if hop < 2:
state_pool = env._batch_get_state(path_pool)
return path_pool, probs_pool
def predict_paths(policy_file, path_file, args):
print('Predicting paths...')
env = BatchKGEnvironment(args.dataset, args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history)
pretrain_sd = torch.load(policy_file)
model = ActorCritic(env.state_dim, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
model_sd = model.state_dict()
model_sd.update(pretrain_sd)
model.load_state_dict(model_sd)
test_labels = load_labels(args.dataset, 'test')
test_uids = list(test_labels.keys())
batch_size = 16
start_idx = 0
all_paths, all_probs = [], []
pbar = tqdm(total=len(test_uids))
while start_idx < len(test_uids):
end_idx = min(start_idx + batch_size, len(test_uids))
batch_uids = test_uids[start_idx:end_idx]
paths, probs = batch_beam_search(env, model, batch_uids, args.device, topk=args.topk)
all_paths.extend(paths)
all_probs.extend(probs)
start_idx = end_idx
pbar.update(batch_size)
predicts = {'paths': all_paths, 'probs': all_probs}
pickle.dump(predicts, open(path_file, 'wb'))
def evaluate_paths(path_file, train_labels, test_labels, num_recommendations, args):
embeds = load_embed(args.dataset)
user_embeds = embeds[USER]
purchase_embeds = embeds[PURCHASE][0]
product_embeds = embeds[PRODUCT]
scores = np.dot(user_embeds + purchase_embeds, product_embeds.T)
# 1) Get all valid paths for each user, compute path score and path probability.
results = pickle.load(open(path_file, 'rb'))
pred_paths = {uid: {} for uid in test_labels}
for path, probs in zip(results['paths'], results['probs']):
if path[-1][1] != PRODUCT:
continue | uid = path[0][2]
if uid not in pred_paths:
continue
pid = path[-1][2]
if pid not in pred_paths[uid]:
pred_paths[uid][pid] = []
path_score = scores[uid][pid]
path_prob = reduce(lambda x, y: x * y, probs)
pred_paths[uid][pid].append((path_score, path_prob, path))
# 2) Pick best path for each user-product pair, also remove pid if it is in train set.
best_pred_paths = {}
for uid in pred_paths:
train_pids = set(train_labels[uid])
best_pred_paths[uid] = []
for pid in pred_paths[uid]:
if pid in train_pids:
continue
# Get the path with highest probability
sorted_path = sorted(pred_paths[uid][pid], key=lambda x: x[1], reverse=True)
best_pred_paths[uid].append(sorted_path[0])
# 3) Compute top 10 recommended products for each user.
sort_by = 'score'
pred_labels = {}
for uid in best_pred_paths:
if sort_by == 'score':
sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[0], x[1]), reverse=True)
elif sort_by == 'prob':
sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[1], x[0]), reverse=True)
topk_pids = [p[-1][2] for _, _, p in sorted_path[:num_recommendations]] # from largest to smallest
# add up to 10 pids if not enough
if args.add_products and len(topk_pids) < num_recommendations:
train_pids = set(train_labels[uid])
cand_pids = np.argsort(scores[uid])
for cand_pid in cand_pids[::-1]:
if cand_pid in train_pids or cand_pid in topk_pids:
continue
topk_pids.append(cand_pid)
if len(topk_pids) >= num_recommendations:
break
# end of add
pred_labels[uid] = topk_pids[::-1] # change order to from smallest to largest!
return pred_labels
def test(args):
policy_file = args.log_dir + '/policy_model_epoch_{}.ckpt'.format(args.epochs)
path_file = args.log_dir + '/policy_paths_epoch{}.pkl'.format(args.epochs)
train_labels = load_labels(args.dataset, 'train')
test_labels = load_labels(args.dataset, 'test')
if args.run_path:
predict_paths(policy_file, path_file, args)
if args.run_eval:
pred_labels = evaluate_paths(path_file, train_labels, test_labels, args.num_recommendations, args)
evaluate(pred_labels, test_labels, args.num_recommendations, args.brand_dict)
if __name__ == '__main__':
boolean = lambda x: (str(x).lower() == 'true')
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type | random_line_split |
|
test_agent.py | dcg += 1. / (log(i + 2) / log(2))
hit_num += 1
# idcg
idcg = 0.0
for i in range(min(len(rel_set), len(pred_list))):
idcg += 1. / (log(i + 2) / log(2))
ndcg = dcg / idcg
recall = hit_num / len(rel_set)
precision = hit_num / len(pred_list)
hit = 1.0 if hit_num > 0.0 else 0.0
ndcgs.append(ndcg)
recalls.append(recall)
precisions.append(precision)
hits.append(hit)
fairness.append(calculate_fairness(pred_list, brand_dict))
avg_precision = np.mean(precisions) * 100
avg_recall = np.mean(recalls) * 100
avg_ndcg = np.mean(ndcgs) * 100
avg_hit = np.mean(hits) * 100
avg_fairness = np.mean(fairness)
print('NDCG={:.3f} | Recall={:.3f} | HR={:.3f} | Precision={:.3f} | Fairness={:.3f} | Invalid users={}'.format(
avg_ndcg, avg_recall, avg_hit, avg_precision, avg_fairness, len(invalid_users)))
def batch_beam_search(env, model, uids, device, topk=[25, 5, 1]):
def _batch_acts_to_masks(batch_acts):
batch_masks = []
for acts in batch_acts:
num_acts = len(acts)
act_mask = np.zeros(model.act_dim, dtype=np.uint8)
act_mask[:num_acts] = 1
batch_masks.append(act_mask)
return np.vstack(batch_masks)
state_pool = env.reset(uids) # numpy of [bs, dim]
path_pool = env._batch_path # list of list, size=bs
probs_pool = [[] for _ in uids]
model.eval()
for hop in range(3):
state_tensor = torch.FloatTensor(state_pool).to(device)
acts_pool = env._batch_get_actions(path_pool, False) # list of list, size=bs
actmask_pool = _batch_acts_to_masks(acts_pool) # numpy of [bs, dim]
actmask_tensor = torch.ByteTensor(actmask_pool).to(device)
probs, _ = model((state_tensor, actmask_tensor)) # Tensor of [bs, act_dim]
probs = probs + actmask_tensor.float() # In order to differ from masked actions
topk_probs, topk_idxs = torch.topk(probs, topk[hop], dim=1) # LongTensor of [bs, k]
topk_idxs = topk_idxs.detach().cpu().numpy()
topk_probs = topk_probs.detach().cpu().numpy()
new_path_pool, new_probs_pool = [], []
for row in range(topk_idxs.shape[0]):
path = path_pool[row]
probs = probs_pool[row]
for idx, p in zip(topk_idxs[row], topk_probs[row]):
if idx >= len(acts_pool[row]): # act idx is invalid
continue
relation, next_node_id = acts_pool[row][idx] # (relation, next_node_id)
if relation == SELF_LOOP:
next_node_type = path[-1][1]
else:
next_node_type = KG_RELATION[path[-1][1]][relation]
new_path = path + [(relation, next_node_type, next_node_id)]
new_path_pool.append(new_path)
new_probs_pool.append(probs + [p])
path_pool = new_path_pool
probs_pool = new_probs_pool
if hop < 2:
state_pool = env._batch_get_state(path_pool)
return path_pool, probs_pool
def predict_paths(policy_file, path_file, args):
print('Predicting paths...')
env = BatchKGEnvironment(args.dataset, args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history)
pretrain_sd = torch.load(policy_file)
model = ActorCritic(env.state_dim, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
model_sd = model.state_dict()
model_sd.update(pretrain_sd)
model.load_state_dict(model_sd)
test_labels = load_labels(args.dataset, 'test')
test_uids = list(test_labels.keys())
batch_size = 16
start_idx = 0
all_paths, all_probs = [], []
pbar = tqdm(total=len(test_uids))
while start_idx < len(test_uids):
end_idx = min(start_idx + batch_size, len(test_uids))
batch_uids = test_uids[start_idx:end_idx]
paths, probs = batch_beam_search(env, model, batch_uids, args.device, topk=args.topk)
all_paths.extend(paths)
all_probs.extend(probs)
start_idx = end_idx
pbar.update(batch_size)
predicts = {'paths': all_paths, 'probs': all_probs}
pickle.dump(predicts, open(path_file, 'wb'))
def evaluate_paths(path_file, train_labels, test_labels, num_recommendations, args):
embeds = load_embed(args.dataset)
user_embeds = embeds[USER]
purchase_embeds = embeds[PURCHASE][0]
product_embeds = embeds[PRODUCT]
scores = np.dot(user_embeds + purchase_embeds, product_embeds.T)
# 1) Get all valid paths for each user, compute path score and path probability.
results = pickle.load(open(path_file, 'rb'))
pred_paths = {uid: {} for uid in test_labels}
for path, probs in zip(results['paths'], results['probs']):
if path[-1][1] != PRODUCT:
continue
uid = path[0][2]
if uid not in pred_paths:
continue
pid = path[-1][2]
if pid not in pred_paths[uid]:
pred_paths[uid][pid] = []
path_score = scores[uid][pid]
path_prob = reduce(lambda x, y: x * y, probs)
pred_paths[uid][pid].append((path_score, path_prob, path))
# 2) Pick best path for each user-product pair, also remove pid if it is in train set.
best_pred_paths = {}
for uid in pred_paths:
train_pids = set(train_labels[uid])
best_pred_paths[uid] = []
for pid in pred_paths[uid]:
if pid in train_pids:
continue
# Get the path with highest probability
sorted_path = sorted(pred_paths[uid][pid], key=lambda x: x[1], reverse=True)
best_pred_paths[uid].append(sorted_path[0])
# 3) Compute top 10 recommended products for each user.
sort_by = 'score'
pred_labels = {}
for uid in best_pred_paths:
if sort_by == 'score':
sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[0], x[1]), reverse=True)
elif sort_by == 'prob':
sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[1], x[0]), reverse=True)
topk_pids = [p[-1][2] for _, _, p in sorted_path[:num_recommendations]] # from largest to smallest
# add up to 10 pids if not enough
if args.add_products and len(topk_pids) < num_recommendations:
train_pids = set(train_labels[uid])
cand_pids = np.argsort(scores[uid])
for cand_pid in cand_pids[::-1]:
if cand_pid in train_pids or cand_pid in topk_pids:
continue
topk_pids.append(cand_pid)
if len(topk_pids) >= num_recommendations:
break
# end of add
pred_labels[uid] = topk_pids[::-1] # change order to from smallest to largest!
return pred_labels
def test(args):
policy_file = args.log_dir + '/policy_model_epoch_{}.ckpt'.format(args.epochs)
path_file = args.log_dir + '/policy_paths_epoch{}.pkl'.format(args.epochs)
train_labels = load_labels(args.dataset, 'train')
test_labels = load_labels(args.dataset, ' | """Compute metrics for predicted recommendations.
Args:
topk_matches: a list or dict of product ids in ascending order.
"""
invalid_users = []
# Compute metrics
precisions, recalls, ndcgs, hits, fairness = [], [], [], [], []
test_user_idxs = list(test_user_products.keys())
for uid in test_user_idxs:
if uid not in topk_matches or len(topk_matches[uid]) < num_recommendations:
invalid_users.append(uid)
continue
pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid]
if len(pred_list) == 0:
continue
dcg = 0.0
hit_num = 0.0
for i in range(len(pred_list)):
if pred_list[i] in rel_set: | identifier_body |
|
test_agent.py | .append(uid)
continue
pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid]
if len(pred_list) == 0:
continue
dcg = 0.0
hit_num = 0.0
for i in range(len(pred_list)):
if pred_list[i] in rel_set:
dcg += 1. / (log(i + 2) / log(2))
hit_num += 1
# idcg
idcg = 0.0
for i in range(min(len(rel_set), len(pred_list))):
idcg += 1. / (log(i + 2) / log(2))
ndcg = dcg / idcg
recall = hit_num / len(rel_set)
precision = hit_num / len(pred_list)
hit = 1.0 if hit_num > 0.0 else 0.0
ndcgs.append(ndcg)
recalls.append(recall)
precisions.append(precision)
hits.append(hit)
fairness.append(calculate_fairness(pred_list, brand_dict))
avg_precision = np.mean(precisions) * 100
avg_recall = np.mean(recalls) * 100
avg_ndcg = np.mean(ndcgs) * 100
avg_hit = np.mean(hits) * 100
avg_fairness = np.mean(fairness)
print('NDCG={:.3f} | Recall={:.3f} | HR={:.3f} | Precision={:.3f} | Fairness={:.3f} | Invalid users={}'.format(
avg_ndcg, avg_recall, avg_hit, avg_precision, avg_fairness, len(invalid_users)))
def batch_beam_search(env, model, uids, device, topk=[25, 5, 1]):
def | (batch_acts):
batch_masks = []
for acts in batch_acts:
num_acts = len(acts)
act_mask = np.zeros(model.act_dim, dtype=np.uint8)
act_mask[:num_acts] = 1
batch_masks.append(act_mask)
return np.vstack(batch_masks)
state_pool = env.reset(uids) # numpy of [bs, dim]
path_pool = env._batch_path # list of list, size=bs
probs_pool = [[] for _ in uids]
model.eval()
for hop in range(3):
state_tensor = torch.FloatTensor(state_pool).to(device)
acts_pool = env._batch_get_actions(path_pool, False) # list of list, size=bs
actmask_pool = _batch_acts_to_masks(acts_pool) # numpy of [bs, dim]
actmask_tensor = torch.ByteTensor(actmask_pool).to(device)
probs, _ = model((state_tensor, actmask_tensor)) # Tensor of [bs, act_dim]
probs = probs + actmask_tensor.float() # In order to differ from masked actions
topk_probs, topk_idxs = torch.topk(probs, topk[hop], dim=1) # LongTensor of [bs, k]
topk_idxs = topk_idxs.detach().cpu().numpy()
topk_probs = topk_probs.detach().cpu().numpy()
new_path_pool, new_probs_pool = [], []
for row in range(topk_idxs.shape[0]):
path = path_pool[row]
probs = probs_pool[row]
for idx, p in zip(topk_idxs[row], topk_probs[row]):
if idx >= len(acts_pool[row]): # act idx is invalid
continue
relation, next_node_id = acts_pool[row][idx] # (relation, next_node_id)
if relation == SELF_LOOP:
next_node_type = path[-1][1]
else:
next_node_type = KG_RELATION[path[-1][1]][relation]
new_path = path + [(relation, next_node_type, next_node_id)]
new_path_pool.append(new_path)
new_probs_pool.append(probs + [p])
path_pool = new_path_pool
probs_pool = new_probs_pool
if hop < 2:
state_pool = env._batch_get_state(path_pool)
return path_pool, probs_pool
def predict_paths(policy_file, path_file, args):
print('Predicting paths...')
env = BatchKGEnvironment(args.dataset, args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history)
pretrain_sd = torch.load(policy_file)
model = ActorCritic(env.state_dim, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
model_sd = model.state_dict()
model_sd.update(pretrain_sd)
model.load_state_dict(model_sd)
test_labels = load_labels(args.dataset, 'test')
test_uids = list(test_labels.keys())
batch_size = 16
start_idx = 0
all_paths, all_probs = [], []
pbar = tqdm(total=len(test_uids))
while start_idx < len(test_uids):
end_idx = min(start_idx + batch_size, len(test_uids))
batch_uids = test_uids[start_idx:end_idx]
paths, probs = batch_beam_search(env, model, batch_uids, args.device, topk=args.topk)
all_paths.extend(paths)
all_probs.extend(probs)
start_idx = end_idx
pbar.update(batch_size)
predicts = {'paths': all_paths, 'probs': all_probs}
pickle.dump(predicts, open(path_file, 'wb'))
def evaluate_paths(path_file, train_labels, test_labels, num_recommendations, args):
embeds = load_embed(args.dataset)
user_embeds = embeds[USER]
purchase_embeds = embeds[PURCHASE][0]
product_embeds = embeds[PRODUCT]
scores = np.dot(user_embeds + purchase_embeds, product_embeds.T)
# 1) Get all valid paths for each user, compute path score and path probability.
results = pickle.load(open(path_file, 'rb'))
pred_paths = {uid: {} for uid in test_labels}
for path, probs in zip(results['paths'], results['probs']):
if path[-1][1] != PRODUCT:
continue
uid = path[0][2]
if uid not in pred_paths:
continue
pid = path[-1][2]
if pid not in pred_paths[uid]:
pred_paths[uid][pid] = []
path_score = scores[uid][pid]
path_prob = reduce(lambda x, y: x * y, probs)
pred_paths[uid][pid].append((path_score, path_prob, path))
# 2) Pick best path for each user-product pair, also remove pid if it is in train set.
best_pred_paths = {}
for uid in pred_paths:
train_pids = set(train_labels[uid])
best_pred_paths[uid] = []
for pid in pred_paths[uid]:
if pid in train_pids:
continue
# Get the path with highest probability
sorted_path = sorted(pred_paths[uid][pid], key=lambda x: x[1], reverse=True)
best_pred_paths[uid].append(sorted_path[0])
# 3) Compute top 10 recommended products for each user.
sort_by = 'score'
pred_labels = {}
for uid in best_pred_paths:
if sort_by == 'score':
sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[0], x[1]), reverse=True)
elif sort_by == 'prob':
sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[1], x[0]), reverse=True)
topk_pids = [p[-1][2] for _, _, p in sorted_path[:num_recommendations]] # from largest to smallest
# add up to 10 pids if not enough
if args.add_products and len(topk_pids) < num_recommendations:
train_pids = set(train_labels[uid])
cand_pids = np.argsort(scores[uid])
for cand_pid in cand_pids[::-1]:
if cand_pid in train_pids or cand_pid in topk_pids:
continue
topk_pids.append(cand_pid)
if len(topk_pids) >= num_recommendations:
break
# end of add
pred_labels[uid] = topk_pids[::-1] # change order to from smallest to largest!
return pred_labels
def test(args):
policy_file = args.log_dir + '/policy_model_epoch_{}.ckpt'.format(args.epochs)
path_file = args.log_dir + '/policy_paths_epoch{}.pkl'.format(args.epochs)
train_labels = load_labels(args.dataset, 'train')
test_labels = load_labels(args.dataset, 'test')
if args.run_path:
predict_paths(policy_file, path_file, args)
if args.run_eval:
pred_labels = evaluate_paths(path_file, train_labels, test_labels, args.num_recommendations, args)
evaluate(pred_labels, test_labels, args.num_recommendations, args.brand_dict)
if __name__ == '__main__':
boolean = lambda x: (str(x).lower() == 'true')
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', | _batch_acts_to_masks | identifier_name |
non_blocking.rs | will drop the `_guard` and any remaining logs should get flushed
/// # }
/// }
/// ```
#[must_use]
#[derive(Debug)]
pub struct WorkerGuard {
handle: Option<JoinHandle<()>>,
sender: Sender<Msg>,
shutdown: Sender<()>,
}
/// A non-blocking writer.
///
/// While the line between "blocking" and "non-blocking" IO is fuzzy, writing to a file is typically
/// considered to be a _blocking_ operation. For an application whose `Collector` writes spans and events
/// as they are emitted, the latency profile might be unacceptable.
/// `NonBlocking` moves the writing out of an application's data path by sending spans and events
/// to a dedicated logging thread.
///
/// This struct implements [`MakeWriter`][make_writer] from the `tracing-subscriber`
/// crate. Therefore, it can be used with the [`tracing_subscriber::fmt`][fmt] module
/// or with any other collector/subscriber implementation that uses the `MakeWriter` trait.
///
/// [make_writer]: tracing_subscriber::fmt::MakeWriter
/// [fmt]: mod@tracing_subscriber::fmt
#[derive(Clone, Debug)]
pub struct NonBlocking {
error_counter: ErrorCounter,
channel: Sender<Msg>,
is_lossy: bool,
}
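// --- Added illustration (not in the original source) ---
// A minimal sketch of the `MakeWriter` integration described above: `make_writer` hands out
// cheap clones of the same channel-backed writer, so many threads can log through one
// background worker. `std::io::sink()` is only a stand-in destination for this example.
#[cfg(test)]
#[allow(dead_code)]
fn _make_writer_sketch() {
    let (non_blocking, _guard) = NonBlocking::new(std::io::sink());
    // `MakeWriter::make_writer` clones the `NonBlocking` handle rather than opening anything new.
    let mut writer = non_blocking.make_writer();
    let _ = writer.write(b"routed through the worker thread\n");
}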
/// Tracks the number of times a log line was dropped by the background thread.
///
/// If the non-blocking writer is not configured in [lossy mode], the error
/// count should always be 0.
///
/// [lossy mode]: NonBlockingBuilder::lossy
#[derive(Clone, Debug)]
pub struct ErrorCounter(Arc<AtomicUsize>);
impl NonBlocking {
/// Returns a new `NonBlocking` writer wrapping the provided `writer`.
///
/// The returned `NonBlocking` writer will have the [default configuration][default] values.
/// Other configurations can be specified using the [builder] interface.
///
/// [default]: NonBlockingBuilder::default()
/// [builder]: NonBlockingBuilder
pub fn new<T: Write + Send + Sync + 'static>(writer: T) -> (NonBlocking, WorkerGuard) {
NonBlockingBuilder::default().finish(writer)
}
fn create<T: Write + Send + Sync + 'static>(
writer: T,
buffered_lines_limit: usize,
is_lossy: bool,
thread_name: String,
) -> (NonBlocking, WorkerGuard) {
let (sender, receiver) = bounded(buffered_lines_limit);
let (shutdown_sender, shutdown_receiver) = bounded(0);
let worker = Worker::new(receiver, writer, shutdown_receiver);
let worker_guard = WorkerGuard::new(
worker.worker_thread(thread_name),
sender.clone(),
shutdown_sender,
);
(
Self {
channel: sender,
error_counter: ErrorCounter(Arc::new(AtomicUsize::new(0))),
is_lossy,
},
worker_guard,
)
}
/// Returns a counter for the number of times logs were dropped. This will always return zero if
/// `NonBlocking` is not lossy.
pub fn error_counter(&self) -> ErrorCounter {
self.error_counter.clone()
}
}
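// --- Added illustration (not in the original source) ---
// A hedged sketch of `new` and `error_counter` above. `std::io::sink()` is just a placeholder
// writer for the example; any `Write + Send + Sync + 'static` value works.
#[cfg(test)]
#[allow(dead_code)]
fn _construction_sketch() {
    let (mut non_blocking, _guard) = NonBlocking::new(std::io::sink());
    // The write is queued for the background worker instead of hitting the sink directly.
    let _ = non_blocking.write(b"one log line\n");
    // With the default (large) buffer no line should have been dropped.
    assert_eq!(non_blocking.error_counter().dropped_lines(), 0);
}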
/// A builder for [`NonBlocking`][non-blocking].
///
/// [non-blocking]: NonBlocking
#[derive(Debug)]
pub struct NonBlockingBuilder {
buffered_lines_limit: usize,
is_lossy: bool,
thread_name: String,
}
impl NonBlockingBuilder {
/// Sets the number of lines to buffer before dropping logs or exerting backpressure on senders
pub fn buffered_lines_limit(mut self, buffered_lines_limit: usize) -> NonBlockingBuilder {
self.buffered_lines_limit = buffered_lines_limit;
self
}
/// Sets whether `NonBlocking` should be lossy or not.
///
/// If set to `true`, logs will be dropped when the buffered-lines limit is reached. If `false`, backpressure
/// will be exerted on senders, blocking them until the buffer has capacity again.
///
/// By default, the built `NonBlocking` will be lossy.
pub fn lossy(mut self, is_lossy: bool) -> NonBlockingBuilder {
self.is_lossy = is_lossy;
self
}
/// Override the worker thread's name.
///
/// The default worker thread name is "tracing-appender".
pub fn thread_name(mut self, name: &str) -> NonBlockingBuilder {
self.thread_name = name.to_string();
self
}
/// Completes the builder, returning the configured `NonBlocking`.
pub fn finish<T: Write + Send + Sync + 'static>(self, writer: T) -> (NonBlocking, WorkerGuard) {
NonBlocking::create(
writer,
self.buffered_lines_limit,
self.is_lossy,
self.thread_name,
)
}
}
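// --- Added illustration (not in the original source) ---
// A hedged sketch of the builder above: a non-lossy writer with a custom worker thread name.
// `std::io::sink()` is only a placeholder destination; the thread name is an arbitrary example.
#[cfg(test)]
#[allow(dead_code)]
fn _builder_sketch() {
    let (_non_blocking, _guard) = NonBlockingBuilder::default()
        .lossy(false) // exert backpressure instead of dropping lines at capacity
        .thread_name("example-logging-worker")
        .finish(std::io::sink());
}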
impl Default for NonBlockingBuilder {
fn default() -> Self {
NonBlockingBuilder {
buffered_lines_limit: DEFAULT_BUFFERED_LINES_LIMIT,
is_lossy: true,
thread_name: "tracing-appender".to_string(),
}
}
}
impl std::io::Write for NonBlocking {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let buf_size = buf.len();
if self.is_lossy {
if self.channel.try_send(Msg::Line(buf.to_vec())).is_err() {
self.error_counter.incr_saturating();
}
} else {
return match self.channel.send(Msg::Line(buf.to_vec())) {
Ok(_) => Ok(buf_size),
Err(_) => Err(io::Error::from(io::ErrorKind::Other)),
};
}
Ok(buf_size)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
#[inline]
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
self.write(buf).map(|_| ())
}
}
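// --- Added illustration (not in the original source) ---
// A small check of the `Write` impl above: `write` reports the full buffer length, because
// delivery happens asynchronously on the worker thread (and, in lossy mode, a full channel
// increments the error counter instead of shortening the write).
#[cfg(test)]
#[allow(dead_code)]
fn _write_reports_full_length_sketch() {
    let (mut non_blocking, _guard) = NonBlocking::new(std::io::sink());
    let written = non_blocking.write(b"hello\n").expect("queued for the worker");
    assert_eq!(written, b"hello\n".len());
}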
impl<'a> MakeWriter<'a> for NonBlocking {
type Writer = NonBlocking;
fn make_writer(&'a self) -> Self::Writer {
self.clone()
}
}
impl WorkerGuard {
fn new(handle: JoinHandle<()>, sender: Sender<Msg>, shutdown: Sender<()>) -> Self {
WorkerGuard {
handle: Some(handle),
sender,
shutdown,
}
}
}
impl Drop for WorkerGuard {
fn drop(&mut self) {
let timeout = Duration::from_millis(100);
match self.sender.send_timeout(Msg::Shutdown, timeout) {
Ok(_) => {
// Attempt to wait for `Worker` to flush all messages before dropping. This happens
// when the `Worker` calls `recv()` on a zero-capacity channel. Use `send_timeout`
// so that drop is not blocked indefinitely.
// TODO: Make timeout configurable.
let timeout = Duration::from_millis(1000);
match self.shutdown.send_timeout((), timeout) {
Err(SendTimeoutError::Timeout(_)) => {
eprintln!(
"Shutting down logging worker timed out after {:?}.",
timeout
);
}
_ => {
// At this point it is safe to wait for `Worker` destruction without blocking
if let Some(handle) = self.handle.take() {
if handle.join().is_err() {
eprintln!("Logging worker thread panicked");
}
};
}
}
}
Err(SendTimeoutError::Disconnected(_)) => (),
Err(SendTimeoutError::Timeout(_)) => eprintln!(
"Sending shutdown signal to logging worker timed out after {:?}",
timeout
),
}
}
}
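// --- Added illustration (not in the original source) ---
// Why the zero-capacity `shutdown` channel used above acts as a rendezvous: `send` on a
// `bounded(0)` channel (the same crossbeam `bounded` used elsewhere in this file) only returns
// once the other side is blocked in `recv`, i.e. once the worker has stopped pulling log lines.
// A standalone sketch of just that property:
#[cfg(test)]
#[allow(dead_code)]
fn _zero_capacity_rendezvous_sketch() {
    let (tx, rx) = bounded::<()>(0);
    let worker = std::thread::spawn(move || {
        // Stand-in for the real worker reaching its shutdown `recv` after flushing.
        let _ = rx.recv();
    });
    // Stand-in for `WorkerGuard::drop`: this only returns after the "worker" has rendezvoused.
    let _ = tx.send(());
    let _ = worker.join();
}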
// === impl ErrorCounter ===
impl ErrorCounter {
/// Returns the number of log lines that have been dropped.
///
/// If the non-blocking writer is not configured in [lossy mode], the error
/// count should always be 0.
///
/// [lossy mode]: NonBlockingBuilder::lossy
pub fn dropped_lines(&self) -> usize {
self.0.load(Ordering::Acquire)
}
fn incr_saturating(&self) {
let mut curr = self.0.load(Ordering::Acquire);
// We don't need to enter the CAS loop if the current value is already
// `usize::MAX`.
if curr == usize::MAX {
return;
}
// This is implemented as a CAS loop rather than as a simple
// `fetch_add`, because we don't want to wrap on overflow. Instead, we
// need to ensure that saturating addition is performed.
loop {
let val = curr.saturating_add(1);
match self
.0
.compare_exchange(curr, val, Ordering::AcqRel, Ordering::Acquire)
{
Ok(_) => return,
Err(actual) => curr = actual,
}
}
}
}
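// --- Added illustration (not in the original source) ---
// A small check of the saturating increment above: starting one below `usize::MAX`, two
// increments land on `usize::MAX` and stay there, where a plain `fetch_add` would wrap to 0.
#[cfg(test)]
mod error_counter_saturation_sketch {
    use super::*;
    #[test]
    fn saturates_at_usize_max() {
        let counter = ErrorCounter(Arc::new(AtomicUsize::new(usize::MAX - 1)));
        counter.incr_saturating(); // usize::MAX - 1 -> usize::MAX
        counter.incr_saturating(); // stays at usize::MAX instead of wrapping
        assert_eq!(counter.dropped_lines(), usize::MAX);
    }
}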
#[cfg(test)]
mod test {
use super::*;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
struct MockWriter {
tx: mpsc::SyncSender<String>,
}
impl MockWriter {
fn new(capacity: usize) -> (Self, mpsc::Receiver<String>) {
let (tx, rx) = mpsc::sync_channel(capacity);
(Self { tx }, rx)
}
}
impl std::io::Write for MockWriter {
fn | write | identifier_name |
|
non_blocking.rs | backpressure will be exerted on senders, causing them to block their
/// respective threads until there is available capacity.
///
/// Recommended to be a power of 2.
///
/// [non-blocking]: NonBlocking
pub const DEFAULT_BUFFERED_LINES_LIMIT: usize = 128_000;
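// --- Added illustration (not in the original source) ---
// A hedged sketch of overriding this default through `NonBlockingBuilder` (defined later in
// this file), e.g. to make lossy drops easy to observe. `std::io::sink()` is only a
// placeholder writer for the example.
#[cfg(test)]
#[allow(dead_code)]
fn _small_buffer_limit_sketch() {
    let (_non_blocking, _guard) = NonBlockingBuilder::default()
        .buffered_lines_limit(8) // instead of DEFAULT_BUFFERED_LINES_LIMIT
        .lossy(true) // at capacity, drop lines and count them in the error counter
        .finish(std::io::sink());
}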
/// A guard that flushes spans/events associated to a [`NonBlocking`] on a drop
///
/// Writing to a [`NonBlocking`] writer will **not** immediately write a span or event to the underlying
/// output. Instead, the span or event will be written by a dedicated logging thread at some later point.
/// To increase throughput, the non-blocking writer will flush to the underlying output on
/// a periodic basis rather than every time a span or event is written. This means that if the program
/// terminates abruptly (such as through an uncaught `panic` or a `std::process::exit`), some spans
/// or events may not be written.
///
/// Since spans and events recorded near a crash are often necessary for diagnosing the failure,
/// `WorkerGuard` provides a mechanism to ensure that _all_ buffered logs are flushed to their output.
/// `WorkerGuard` should be assigned in the `main` function or whatever the entrypoint of the program is.
/// This will ensure that the guard will be dropped during an unwinding or when `main` exits
/// successfully.
///
/// # Examples
///
/// ``` rust
/// # #[clippy::allow(needless_doctest_main)]
/// fn main () {
/// # fn doc() {
/// let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout());
/// let collector = tracing_subscriber::fmt().with_writer(non_blocking);
/// tracing::collect::with_default(collector.finish(), || {
/// // Emit some tracing events within context of the non_blocking `_guard` and tracing subscriber
/// tracing::event!(tracing::Level::INFO, "Hello");
/// });
/// // Exiting the context of `main` will drop the `_guard` and any remaining logs should get flushed
/// # }
/// }
/// ```
#[must_use]
#[derive(Debug)]
pub struct WorkerGuard {
handle: Option<JoinHandle<()>>,
sender: Sender<Msg>,
shutdown: Sender<()>,
}
/// A non-blocking writer.
///
/// While the line between "blocking" and "non-blocking" IO is fuzzy, writing to a file is typically
/// considered to be a _blocking_ operation. For an application whose `Collector` writes spans and events
/// as they are emitted, an application might find the latency profile to be unacceptable.
/// `NonBlocking` moves the writing out of an application's data path by sending spans and events
/// to a dedicated logging thread.
///
/// This struct implements [`MakeWriter`][make_writer] from the `tracing-subscriber`
/// crate. Therefore, it can be used with the [`tracing_subscriber::fmt`][fmt] module
/// or with any other collector/subscriber implementation that uses the `MakeWriter` trait.
///
/// [make_writer]: tracing_subscriber::fmt::MakeWriter
/// [fmt]: mod@tracing_subscriber::fmt
#[derive(Clone, Debug)]
pub struct NonBlocking {
error_counter: ErrorCounter,
channel: Sender<Msg>,
is_lossy: bool,
}
/// Tracks the number of times a log line was dropped by the background thread.
///
/// If the non-blocking writer is not configured in [lossy mode], the error
/// count should always be 0.
///
/// [lossy mode]: NonBlockingBuilder::lossy
#[derive(Clone, Debug)]
pub struct ErrorCounter(Arc<AtomicUsize>);
impl NonBlocking {
/// Returns a new `NonBlocking` writer wrapping the provided `writer`.
///
/// The returned `NonBlocking` writer will have the [default configuration][default] values.
/// Other configurations can be specified using the [builder] interface.
///
/// [default]: NonBlockingBuilder::default()
/// [builder]: NonBlockingBuilder
pub fn new<T: Write + Send + Sync + 'static>(writer: T) -> (NonBlocking, WorkerGuard) {
NonBlockingBuilder::default().finish(writer)
}
fn create<T: Write + Send + Sync + 'static>(
writer: T,
buffered_lines_limit: usize,
is_lossy: bool,
thread_name: String,
) -> (NonBlocking, WorkerGuard) {
let (sender, receiver) = bounded(buffered_lines_limit);
let (shutdown_sender, shutdown_receiver) = bounded(0);
let worker = Worker::new(receiver, writer, shutdown_receiver);
let worker_guard = WorkerGuard::new(
worker.worker_thread(thread_name),
sender.clone(),
shutdown_sender,
);
(
Self {
channel: sender,
error_counter: ErrorCounter(Arc::new(AtomicUsize::new(0))),
is_lossy,
},
worker_guard,
)
}
/// Returns a counter for the number of times logs were dropped. This will always return zero if
/// `NonBlocking` is not lossy.
pub fn error_counter(&self) -> ErrorCounter {
self.error_counter.clone()
}
}
/// A builder for [`NonBlocking`][non-blocking].
///
/// [non-blocking]: NonBlocking
#[derive(Debug)]
pub struct NonBlockingBuilder {
buffered_lines_limit: usize,
is_lossy: bool,
thread_name: String,
}
impl NonBlockingBuilder {
/// Sets the number of lines to buffer before dropping logs or exerting backpressure on senders
pub fn buffered_lines_limit(mut self, buffered_lines_limit: usize) -> NonBlockingBuilder {
self.buffered_lines_limit = buffered_lines_limit;
self
}
/// Sets whether `NonBlocking` should be lossy or not.
///
/// If set to `true`, logs will be dropped when the buffered limit is reached. If `false`, backpressure
/// will be exerted on senders, blocking them until the buffer has capacity again.
///
/// By default, the built `NonBlocking` will be lossy.
pub fn lossy(mut self, is_lossy: bool) -> NonBlockingBuilder {
self.is_lossy = is_lossy;
self
}
/// Override the worker thread's name.
///
/// The default worker thread name is "tracing-appender".
pub fn thread_name(mut self, name: &str) -> NonBlockingBuilder {
self.thread_name = name.to_string();
self
}
/// Completes the builder, returning the configured `NonBlocking`.
pub fn finish<T: Write + Send + Sync + 'static>(self, writer: T) -> (NonBlocking, WorkerGuard) {
NonBlocking::create(
writer,
self.buffered_lines_limit,
self.is_lossy,
self.thread_name,
)
}
}
impl Default for NonBlockingBuilder {
fn default() -> Self {
NonBlockingBuilder {
buffered_lines_limit: DEFAULT_BUFFERED_LINES_LIMIT,
is_lossy: true,
thread_name: "tracing-appender".to_string(),
}
}
}
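// Hypothetical usage sketch (not in the original source): wiring the builder
// shown above into a bounded, non-lossy writer. The limit, thread name, and
// helper name are arbitrary assumptions.
fn non_lossy_stdout_writer() -> (NonBlocking, WorkerGuard) {
    NonBlockingBuilder::default()
        .buffered_lines_limit(4_096) // block senders once 4096 lines are queued
        .lossy(false)                // exert backpressure instead of dropping lines
        .thread_name("my-app-logger")
        .finish(std::io::stdout())
}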
impl std::io::Write for NonBlocking {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let buf_size = buf.len();
if self.is_lossy {
if self.channel.try_send(Msg::Line(buf.to_vec())).is_err() {
self.error_counter.incr_saturating();
}
} else |
Ok(buf_size)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
#[inline]
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
self.write(buf).map(|_| ())
}
}
impl<'a> MakeWriter<'a> for NonBlocking {
type Writer = NonBlocking;
fn make_writer(&'a self) -> Self::Writer {
self.clone()
}
}
impl WorkerGuard {
fn new(handle: JoinHandle<()>, sender: Sender<Msg>, shutdown: Sender<()>) -> Self {
WorkerGuard {
handle: Some(handle),
sender,
shutdown,
}
}
}
impl Drop for WorkerGuard {
fn drop(&mut self) {
let timeout = Duration::from_millis(100);
match self.sender.send_timeout(Msg::Shutdown, timeout) {
Ok(_) => {
// Attempt to wait for `Worker` to flush all messages before dropping. This happens
// when the `Worker` calls `recv()` on a zero-capacity channel. Use `send_timeout`
// so that drop is not blocked indefinitely.
// TODO: Make timeout configurable.
let timeout = Duration::from_millis(1000);
match self.shutdown.send_timeout((), timeout) {
Err(SendTimeoutError::Timeout(_)) => {
eprintln!(
"Shutting down logging worker timed out after {:?}.",
timeout
);
}
_ => {
// At this point it is safe to wait for `Worker` destruction without blocking
if let Some(handle) = self.handle.take() {
if handle.join().is_err() {
eprintln!("Logging worker thread panicked");
}
};
}
}
}
Err(SendTimeoutError::Disconnected | {
return match self.channel.send(Msg::Line(buf.to_vec())) {
Ok(_) => Ok(buf_size),
Err(_) => Err(io::Error::from(io::ErrorKind::Other)),
};
} | conditional_block |
non_blocking.rs | backpressure will be exerted on senders, causing them to block their
/// respective threads until there is available capacity.
///
/// [non-blocking]: NonBlocking
/// Recommended to be a power of 2.
pub const DEFAULT_BUFFERED_LINES_LIMIT: usize = 128_000;
/// A guard that flushes spans/events associated to a [`NonBlocking`] on a drop
///
/// Writing to a [`NonBlocking`] writer will **not** immediately write a span or event to the underlying
/// output. Instead, the span or event will be written by a dedicated logging thread at some later point.
/// To increase throughput, the non-blocking writer will flush to the underlying output on
/// a periodic basis rather than every time a span or event is written. This means that if the program
/// terminates abruptly (such as through an uncaught `panic` or a `std::process::exit`), some spans
/// or events may not be written.
///
/// Since spans/events recorded near a crash are often necessary for diagnosing the failure,
/// `WorkerGuard` provides a mechanism to ensure that _all_ buffered logs are flushed to their output.
/// `WorkerGuard` should be assigned in the `main` function or whatever the entrypoint of the program is.
/// This will ensure that the guard will be dropped during an unwinding or when `main` exits
/// successfully.
///
/// # Examples
///
/// ``` rust
/// # #[clippy::allow(needless_doctest_main)]
/// fn main () {
/// # fn doc() {
/// let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout());
/// let collector = tracing_subscriber::fmt().with_writer(non_blocking);
/// tracing::collect::with_default(collector.finish(), || {
/// // Emit some tracing events within context of the non_blocking `_guard` and tracing subscriber
/// tracing::event!(tracing::Level::INFO, "Hello");
/// });
/// // Exiting the context of `main` will drop the `_guard` and any remaining logs should get flushed
/// # }
/// }
/// ```
#[must_use]
#[derive(Debug)]
pub struct WorkerGuard {
handle: Option<JoinHandle<()>>,
sender: Sender<Msg>,
shutdown: Sender<()>,
}
/// A non-blocking writer.
///
/// While the line between "blocking" and "non-blocking" IO is fuzzy, writing to a file is typically
/// considered to be a _blocking_ operation. For an application whose `Collector` writes spans and events
/// as they are emitted, an application might find the latency profile to be unacceptable.
/// `NonBlocking` moves the writing out of an application's data path by sending spans and events
/// to a dedicated logging thread.
///
/// This struct implements [`MakeWriter`][make_writer] from the `tracing-subscriber`
/// crate. Therefore, it can be used with the [`tracing_subscriber::fmt`][fmt] module
/// or with any other collector/subscriber implementation that uses the `MakeWriter` trait.
///
/// [make_writer]: tracing_subscriber::fmt::MakeWriter
/// [fmt]: mod@tracing_subscriber::fmt
#[derive(Clone, Debug)]
pub struct NonBlocking {
error_counter: ErrorCounter,
channel: Sender<Msg>,
is_lossy: bool,
}
/// Tracks the number of times a log line was dropped by the background thread.
///
/// If the non-blocking writer is not configured in [lossy mode], the error
/// count should always be 0.
///
/// [lossy mode]: NonBlockingBuilder::lossy
#[derive(Clone, Debug)]
pub struct ErrorCounter(Arc<AtomicUsize>);
impl NonBlocking {
/// Returns a new `NonBlocking` writer wrapping the provided `writer`.
///
/// The returned `NonBlocking` writer will have the [default configuration][default] values.
/// Other configurations can be specified using the [builder] interface.
///
/// [default]: NonBlockingBuilder::default()
/// [builder]: NonBlockingBuilder
pub fn new<T: Write + Send + Sync + 'static>(writer: T) -> (NonBlocking, WorkerGuard) {
NonBlockingBuilder::default().finish(writer)
}
fn create<T: Write + Send + Sync + 'static>(
writer: T,
buffered_lines_limit: usize,
is_lossy: bool,
thread_name: String,
) -> (NonBlocking, WorkerGuard) {
let (sender, receiver) = bounded(buffered_lines_limit);
let (shutdown_sender, shutdown_receiver) = bounded(0);
let worker = Worker::new(receiver, writer, shutdown_receiver);
let worker_guard = WorkerGuard::new(
worker.worker_thread(thread_name),
sender.clone(),
shutdown_sender,
);
(
Self {
channel: sender,
error_counter: ErrorCounter(Arc::new(AtomicUsize::new(0))),
is_lossy,
},
worker_guard,
)
}
/// Returns a counter for the number of times logs were dropped. This will always return zero if
/// `NonBlocking` is not lossy.
pub fn error_counter(&self) -> ErrorCounter {
self.error_counter.clone()
}
}
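// Assumed helper, not part of the original file: the `error_counter()`
// accessor above hands out a clone of the shared counter, so dropped-line
// totals can be inspected from anywhere without touching the writer itself.
fn report_dropped_lines(writer: &NonBlocking) {
    let dropped = writer.error_counter().dropped_lines();
    if dropped > 0 {
        eprintln!("non-blocking writer dropped {} log lines", dropped);
    }
}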
/// A builder for [`NonBlocking`][non-blocking].
///
/// [non-blocking]: NonBlocking
#[derive(Debug)]
pub struct NonBlockingBuilder {
buffered_lines_limit: usize,
is_lossy: bool,
thread_name: String,
}
impl NonBlockingBuilder {
/// Sets the number of lines to buffer before dropping logs or exerting backpressure on senders
pub fn buffered_lines_limit(mut self, buffered_lines_limit: usize) -> NonBlockingBuilder {
self.buffered_lines_limit = buffered_lines_limit;
self
}
/// Sets whether `NonBlocking` should be lossy or not.
///
/// If set to `true`, logs will be dropped when the buffered limit is reached. If `false`, backpressure
/// will be exerted on senders, blocking them until the buffer has capacity again. | ///
/// By default, the built `NonBlocking` will be lossy.
pub fn lossy(mut self, is_lossy: bool) -> NonBlockingBuilder {
self.is_lossy = is_lossy;
self
}
/// Override the worker thread's name.
///
/// The default worker thread name is "tracing-appender".
pub fn thread_name(mut self, name: &str) -> NonBlockingBuilder {
self.thread_name = name.to_string();
self
}
/// Completes the builder, returning the configured `NonBlocking`.
pub fn finish<T: Write + Send + Sync + 'static>(self, writer: T) -> (NonBlocking, WorkerGuard) {
NonBlocking::create(
writer,
self.buffered_lines_limit,
self.is_lossy,
self.thread_name,
)
}
}
impl Default for NonBlockingBuilder {
fn default() -> Self {
NonBlockingBuilder {
buffered_lines_limit: DEFAULT_BUFFERED_LINES_LIMIT,
is_lossy: true,
thread_name: "tracing-appender".to_string(),
}
}
}
impl std::io::Write for NonBlocking {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let buf_size = buf.len();
if self.is_lossy {
if self.channel.try_send(Msg::Line(buf.to_vec())).is_err() {
self.error_counter.incr_saturating();
}
} else {
return match self.channel.send(Msg::Line(buf.to_vec())) {
Ok(_) => Ok(buf_size),
Err(_) => Err(io::Error::from(io::ErrorKind::Other)),
};
}
Ok(buf_size)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
#[inline]
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
self.write(buf).map(|_| ())
}
}
impl<'a> MakeWriter<'a> for NonBlocking {
type Writer = NonBlocking;
fn make_writer(&'a self) -> Self::Writer {
self.clone()
}
}
impl WorkerGuard {
fn new(handle: JoinHandle<()>, sender: Sender<Msg>, shutdown: Sender<()>) -> Self {
WorkerGuard {
handle: Some(handle),
sender,
shutdown,
}
}
}
impl Drop for WorkerGuard {
fn drop(&mut self) {
let timeout = Duration::from_millis(100);
match self.sender.send_timeout(Msg::Shutdown, timeout) {
Ok(_) => {
// Attempt to wait for `Worker` to flush all messages before dropping. This happens
// when the `Worker` calls `recv()` on a zero-capacity channel. Use `send_timeout`
// so that drop is not blocked indefinitely.
// TODO: Make timeout configurable.
let timeout = Duration::from_millis(1000);
match self.shutdown.send_timeout((), timeout) {
Err(SendTimeoutError::Timeout(_)) => {
eprintln!(
"Shutting down logging worker timed out after {:?}.",
timeout
);
}
_ => {
// At this point it is safe to wait for `Worker` destruction without blocking
if let Some(handle) = self.handle.take() {
if handle.join().is_err() {
eprintln!("Logging worker thread panicked");
}
};
}
}
}
Err(SendTimeoutError::Disconnected(_)) | random_line_split |
|
non_blocking.rs | backpressure will be exerted on senders, causing them to block their
/// respective threads until there is available capacity.
///
/// [non-blocking]: NonBlocking
/// Recommended to be a power of 2.
pub const DEFAULT_BUFFERED_LINES_LIMIT: usize = 128_000;
/// A guard that flushes spans/events associated to a [`NonBlocking`] on a drop
///
/// Writing to a [`NonBlocking`] writer will **not** immediately write a span or event to the underlying
/// output. Instead, the span or event will be written by a dedicated logging thread at some later point.
/// To increase throughput, the non-blocking writer will flush to the underlying output on
/// a periodic basis rather than every time a span or event is written. This means that if the program
/// terminates abruptly (such as through an uncaught `panic` or a `std::process::exit`), some spans
/// or events may not be written.
///
/// Since spans/events recorded near a crash are often necessary for diagnosing the failure,
/// `WorkerGuard` provides a mechanism to ensure that _all_ buffered logs are flushed to their output.
/// `WorkerGuard` should be assigned in the `main` function or whatever the entrypoint of the program is.
/// This will ensure that the guard will be dropped during an unwinding or when `main` exits
/// successfully.
///
/// # Examples
///
/// ``` rust
/// # #[clippy::allow(needless_doctest_main)]
/// fn main () {
/// # fn doc() {
/// let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout());
/// let collector = tracing_subscriber::fmt().with_writer(non_blocking);
/// tracing::collect::with_default(collector.finish(), || {
/// // Emit some tracing events within context of the non_blocking `_guard` and tracing subscriber
/// tracing::event!(tracing::Level::INFO, "Hello");
/// });
/// // Exiting the context of `main` will drop the `_guard` and any remaining logs should get flushed
/// # }
/// }
/// ```
#[must_use]
#[derive(Debug)]
pub struct WorkerGuard {
handle: Option<JoinHandle<()>>,
sender: Sender<Msg>,
shutdown: Sender<()>,
}
/// A non-blocking writer.
///
/// While the line between "blocking" and "non-blocking" IO is fuzzy, writing to a file is typically
/// considered to be a _blocking_ operation. For an application whose `Collector` writes spans and events
/// as they are emitted, an application might find the latency profile to be unacceptable.
/// `NonBlocking` moves the writing out of an application's data path by sending spans and events
/// to a dedicated logging thread.
///
/// This struct implements [`MakeWriter`][make_writer] from the `tracing-subscriber`
/// crate. Therefore, it can be used with the [`tracing_subscriber::fmt`][fmt] module
/// or with any other collector/subscriber implementation that uses the `MakeWriter` trait.
///
/// [make_writer]: tracing_subscriber::fmt::MakeWriter
/// [fmt]: mod@tracing_subscriber::fmt
#[derive(Clone, Debug)]
pub struct NonBlocking {
error_counter: ErrorCounter,
channel: Sender<Msg>,
is_lossy: bool,
}
/// Tracks the number of times a log line was dropped by the background thread.
///
/// If the non-blocking writer is not configured in [lossy mode], the error
/// count should always be 0.
///
/// [lossy mode]: NonBlockingBuilder::lossy
#[derive(Clone, Debug)]
pub struct ErrorCounter(Arc<AtomicUsize>);
impl NonBlocking {
/// Returns a new `NonBlocking` writer wrapping the provided `writer`.
///
/// The returned `NonBlocking` writer will have the [default configuration][default] values.
/// Other configurations can be specified using the [builder] interface.
///
/// [default]: NonBlockingBuilder::default()
/// [builder]: NonBlockingBuilder
pub fn new<T: Write + Send + Sync + 'static>(writer: T) -> (NonBlocking, WorkerGuard) {
NonBlockingBuilder::default().finish(writer)
}
fn create<T: Write + Send + Sync + 'static>(
writer: T,
buffered_lines_limit: usize,
is_lossy: bool,
thread_name: String,
) -> (NonBlocking, WorkerGuard) {
let (sender, receiver) = bounded(buffered_lines_limit);
let (shutdown_sender, shutdown_receiver) = bounded(0);
let worker = Worker::new(receiver, writer, shutdown_receiver);
let worker_guard = WorkerGuard::new(
worker.worker_thread(thread_name),
sender.clone(),
shutdown_sender,
);
(
Self {
channel: sender,
error_counter: ErrorCounter(Arc::new(AtomicUsize::new(0))),
is_lossy,
},
worker_guard,
)
}
/// Returns a counter for the number of times logs were dropped. This will always return zero if
/// `NonBlocking` is not lossy.
pub fn error_counter(&self) -> ErrorCounter {
self.error_counter.clone()
}
}
/// A builder for [`NonBlocking`][non-blocking].
///
/// [non-blocking]: NonBlocking
#[derive(Debug)]
pub struct NonBlockingBuilder {
buffered_lines_limit: usize,
is_lossy: bool,
thread_name: String,
}
impl NonBlockingBuilder {
/// Sets the number of lines to buffer before dropping logs or exerting backpressure on senders
pub fn buffered_lines_limit(mut self, buffered_lines_limit: usize) -> NonBlockingBuilder {
self.buffered_lines_limit = buffered_lines_limit;
self
}
/// Sets whether `NonBlocking` should be lossy or not.
///
/// If set to `true`, logs will be dropped when the buffered limit is reached. If `false`, backpressure
/// will be exerted on senders, blocking them until the buffer has capacity again.
///
/// By default, the built `NonBlocking` will be lossy.
pub fn lossy(mut self, is_lossy: bool) -> NonBlockingBuilder |
/// Override the worker thread's name.
///
/// The default worker thread name is "tracing-appender".
pub fn thread_name(mut self, name: &str) -> NonBlockingBuilder {
self.thread_name = name.to_string();
self
}
/// Completes the builder, returning the configured `NonBlocking`.
pub fn finish<T: Write + Send + Sync + 'static>(self, writer: T) -> (NonBlocking, WorkerGuard) {
NonBlocking::create(
writer,
self.buffered_lines_limit,
self.is_lossy,
self.thread_name,
)
}
}
impl Default for NonBlockingBuilder {
fn default() -> Self {
NonBlockingBuilder {
buffered_lines_limit: DEFAULT_BUFFERED_LINES_LIMIT,
is_lossy: true,
thread_name: "tracing-appender".to_string(),
}
}
}
impl std::io::Write for NonBlocking {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let buf_size = buf.len();
if self.is_lossy {
if self.channel.try_send(Msg::Line(buf.to_vec())).is_err() {
self.error_counter.incr_saturating();
}
} else {
return match self.channel.send(Msg::Line(buf.to_vec())) {
Ok(_) => Ok(buf_size),
Err(_) => Err(io::Error::from(io::ErrorKind::Other)),
};
}
Ok(buf_size)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
#[inline]
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
self.write(buf).map(|_| ())
}
}
impl<'a> MakeWriter<'a> for NonBlocking {
type Writer = NonBlocking;
fn make_writer(&'a self) -> Self::Writer {
self.clone()
}
}
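// Hedged sketch of what the `MakeWriter` impl above enables; the
// `tracing_subscriber::fmt()` call mirrors the doc example earlier in this
// file and is an assumption about the caller's setup, not original code.
fn install_subscriber(non_blocking: NonBlocking) {
    let _collector = tracing_subscriber::fmt()
        .with_writer(non_blocking) // accepted because NonBlocking implements MakeWriter
        .finish();
    // hand `_collector` to `tracing::collect::with_default(...)` as in the
    // doc example above, or install it as the global default.
}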
impl WorkerGuard {
fn new(handle: JoinHandle<()>, sender: Sender<Msg>, shutdown: Sender<()>) -> Self {
WorkerGuard {
handle: Some(handle),
sender,
shutdown,
}
}
}
impl Drop for WorkerGuard {
fn drop(&mut self) {
let timeout = Duration::from_millis(100);
match self.sender.send_timeout(Msg::Shutdown, timeout) {
Ok(_) => {
// Attempt to wait for `Worker` to flush all messages before dropping. This happens
// when the `Worker` calls `recv()` on a zero-capacity channel. Use `send_timeout`
// so that drop is not blocked indefinitely.
// TODO: Make timeout configurable.
let timeout = Duration::from_millis(1000);
match self.shutdown.send_timeout((), timeout) {
Err(SendTimeoutError::Timeout(_)) => {
eprintln!(
"Shutting down logging worker timed out after {:?}.",
timeout
);
}
_ => {
// At this point it is safe to wait for `Worker` destruction without blocking
if let Some(handle) = self.handle.take() {
if handle.join().is_err() {
eprintln!("Logging worker thread panicked");
}
};
}
}
}
Err(SendTimeoutError::Disconnected | {
self.is_lossy = is_lossy;
self
} | identifier_body |
key.rs | ///
/// // Until here memory buffer is read/write. Turn it into a key
/// let key = ProtKey::new(buf_rnd);
///
/// // Or more simply, like this with exactly the same result
/// let key: ProtKey8 = ProtBuf::new_rand_os(32).into_key();
///
/// {
/// // Request access in read-mode
/// let key_read = key.read();
/// let byte = key_read[16];
/// // ...
/// } // Relinquish its read-access
///
/// // Alternative way to read its content
/// key.read_with(|k| encrypt(&k[..], b"abc"));
///
/// // Access it in write-mode
/// let key_write = key.try_write();
/// if let Some(mut kw) = key_write {
/// kw[16] = 42;
/// }
/// # }
/// ```
pub struct ProtKey<T: Copy, A: KeyAllocator = DefaultKeyAllocator> {
key: RefCell<ProtBuf<T, A>>,
read_ctr: Rc<Cell<usize>>
}
impl<T: Copy, A: KeyAllocator> ProtKey<T, A> {
/// Take ownership of `prot_buf` and transform it into a `ProtKey`. By
/// default prevent any access.
pub fn new(prot_buf: ProtBuf<T, A>) -> ProtKey<T, A> {
unsafe {
<A as KeyAllocator>::protect_none(prot_buf.as_ptr() as *mut u8,
prot_buf.len_bytes());
}
ProtKey {
key: RefCell::new(prot_buf),
read_ctr: Rc::new(Cell::new(NOREAD))
}
}
/// Consume and copy `prot_buf` to force using `ProtKey`'s allocator.
/// If `prot_buf` already uses a `KeyAllocator` there is no need to make
/// a copy so directly call the default constructor `new` instead.
pub fn from_buf<B: Allocator>(prot_buf: ProtBuf<T, B>) -> ProtKey<T, A> {
let buf = ProtBuf::from_slice(&prot_buf);
ProtKey::new(buf)
}
/// Return a wrapper to the key in read mode. This method `panic!` if
/// this key is already accessed in write mode.
// FIXME: Not sure if it's the best interface to provide a `try_read`
// variant to this `fail`ing method. It would maybe be better to
// implement a single method returning a `Result`. See this RFC
// https://github.com/rust-lang/rfcs/blob/master/text/0236-error-conventions.md
pub fn read(&self) -> ProtKeyRead<T, A> {
ProtKeyRead::new(self.key.borrow(), self.read_ctr.clone())
}
/// Return a wrapper to the key in read mode. Return `None`
/// if the key is already accessed in write mode.
pub fn try_read(&self) -> Option<ProtKeyRead<T, A>> {
match self.key.borrow_state() {
BorrowState::Reading|BorrowState::Unused => Some(self.read()),
_ => None
}
}
/// Access the key in read mode and pass a reference to closure `f`.
/// The key can only be read during this call. This method will `panic!`
/// if a read access cannot be acquired on this key.
pub fn read_with<F>(&self, mut f: F) where F: FnMut(ProtKeyRead<T, A>){
f(self.read())
}
/// Return a wrapper to the key in write mode. This method `panic!` if
/// the key is already currently accessed in read or write mode.
pub fn write(&self) -> ProtKeyWrite<T, A> {
let key_write = ProtKeyWrite::new(self.key.borrow_mut());
assert_eq!(self.read_ctr.get(), NOREAD);
key_write
}
/// Return a wrapper to the key in write mode. Return `None`
/// if the key is already accessed in read or write mode.
pub fn try_write(&self) -> Option<ProtKeyWrite<T, A>> {
match self.key.borrow_state() {
BorrowState::Unused => Some(self.write()),
_ => None
}
}
/// Access the key in write mode and pass a reference to closure `f`.
/// The key can only be written during this call. This method will
/// `panic!` if a write access cannot be acquired on this key.
pub fn write_with<F>(&self, mut f: F)
where F: FnMut(&mut ProtKeyWrite<T, A>) {
f(&mut self.write())
}
}
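// Hedged usage sketch (the helper name, `u8` element type, and indexing via
// the guards are assumptions based on the doc example above): read guards can
// coexist, but write access is exclusive.
fn read_write_exclusivity(key: &ProtKey<u8>) {
    let first = key.read();
    let second = first.clone_it();      // several readers at once are fine
    assert!(key.try_write().is_none()); // ...but a write guard is refused
    drop((first, second));
    key.write_with(|k| k[0] = 0);       // exclusive write once readers are gone
}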
impl<T: Copy, A: KeyAllocator> Drop for ProtKey<T, A> {
fn drop(&mut self) {
// FIXME: without this assert this drop is useless.
assert_eq!(self.read_ctr.get(), NOREAD);
}
}
impl<T: Copy, A: KeyAllocator> Clone for ProtKey<T, A> {
fn clone(&self) -> ProtKey<T, A> {
ProtKey::new(self.read().clone())
}
}
impl<T: Copy, A: KeyAllocator> PartialEq for ProtKey<T, A> {
fn eq(&self, other: &ProtKey<T, A>) -> bool {
match (self.try_read(), other.try_read()) {
(Some(ref s), Some(ref o)) => *s == *o,
(_, _) => false
}
}
}
impl<T: Debug + Copy, A: KeyAllocator> Debug for ProtKey<T, A> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_read() {
Some(r) => r.fmt(f),
None => Err(fmt::Error)
}
}
}
/// An RAII protected key with read access
///
/// This instance is the result of a `read` request on a `ProtKey`. If no
/// other similar instance on the same `ProtKey` exists, raw memory access
/// will be revoked when this instance is destructed.
pub struct ProtKeyRead<'a, T: Copy + 'a, A: KeyAllocator + 'a> {
ref_key: Ref<'a, ProtBuf<T, A>>,
read_ctr: Rc<Cell<usize>>
}
impl<'a, T: Copy, A: KeyAllocator> ProtKeyRead<'a, T, A> {
fn new(ref_key: Ref<'a, ProtBuf<T, A>>,
read_ctr: Rc<Cell<usize>>) -> ProtKeyRead<'a, T, A> {
if read_ctr.get() == NOREAD {
unsafe {
<A as KeyAllocator>::protect_read(ref_key.as_ptr() as *mut u8,
ref_key.len_bytes());
}
}
read_ctr.set(read_ctr.get().checked_add(1).unwrap());
ProtKeyRead {
ref_key: ref_key,
read_ctr: read_ctr
}
}
/// Clone this instance.
// FIXME: Currently does not implement `clone()` as it would interfere
// with `ProtKey::clone()`.
pub fn clone_it(&self) -> ProtKeyRead<T, A> {
ProtKeyRead::new(Ref::clone(&self.ref_key), self.read_ctr.clone())
}
}
impl<'a, T: Copy, A: KeyAllocator> Drop for ProtKeyRead<'a, T, A> {
fn drop(&mut self) {
self.read_ctr.set(self.read_ctr.get().checked_sub(1).unwrap());
if self.read_ctr.get() == NOREAD {
unsafe {
<A as KeyAllocator>::protect_none(
self.ref_key.as_ptr() as *mut u8,
self.ref_key.len_bytes());
}
}
}
}
impl<'a, T: Copy, A: KeyAllocator> Deref for ProtKeyRead<'a, T, A> {
type Target = ProtBuf<T, A>;
fn deref(&self) -> &ProtBuf<T, A> {
&*self.ref_key
}
}
impl<'a, T: Copy, A: KeyAllocator> AsRef<[T]> for ProtKeyRead<'a, T, A> {
fn as_ref(&self) -> &[T] {
&***self
}
}
impl<'a, T: Copy, A: KeyAllocator> PartialEq for ProtKeyRead<'a, T, A> {
fn eq(&self, other: &ProtKeyRead<T, A>) -> bool {
**self == **other
}
}
impl<'a, T: Debug + Copy, A: KeyAllocator> Debug for ProtKeyRead<'a, T, A> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
/// An RAII protected key with write access
///
/// This instance is the result of a `write` request on a `ProtKey`. Its
/// raw memory may only be written during the lifetime of this object.
pub struct ProtKeyWrite<'a, T: Copy + 'a, A: KeyAllocator + 'a> {
ref_key: RefMut<'a, ProtBuf<T, A>>,
}
impl<'a, T: Copy, A: KeyAllocator> ProtKeyWrite<'a, T, A> {
fn new(ref_key: RefMut<'a, ProtBuf<T, A>>) -> ProtKeyWrite<'a, | {
self.ref_key.fmt(f)
} | identifier_body |
key.rs | );
///
/// // Until here memory buffer is read/write. Turn it into a key
/// let key = ProtKey::new(buf_rnd);
///
/// // Or more simply, like this with exactly the same result
/// let key: ProtKey8 = ProtBuf::new_rand_os(32).into_key();
///
/// {
/// // Request access in read-mode
/// let key_read = key.read();
/// let byte = key_read[16];
/// // ...
/// } // Relinquish its read-access
///
/// // Alternative way to read its content
/// key.read_with(|k| encrypt(&k[..], b"abc"));
///
/// // Access it in write-mode
/// let key_write = key.try_write();
/// if let Some(mut kw) = key_write {
/// kw[16] = 42;
/// }
/// # }
/// ```
pub struct ProtKey<T: Copy, A: KeyAllocator = DefaultKeyAllocator> {
key: RefCell<ProtBuf<T, A>>,
read_ctr: Rc<Cell<usize>>
}
impl<T: Copy, A: KeyAllocator> ProtKey<T, A> {
/// Take ownership of `prot_buf` and transform it into a `ProtKey`. By
/// default prevent any access.
pub fn new(prot_buf: ProtBuf<T, A>) -> ProtKey<T, A> {
unsafe {
<A as KeyAllocator>::protect_none(prot_buf.as_ptr() as *mut u8,
prot_buf.len_bytes());
}
ProtKey {
key: RefCell::new(prot_buf),
read_ctr: Rc::new(Cell::new(NOREAD))
}
}
/// Consume and copy `prot_buf` to force using `ProtKey`'s allocator.
/// If `prot_buf` already uses a `KeyAllocator` there is no need to make
/// a copy so directly call the default constructor `new` instead.
pub fn from_buf<B: Allocator>(prot_buf: ProtBuf<T, B>) -> ProtKey<T, A> {
let buf = ProtBuf::from_slice(&prot_buf);
ProtKey::new(buf)
}
/// Return a wrapper to the key in read mode. This method `panic!` if
/// this key is already accessed in write mode.
// FIXME: Not sure if it's the best interface to provide a `try_read`
// variant to this `fail`ing method. It would maybe be better to
// implement a single method returning a `Result`. See this RFC
// https://github.com/rust-lang/rfcs/blob/master/text/0236-error-conventions.md
pub fn read(&self) -> ProtKeyRead<T, A> {
ProtKeyRead::new(self.key.borrow(), self.read_ctr.clone())
}
/// Return a wrapper to the key in read mode. Return `None`
/// if the key is already accessed in write mode.
pub fn try_read(&self) -> Option<ProtKeyRead<T, A>> {
match self.key.borrow_state() {
BorrowState::Reading|BorrowState::Unused => Some(self.read()),
_ => None
}
}
/// Access the key in read mode and pass a reference to closure `f`.
/// The key can only be read during this call. This method will `panic!`
/// if a read access cannot be acquired on this key.
pub fn read_with<F>(&self, mut f: F) where F: FnMut(ProtKeyRead<T, A>){
f(self.read())
}
/// Return a wrapper to the key in write mode. This method `panic!` if
/// the key is already currently accessed in read or write mode.
pub fn write(&self) -> ProtKeyWrite<T, A> {
let key_write = ProtKeyWrite::new(self.key.borrow_mut());
assert_eq!(self.read_ctr.get(), NOREAD);
key_write
}
/// Return a wrapper to the key in write mode. Return `None`
/// if the key is already accessed in read or write mode.
pub fn try_write(&self) -> Option<ProtKeyWrite<T, A>> {
match self.key.borrow_state() {
BorrowState::Unused => Some(self.write()),
_ => None
}
}
/// Access the key in write mode and pass a reference to closure `f`.
/// The key can only be written during this call. This method will
/// `panic!` if a write access cannot be acquired on this key.
pub fn write_with<F>(&self, mut f: F)
where F: FnMut(&mut ProtKeyWrite<T, A>) {
f(&mut self.write())
}
}
impl<T: Copy, A: KeyAllocator> Drop for ProtKey<T, A> {
fn drop(&mut self) {
// FIXME: without this assert this drop is useless.
assert_eq!(self.read_ctr.get(), NOREAD);
}
}
impl<T: Copy, A: KeyAllocator> Clone for ProtKey<T, A> {
fn clone(&self) -> ProtKey<T, A> {
ProtKey::new(self.read().clone())
}
}
impl<T: Copy, A: KeyAllocator> PartialEq for ProtKey<T, A> {
fn eq(&self, other: &ProtKey<T, A>) -> bool {
match (self.try_read(), other.try_read()) {
(Some(ref s), Some(ref o)) => *s == *o,
(_, _) => false
}
}
}
impl<T: Debug + Copy, A: KeyAllocator> Debug for ProtKey<T, A> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_read() {
Some(r) => r.fmt(f),
None => Err(fmt::Error)
}
}
}
/// An RAII protected key with read access
///
/// This instance is the result of a `read` request on a `ProtKey`. If no
/// other similar instance on the same `ProtKey` exists, raw memory access
/// will be revoked when this instance is destructed.
pub struct ProtKeyRead<'a, T: Copy + 'a, A: KeyAllocator + 'a> {
ref_key: Ref<'a, ProtBuf<T, A>>,
read_ctr: Rc<Cell<usize>>
}
impl<'a, T: Copy, A: KeyAllocator> ProtKeyRead<'a, T, A> {
fn new(ref_key: Ref<'a, ProtBuf<T, A>>,
read_ctr: Rc<Cell<usize>>) -> ProtKeyRead<'a, T, A> {
if read_ctr.get() == NOREAD {
unsafe {
<A as KeyAllocator>::protect_read(ref_key.as_ptr() as *mut u8,
ref_key.len_bytes());
}
}
read_ctr.set(read_ctr.get().checked_add(1).unwrap());
ProtKeyRead {
ref_key: ref_key,
read_ctr: read_ctr
}
}
/// Clone this instance.
// FIXME: Currently does not implement `clone()` as it would interfere
// with `ProtKey::clone()`. | }
impl<'a, T: Copy, A: KeyAllocator> Drop for ProtKeyRead<'a, T, A> {
fn drop(&mut self) {
self.read_ctr.set(self.read_ctr.get().checked_sub(1).unwrap());
if self.read_ctr.get() == NOREAD {
unsafe {
<A as KeyAllocator>::protect_none(
self.ref_key.as_ptr() as *mut u8,
self.ref_key.len_bytes());
}
}
}
}
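// Assumed illustration (not original code) of the guard lifecycle implemented
// above: the first `read()` unprotects the pages and dropping the last
// `ProtKeyRead` re-protects them, keeping the readable window as small as
// possible.
fn scoped_read(key: &ProtKey<u8>) -> u8 {
    let guard = key.read(); // reader count 0 -> 1, protect_read() runs
    guard[0]                // raw memory is readable only while `guard` lives
}                           // guard drops here: count 1 -> 0, protect_none() runs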
impl<'a, T: Copy, A: KeyAllocator> Deref for ProtKeyRead<'a, T, A> {
type Target = ProtBuf<T, A>;
fn deref(&self) -> &ProtBuf<T, A> {
&*self.ref_key
}
}
impl<'a, T: Copy, A: KeyAllocator> AsRef<[T]> for ProtKeyRead<'a, T, A> {
fn as_ref(&self) -> &[T] {
&***self
}
}
impl<'a, T: Copy, A: KeyAllocator> PartialEq for ProtKeyRead<'a, T, A> {
fn eq(&self, other: &ProtKeyRead<T, A>) -> bool {
**self == **other
}
}
impl<'a, T: Debug + Copy, A: KeyAllocator> Debug for ProtKeyRead<'a, T, A> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.ref_key.fmt(f)
}
}
/// An RAII protected key with write access
///
/// This instance is the result of a `write` request on a `ProtKey`. Its
/// raw memory may only be written during the lifetime of this object.
pub struct ProtKeyWrite<'a, T: Copy + 'a, A: KeyAllocator + 'a> {
ref_key: RefMut<'a, ProtBuf<T, A>>,
}
impl<'a, T: Copy, A: KeyAllocator> ProtKeyWrite<'a, T, A> {
fn new(ref_key: RefMut<'a, ProtBuf<T, A>>) -> ProtKeyWrite<'a, T | pub fn clone_it(&self) -> ProtKeyRead<T, A> {
ProtKeyRead::new(Ref::clone(&self.ref_key), self.read_ctr.clone())
} | random_line_split |
key.rs | ///
/// // Until here memory buffer is read/write. Turn it into a key
/// let key = ProtKey::new(buf_rnd);
///
/// // Or more simply, like this with exactly the same result
/// let key: ProtKey8 = ProtBuf::new_rand_os(32).into_key();
///
/// {
/// // Request access in read-mode
/// let key_read = key.read();
/// let byte = key_read[16];
/// // ...
/// } // Relinquish its read-access
///
/// // Alternative way to read its content
/// key.read_with(|k| encrypt(&k[..], b"abc"));
///
/// // Access it in write-mode
/// let key_write = key.try_write();
/// if let Some(mut kw) = key_write {
/// kw[16] = 42;
/// }
/// # }
/// ```
pub struct ProtKey<T: Copy, A: KeyAllocator = DefaultKeyAllocator> {
key: RefCell<ProtBuf<T, A>>,
read_ctr: Rc<Cell<usize>>
}
impl<T: Copy, A: KeyAllocator> ProtKey<T, A> {
/// Take ownership of `prot_buf` and transform it into a `ProtKey`. By
/// default prevent any access.
pub fn new(prot_buf: ProtBuf<T, A>) -> ProtKey<T, A> {
unsafe {
<A as KeyAllocator>::protect_none(prot_buf.as_ptr() as *mut u8,
prot_buf.len_bytes());
}
ProtKey {
key: RefCell::new(prot_buf),
read_ctr: Rc::new(Cell::new(NOREAD))
}
}
/// Consume and copy `prot_buf` to force using `ProtKey`'s allocator.
/// If `prot_buf` already uses a `KeyAllocator` there is no need to make
/// a copy so directly call the default constructor `new` instead.
pub fn from_buf<B: Allocator>(prot_buf: ProtBuf<T, B>) -> ProtKey<T, A> {
let buf = ProtBuf::from_slice(&prot_buf);
ProtKey::new(buf)
}
/// Return a wrapper to the key in read mode. This method `panic!` if
/// this key is already accessed in write mode.
// FIXME: Not sure if it's the best interface to provide a `try_read`
// variant to this `fail`ing method. It would maybe be better to
// implement a single method returning a `Result`. See this RFC
// https://github.com/rust-lang/rfcs/blob/master/text/0236-error-conventions.md
pub fn read(&self) -> ProtKeyRead<T, A> {
ProtKeyRead::new(self.key.borrow(), self.read_ctr.clone())
}
/// Return a wrapper to the key in read mode. Return `None`
/// if the key is already accessed in write mode.
pub fn try_read(&self) -> Option<ProtKeyRead<T, A>> {
match self.key.borrow_state() {
BorrowState::Reading|BorrowState::Unused => Some(self.read()),
_ => None
}
}
/// Access the key in read mode and pass a reference to closure `f`.
/// The key can only be read during this call. This method will `panic!`
/// if a read access cannot be acquired on this key.
pub fn read_with<F>(&self, mut f: F) where F: FnMut(ProtKeyRead<T, A>){
f(self.read())
}
/// Return a wrapper to the key in write mode. This method `panic!` if
/// the key is already currently accessed in read or write mode.
pub fn write(&self) -> ProtKeyWrite<T, A> {
let key_write = ProtKeyWrite::new(self.key.borrow_mut());
assert_eq!(self.read_ctr.get(), NOREAD);
key_write
}
/// Return a wrapper to the key in write mode. Return `None`
/// if the key is already accessed in read or write mode.
pub fn try_write(&self) -> Option<ProtKeyWrite<T, A>> {
match self.key.borrow_state() {
BorrowState::Unused => Some(self.write()),
_ => None
}
}
/// Access the key in write mode and pass a reference to closure `f`.
/// The key can only be written during this call. This method will
/// `panic!` if a write access cannot be acquired on this key.
pub fn write_with<F>(&self, mut f: F)
where F: FnMut(&mut ProtKeyWrite<T, A>) {
f(&mut self.write())
}
}
impl<T: Copy, A: KeyAllocator> Drop for ProtKey<T, A> {
fn drop(&mut self) {
// FIXME: without this assert this drop is useless.
assert_eq!(self.read_ctr.get(), NOREAD);
}
}
impl<T: Copy, A: KeyAllocator> Clone for ProtKey<T, A> {
fn | (&self) -> ProtKey<T, A> {
ProtKey::new(self.read().clone())
}
}
impl<T: Copy, A: KeyAllocator> PartialEq for ProtKey<T, A> {
fn eq(&self, other: &ProtKey<T, A>) -> bool {
match (self.try_read(), other.try_read()) {
(Some(ref s), Some(ref o)) => *s == *o,
(_, _) => false
}
}
}
impl<T: Debug + Copy, A: KeyAllocator> Debug for ProtKey<T, A> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_read() {
Some(r) => r.fmt(f),
None => Err(fmt::Error)
}
}
}
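// Assumed illustration of the Debug impl above: while a write guard is alive,
// `try_read()` returns `None`, so formatting takes the `Err(fmt::Error)`
// branch instead of exposing or blocking on the key material.
fn debug_refused_during_write(key: &ProtKey<u8>) {
    let _w = key.write();
    assert!(key.try_read().is_none()); // Debug would return Err(fmt::Error) here
}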
/// An RAII protected key with read access
///
/// This instance is the result of a `read` request on a `ProtKey`. If no
/// other similar instance on the same `ProtKey` exists, raw memory access
/// will be revoked when this instance is destructed.
pub struct ProtKeyRead<'a, T: Copy + 'a, A: KeyAllocator + 'a> {
ref_key: Ref<'a, ProtBuf<T, A>>,
read_ctr: Rc<Cell<usize>>
}
impl<'a, T: Copy, A: KeyAllocator> ProtKeyRead<'a, T, A> {
fn new(ref_key: Ref<'a, ProtBuf<T, A>>,
read_ctr: Rc<Cell<usize>>) -> ProtKeyRead<'a, T, A> {
if read_ctr.get() == NOREAD {
unsafe {
<A as KeyAllocator>::protect_read(ref_key.as_ptr() as *mut u8,
ref_key.len_bytes());
}
}
read_ctr.set(read_ctr.get().checked_add(1).unwrap());
ProtKeyRead {
ref_key: ref_key,
read_ctr: read_ctr
}
}
/// Clone this instance.
// FIXME: Currently does not implement `clone()` as it would interfere
// with `ProtKey::clone()`.
pub fn clone_it(&self) -> ProtKeyRead<T, A> {
ProtKeyRead::new(Ref::clone(&self.ref_key), self.read_ctr.clone())
}
}
impl<'a, T: Copy, A: KeyAllocator> Drop for ProtKeyRead<'a, T, A> {
fn drop(&mut self) {
self.read_ctr.set(self.read_ctr.get().checked_sub(1).unwrap());
if self.read_ctr.get() == NOREAD {
unsafe {
<A as KeyAllocator>::protect_none(
self.ref_key.as_ptr() as *mut u8,
self.ref_key.len_bytes());
}
}
}
}
impl<'a, T: Copy, A: KeyAllocator> Deref for ProtKeyRead<'a, T, A> {
type Target = ProtBuf<T, A>;
fn deref(&self) -> &ProtBuf<T, A> {
&*self.ref_key
}
}
impl<'a, T: Copy, A: KeyAllocator> AsRef<[T]> for ProtKeyRead<'a, T, A> {
fn as_ref(&self) -> &[T] {
&***self
}
}
impl<'a, T: Copy, A: KeyAllocator> PartialEq for ProtKeyRead<'a, T, A> {
fn eq(&self, other: &ProtKeyRead<T, A>) -> bool {
**self == **other
}
}
impl<'a, T: Debug + Copy, A: KeyAllocator> Debug for ProtKeyRead<'a, T, A> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.ref_key.fmt(f)
}
}
/// An RAII protected key with write access
///
/// This instance is the result of a `write` request on a `ProtKey`. Its
/// raw memory may only be written during the lifetime of this object.
pub struct ProtKeyWrite<'a, T: Copy + 'a, A: KeyAllocator + 'a> {
ref_key: RefMut<'a, ProtBuf<T, A>>,
}
impl<'a, T: Copy, A: KeyAllocator> ProtKeyWrite<'a, T, A> {
fn new(ref_key: RefMut<'a, ProtBuf<T, A>>) -> ProtKeyWrite<'a, T | clone | identifier_name |
laptop.pb.go | }
func (m *Laptop) GetKeyboard() *Keyboard {
if m != nil {
return m.Keyboard
}
return nil
}
type isLaptop_Weight interface {
isLaptop_Weight()
}
type Laptop_WeightKg struct {
WeightKg float64 `protobuf:"fixed64,10,opt,name=weight_kg,json=weightKg,proto3,oneof"`
}
type Laptop_WeightLb struct {
WeightLb float64 `protobuf:"fixed64,11,opt,name=weight_lb,json=weightLb,proto3,oneof"`
}
func (*Laptop_WeightKg) isLaptop_Weight() {}
func (*Laptop_WeightLb) isLaptop_Weight() {}
func (m *Laptop) GetWeight() isLaptop_Weight {
if m != nil {
return m.Weight
}
return nil
}
func (m *Laptop) GetWeightKg() float64 {
if x, ok := m.GetWeight().(*Laptop_WeightKg); ok {
return x.WeightKg
}
return 0
}
func (m *Laptop) GetWeightLb() float64 {
if x, ok := m.GetWeight().(*Laptop_WeightLb); ok {
return x.WeightLb
}
return 0
}
func (m *Laptop) GetPriceUsd() float64 {
if m != nil {
return m.PriceUsd
}
return 0
}
func (m *Laptop) GetReleaseYear() uint32 {
if m != nil {
return m.ReleaseYear
}
return 0
}
func (m *Laptop) GetUpdatedAt() *timestamp.Timestamp {
if m != nil {
return m.UpdatedAt
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Laptop) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Laptop_WeightKg)(nil),
(*Laptop_WeightLb)(nil),
}
}
type CreateLaptopRequest struct {
Laptop *Laptop `protobuf:"bytes,1,opt,name=laptop,proto3" json:"laptop,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateLaptopRequest) Reset() { *m = CreateLaptopRequest{} }
func (m *CreateLaptopRequest) String() string { return proto.CompactTextString(m) }
func (*CreateLaptopRequest) ProtoMessage() {}
func (*CreateLaptopRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_28a7e4886f546705, []int{1}
}
func (m *CreateLaptopRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateLaptopRequest.Unmarshal(m, b)
}
func (m *CreateLaptopRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateLaptopRequest.Marshal(b, m, deterministic)
}
func (m *CreateLaptopRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateLaptopRequest.Merge(m, src)
}
func (m *CreateLaptopRequest) XXX_Size() int {
return xxx_messageInfo_CreateLaptopRequest.Size(m)
}
func (m *CreateLaptopRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateLaptopRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreateLaptopRequest proto.InternalMessageInfo
func (m *CreateLaptopRequest) GetLaptop() *Laptop {
if m != nil {
return m.Laptop
}
return nil
}
type CreateLaptopResponse struct {
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateLaptopResponse) Reset() { *m = CreateLaptopResponse{} }
func (m *CreateLaptopResponse) String() string { return proto.CompactTextString(m) }
func (*CreateLaptopResponse) ProtoMessage() {}
func (*CreateLaptopResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_28a7e4886f546705, []int{2}
}
func (m *CreateLaptopResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateLaptopResponse.Unmarshal(m, b)
}
func (m *CreateLaptopResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateLaptopResponse.Marshal(b, m, deterministic)
}
func (m *CreateLaptopResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateLaptopResponse.Merge(m, src)
}
func (m *CreateLaptopResponse) XXX_Size() int {
return xxx_messageInfo_CreateLaptopResponse.Size(m)
}
func (m *CreateLaptopResponse) XXX_DiscardUnknown() {
xxx_messageInfo_CreateLaptopResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CreateLaptopResponse proto.InternalMessageInfo
func (m *CreateLaptopResponse) GetId() string {
if m != nil |
return ""
}
func init() {
proto.RegisterType((*Laptop)(nil), "pc.Laptop")
proto.RegisterType((*CreateLaptopRequest)(nil), "pc.CreateLaptopRequest")
proto.RegisterType((*CreateLaptopResponse)(nil), "pc.CreateLaptopResponse")
}
func init() {
proto.RegisterFile("laptop.proto", fileDescriptor_28a7e4886f546705)
}
var fileDescriptor_28a7e4886f546705 = []byte{
// 459 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x6f, 0xd3, 0x30,
0x14, 0xc7, 0x49, 0xd3, 0x76, 0xe9, 0x6b, 0x5a, 0x24, 0x33, 0x09, 0xd3, 0x81, 0x08, 0x3d, 0x40,
0x4e, 0xa9, 0x34, 0x4e, 0x3b, 0x6e, 0x3b, 0x80, 0xb4, 0x21, 0x4d, 0x2e, 0x3d, 0xc0, 0xa5, 0x72,
0x92, 0x47, 0x88, 0xd6, 0xd4, 0xc6, 0x76, 0x40, 0xfd, 0x37, 0xf9, 0x8b, 0x50, 0x6c, 0x67, 0xec,
0xc7, 0xcd, 0xef, 0xf3, 0xfd, 0x5a, 0xef, 0x27, 0xc4, 0x3b, 0x2e, 0x8d, 0x90, 0x99, 0x54, 0xc2,
0x08, 0x32, 0x90, 0xc5, 0xe2, 0xb9, 0x54, 0xa2, 0x40, 0xad, 0x85, 0x72, 0x70, 0x11, 0x37, 0xd8,
0x08, 0x75, 0xf0, 0xd1, 0x4c, 0x1b, 0xa1, 0x78, 0x85, 0xbd, 0xa8, 0x0b, 0x85, 0xb8, 0xf7, 0xd1,
0xfc, 0x16, 0x0f, 0xb9, 0xe0, 0xaa, 0xf4, 0xf1, 0xdb, 0x4a, 0x88, 0x6a, 0x87, 0x2b, 0x1b, 0xe5,
0xed, 0x8f, 0x95, 0xa9, | {
return m.ID
} | conditional_block |
laptop.pb.go |
}
func (m *Laptop) GetKeyboard() *Keyboard {
if m != nil {
return m.Keyboard
}
return nil
}
type isLaptop_Weight interface {
isLaptop_Weight()
}
type Laptop_WeightKg struct {
WeightKg float64 `protobuf:"fixed64,10,opt,name=weight_kg,json=weightKg,proto3,oneof"`
}
type Laptop_WeightLb struct {
WeightLb float64 `protobuf:"fixed64,11,opt,name=weight_lb,json=weightLb,proto3,oneof"`
}
func (*Laptop_WeightKg) isLaptop_Weight() {}
func (*Laptop_WeightLb) isLaptop_Weight() {}
func (m *Laptop) GetWeight() isLaptop_Weight {
if m != nil {
return m.Weight
}
return nil
}
func (m *Laptop) GetWeightKg() float64 {
if x, ok := m.GetWeight().(*Laptop_WeightKg); ok {
return x.WeightKg
}
return 0
}
func (m *Laptop) GetWeightLb() float64 {
if x, ok := m.GetWeight().(*Laptop_WeightLb); ok {
return x.WeightLb
}
return 0
}
func (m *Laptop) GetPriceUsd() float64 {
if m != nil {
return m.PriceUsd
}
return 0
}
func (m *Laptop) GetReleaseYear() uint32 {
if m != nil {
return m.ReleaseYear
}
return 0
}
func (m *Laptop) GetUpdatedAt() *timestamp.Timestamp {
if m != nil {
return m.UpdatedAt
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Laptop) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Laptop_WeightKg)(nil),
(*Laptop_WeightLb)(nil),
}
}
type CreateLaptopRequest struct {
Laptop *Laptop `protobuf:"bytes,1,opt,name=laptop,proto3" json:"laptop,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateLaptopRequest) Reset() { *m = CreateLaptopRequest{} }
func (m *CreateLaptopRequest) String() string { return proto.CompactTextString(m) }
func (*CreateLaptopRequest) ProtoMessage() {}
func (*CreateLaptopRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_28a7e4886f546705, []int{1}
}
func (m *CreateLaptopRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateLaptopRequest.Unmarshal(m, b)
}
func (m *CreateLaptopRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateLaptopRequest.Marshal(b, m, deterministic)
}
func (m *CreateLaptopRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateLaptopRequest.Merge(m, src)
}
func (m *CreateLaptopRequest) XXX_Size() int {
return xxx_messageInfo_CreateLaptopRequest.Size(m)
}
func (m *CreateLaptopRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateLaptopRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreateLaptopRequest proto.InternalMessageInfo
func (m *CreateLaptopRequest) GetLaptop() *Laptop {
if m != nil {
return m.Laptop
}
return nil
}
type CreateLaptopResponse struct {
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
| func (m *CreateLaptopResponse) Reset() { *m = CreateLaptopResponse{} }
func (m *CreateLaptopResponse) String() string { return proto.CompactTextString(m) }
func (*CreateLaptopResponse) ProtoMessage() {}
func (*CreateLaptopResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_28a7e4886f546705, []int{2}
}
func (m *CreateLaptopResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateLaptopResponse.Unmarshal(m, b)
}
func (m *CreateLaptopResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateLaptopResponse.Marshal(b, m, deterministic)
}
func (m *CreateLaptopResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateLaptopResponse.Merge(m, src)
}
func (m *CreateLaptopResponse) XXX_Size() int {
return xxx_messageInfo_CreateLaptopResponse.Size(m)
}
func (m *CreateLaptopResponse) XXX_DiscardUnknown() {
xxx_messageInfo_CreateLaptopResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CreateLaptopResponse proto.InternalMessageInfo
func (m *CreateLaptopResponse) GetId() string {
if m != nil {
return m.ID
}
return ""
}
func init() {
proto.RegisterType((*Laptop)(nil), "pc.Laptop")
proto.RegisterType((*CreateLaptopRequest)(nil), "pc.CreateLaptopRequest")
proto.RegisterType((*CreateLaptopResponse)(nil), "pc.CreateLaptopResponse")
}
func init() {
proto.RegisterFile("laptop.proto", fileDescriptor_28a7e4886f546705)
}
var fileDescriptor_28a7e4886f546705 = []byte{
// 459 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x6f, 0xd3, 0x30,
0x14, 0xc7, 0x49, 0xd3, 0x76, 0xe9, 0x6b, 0x5a, 0x24, 0x33, 0x09, 0xd3, 0x81, 0x08, 0x3d, 0x40,
0x4e, 0xa9, 0x34, 0x4e, 0x3b, 0x6e, 0x3b, 0x80, 0xb4, 0x21, 0x4d, 0x2e, 0x3d, 0xc0, 0xa5, 0x72,
0x92, 0x47, 0x88, 0xd6, 0xd4, 0xc6, 0x76, 0x40, 0xfd, 0x37, 0xf9, 0x8b, 0x50, 0x6c, 0x67, 0xec,
0xc7, 0xcd, 0xef, 0xf3, 0xfd, 0x5a, 0xef, 0x27, 0xc4, 0x3b, 0x2e, 0x8d, 0x90, 0x99, 0x54, 0xc2,
0x08, 0x32, 0x90, 0xc5, 0xe2, 0xb9, 0x54, 0xa2, 0x40, 0xad, 0x85, 0x72, 0x70, 0x11, 0x37, 0xd8,
0x08, 0x75, 0xf0, 0xd1, 0x4c, 0x1b, 0xa1, 0x78, 0x85, 0xbd, 0xa8, 0x0b, 0x85, 0xb8, 0xf7, 0xd1,
0xfc, 0x16, 0x0f, 0xb9, 0xe0, 0xaa, 0xf4, 0xf1, 0xdb, 0x4a, 0x88, 0x6a, 0x87, 0x2b, 0x1b, 0xe5,
0xed, 0x8f, 0x95, 0xa9, | random_line_split |
|
laptop.pb.go | () *Screen {
if m != nil {
return m.Screen
}
return nil
}
func (m *Laptop) GetKeyboard() *Keyboard {
if m != nil {
return m.Keyboard
}
return nil
}
type isLaptop_Weight interface {
isLaptop_Weight()
}
type Laptop_WeightKg struct {
WeightKg float64 `protobuf:"fixed64,10,opt,name=weight_kg,json=weightKg,proto3,oneof"`
}
type Laptop_WeightLb struct {
WeightLb float64 `protobuf:"fixed64,11,opt,name=weight_lb,json=weightLb,proto3,oneof"`
}
func (*Laptop_WeightKg) isLaptop_Weight() {}
func (*Laptop_WeightLb) isLaptop_Weight() {}
func (m *Laptop) GetWeight() isLaptop_Weight {
if m != nil {
return m.Weight
}
return nil
}
func (m *Laptop) GetWeightKg() float64 {
if x, ok := m.GetWeight().(*Laptop_WeightKg); ok {
return x.WeightKg
}
return 0
}
func (m *Laptop) GetWeightLb() float64 {
if x, ok := m.GetWeight().(*Laptop_WeightLb); ok {
return x.WeightLb
}
return 0
}
func (m *Laptop) GetPriceUsd() float64 {
if m != nil {
return m.PriceUsd
}
return 0
}
func (m *Laptop) GetReleaseYear() uint32 {
if m != nil {
return m.ReleaseYear
}
return 0
}
func (m *Laptop) GetUpdatedAt() *timestamp.Timestamp {
if m != nil {
return m.UpdatedAt
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Laptop) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Laptop_WeightKg)(nil),
(*Laptop_WeightLb)(nil),
}
}
type CreateLaptopRequest struct {
Laptop *Laptop `protobuf:"bytes,1,opt,name=laptop,proto3" json:"laptop,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateLaptopRequest) Reset() { *m = CreateLaptopRequest{} }
func (m *CreateLaptopRequest) String() string { return proto.CompactTextString(m) }
func (*CreateLaptopRequest) ProtoMessage() {}
func (*CreateLaptopRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_28a7e4886f546705, []int{1}
}
func (m *CreateLaptopRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateLaptopRequest.Unmarshal(m, b)
}
func (m *CreateLaptopRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateLaptopRequest.Marshal(b, m, deterministic)
}
func (m *CreateLaptopRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateLaptopRequest.Merge(m, src)
}
func (m *CreateLaptopRequest) XXX_Size() int {
return xxx_messageInfo_CreateLaptopRequest.Size(m)
}
func (m *CreateLaptopRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateLaptopRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreateLaptopRequest proto.InternalMessageInfo
func (m *CreateLaptopRequest) GetLaptop() *Laptop {
if m != nil {
return m.Laptop
}
return nil
}
type CreateLaptopResponse struct {
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateLaptopResponse) Reset() { *m = CreateLaptopResponse{} }
func (m *CreateLaptopResponse) String() string { return proto.CompactTextString(m) }
func (*CreateLaptopResponse) ProtoMessage() {}
func (*CreateLaptopResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_28a7e4886f546705, []int{2}
}
func (m *CreateLaptopResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateLaptopResponse.Unmarshal(m, b)
}
func (m *CreateLaptopResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateLaptopResponse.Marshal(b, m, deterministic)
}
func (m *CreateLaptopResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateLaptopResponse.Merge(m, src)
}
func (m *CreateLaptopResponse) XXX_Size() int {
return xxx_messageInfo_CreateLaptopResponse.Size(m)
}
func (m *CreateLaptopResponse) XXX_DiscardUnknown() {
xxx_messageInfo_CreateLaptopResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CreateLaptopResponse proto.InternalMessageInfo
func (m *CreateLaptopResponse) GetId() string {
if m != nil {
return m.ID
}
return ""
}
func init() {
proto.RegisterType((*Laptop)(nil), "pc.Laptop")
proto.RegisterType((*CreateLaptopRequest)(nil), "pc.CreateLaptopRequest")
proto.RegisterType((*CreateLaptopResponse)(nil), "pc.CreateLaptopResponse")
}
func init() {
proto.RegisterFile("laptop.proto", fileDescriptor_28a7e4886f546705)
}
var fileDescriptor_28a7e4886f546705 = []byte{
// 459 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x6f, 0xd3, 0x30,
0x14, 0xc7, 0x49, 0xd3, 0x76, 0xe9, 0x6b, 0x5a, 0x24, 0x33, 0x09, 0xd3, 0x81, 0x08, 0x3d, 0x40,
0x4e, 0xa9, 0x34, 0x4e, 0x3b, 0x6e, 0x3b, 0x80, 0xb4, 0x21, 0x4d, 0x2e, 0x3d, 0xc0, 0xa5, 0x72,
0x92, 0x47, 0x88, 0xd6, 0xd4, 0xc6, 0x76, 0x40, 0xfd, 0x37, 0xf9, 0x8b, 0x50, 0x6c, 0x67, 0xec,
0xc7, 0xcd, 0xef, 0xf3, 0xfd, 0x5a, 0xef, 0x27, 0xc4, 0x3b, 0x2e, 0x8d, 0x90, 0x99, 0x54, 0xc2,
0x08, 0x32, 0x90, 0xc5, 0xe2, 0xb9, 0x54, 0xa2, 0x40, 0xad, 0x85, 0x72, 0x70, 0x11, 0x37, 0xd8,
0x08, 0x75, 0xf0, 0xd1, 0x4c, 0x1b, 0xa1, 0x78, 0x85, 0xbd, 0xa8, 0x0b, 0x85, 0xb8, 0xf7, 0xd1,
0xfc, 0x16, 0x0f, 0xb9, 0xe0, 0xaa, 0xf4, 0xf1, 0xdb, 0x4a, 0x88, 0x6a, 0x87, 0x2b, 0x1b, 0xe5,
0 | GetScreen | identifier_name |