file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
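The rows below are flattened fill-in-the-middle (FIM) training examples: each original code snippet was split into a prefix, a suffix, and the removed middle, plus a label describing how the split was made. A minimal consumption sketch follows, assuming the table is loadable with Hugging Face `datasets`; the dataset path and the `<fim_*>` sentinel tokens are placeholders for illustration only, not part of this dump.

```python
# Sketch: reassembling FIM examples from rows with the five columns above.
# Assumption: the dump is loadable via `datasets`; the path below is a placeholder.
from datasets import load_dataset

ds = load_dataset("path/to/this-fim-dataset", split="train")  # placeholder path

def to_fim_prompt(row: dict) -> str:
    """Format one row in prefix-suffix-middle (PSM) order with placeholder sentinels."""
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>" + row["middle"]
    )

def to_original_snippet(row: dict) -> str:
    """Undo the split: the original code is prefix + middle + suffix."""
    return row["prefix"] + row["middle"] + row["suffix"]

row = ds[0]
print(row["file_name"], row["fim_type"])  # e.g. "utils.py", "identifier_name"
print(to_original_snippet(row)[:200])
```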
contacts-details.component.ts | => {
};
@Component({
selector: 'app-contacts-details',
templateUrl: './contacts-details.component.html',
styleUrls: ['./contacts-details.component.css'],
providers: [
{
provide: NG_VALUE_ACCESSOR,
multi: true,
useExisting: forwardRef(() => ContactsDetailsComponent),
}
],
changeDetection: ChangeDetectionStrategy.OnPush
})
export class ContactsDetailsComponent implements OnInit, OnDestroy, OnChanges, ControlValueAccessor {
private unsubscribe: Subject<void> = new Subject();
selectedCompany;
doctor: any;
@Input() user: any;
private onTouchedCallback: () => void = noop;
private onChangeCallback: (_: any) => void = noop;
innerValue: ProfileInterface;
kinsArray: Array<any> = [];
kindetailsGroup: FormGroup;
inputForm: FormGroup;
contactGroups: Array<string> = contactGroups;
contactTypes: Array<string>;
modalOpen: boolean = false;
postLoading: boolean = false;
selected: any;
current: number = 0;
loading: boolean;
tocken: any;
doctors: Array<any> = [];
constructor(
private globalS: GlobalService,
private clientS: ClientService,
private staffS: StaffService,
private timeS: TimeSheetService,
private sharedS: ShareService,
private listS: ListService,
private formBuilder: FormBuilder,
private cd: ChangeDetectorRef,
private http: HttpClient,
private titleCase: TitleCasePipe
) { }
ngOnInit(): void {
this.user = this.sharedS.getPicked();
this.buildForm();
}
ngOnChanges(changes: SimpleChanges) {
for (let property in changes) {
console.log('run contacts')
this.searchKin(this.user);
}
}
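// When the selected doctor changes, populate the form with that doctor's contact details (clearing it first if no match is found).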
doctorChangeEvent(data: any){
var doc = this.doctors.filter(x => x.name == data).shift();
if(!doc) |
this.inputForm.patchValue({
address1: doc.address1,
address2: doc.address2,
phone1: doc.phone1,
phone2:doc.phone2,
email: doc.email,
mobile: doc.mobile,
fax: doc.fax,
name: doc.name
})
}
populate(){
this.listS.getdoctorinformation().subscribe(data => {
console.log(data);
this.doctors = data;
})
}
buildForm(): void {
this.kindetailsGroup = this.formBuilder.group({
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [''],
suburb: [''],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null,
subType: ''
});
this.inputForm = this.formBuilder.group({
group: [''],
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [null],
suburb: [''],
state: [],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null
})
this.inputForm.get('group').valueChanges.pipe(
switchMap(x => {
if(!x)
return EMPTY;
console.log(x);
return this.listS.gettypeother(x) })
).subscribe(data => {
this.contactTypes = data;
});
}
ngAfterViewInit(): void{
}
ngOnDestroy(): void{
this.unsubscribe.next();
this.unsubscribe.complete();
}
searchKin(token: ProfileInterface){
this.loading = true;
console.log(token)
if (token.view == view.recipient) {
this.timeS.getcontactskinrecipient(token.id)
.subscribe(data => {
this.kinsArray = data.list;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
if (token.view == view.staff) {
this.timeS.getcontactskinstaff(token.code)
.subscribe(data => {
this.kinsArray = data;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
}
showDetails(kin: any) {
this.timeS.getcontactskinstaffdetails(kin.recordNumber)
.subscribe(data => {
this.kindetailsGroup.patchValue({
address1: data.address1,
address2: data.address2,
name: data.contactName,
type: data.subType,
email: data.email,
fax: data.fax,
mobile: data.mobile,
notes: data.notes,
phone1: data.phone1,
phone2: data.phone2,
suburbcode: (data.postcode != '') ? (data.postcode || '').trim() + ' ' + (data.suburb || '').trim() : '',
suburb: data.suburb,
postcode: data.postcode,
listOrder: '',
oni1: (data.equipmentCode || '').toUpperCase() == 'PERSON1',
oni2: (data.equipmentCode || '').toUpperCase() == 'PERSON2',
recordNumber: data.recordNumber,
// subType: data.subType
})
})
}
//From ControlValueAccessor interface
writeValue(value: any) {
if (value != null) {
console.log(value)
this.innerValue = value;
this.searchKin(this.innerValue);
}
}
//From ControlValueAccessor interface
registerOnChange(fn: any) {
this.onChangeCallback = fn;
}
//From ControlValueAccessor interface
registerOnTouched(fn: any) {
this.onTouchedCallback = fn;
}
save() {
if (this.user.view === view.staff)
{
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinstaffdetails(
details,
details.recordNumber
).subscribe(data => {
// this.searchKin(this.user);
this.globalS.sToast('Success', 'Contact Updated');
});
}
if (this.user.view === view.recipient)
{
console.log('recipient');
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinrecipientdetails(details,details.recordNumber)
.subscribe(data => {
// this.searchKin(this.user);
this.handleCancel();
this.globalS.sToast('Success', 'Contact Updated');
});
}
}
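// Splits the combined "<postcode> <suburb>" string built in showDetails() back into its postcode and suburb parts.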
getPostCodeAndSuburb(address: any): any {
const rs = address;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0] : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].split(',')[0] : "";
return {
pcode: pcode.trim() || '',
suburb: suburb.trim() || ''
}
}
get nextRequired() {
const { group, type, name } = this.inputForm.value;
if (this.current == 0 && this.globalS.isEmpty(group)) {
return false;
}
if (this.current == | {
this.inputForm.patchValue({
address1: '',
address2: '',
phone1: '',
phone2:'',
email: '',
mobile: '',
fax: '',
name: ''
})
return;
} | conditional_block |
contacts-details.component.ts | => {
};
@Component({
selector: 'app-contacts-details',
templateUrl: './contacts-details.component.html',
styleUrls: ['./contacts-details.component.css'],
providers: [
{
provide: NG_VALUE_ACCESSOR,
multi: true,
useExisting: forwardRef(() => ContactsDetailsComponent),
}
],
changeDetection: ChangeDetectionStrategy.OnPush
})
export class ContactsDetailsComponent implements OnInit, OnDestroy, OnChanges, ControlValueAccessor {
private unsubscribe: Subject<void> = new Subject();
selectedCompany;
doctor: any;
@Input() user: any;
private onTouchedCallback: () => void = noop;
private onChangeCallback: (_: any) => void = noop;
innerValue: ProfileInterface;
kinsArray: Array<any> = [];
kindetailsGroup: FormGroup;
inputForm: FormGroup;
contactGroups: Array<string> = contactGroups;
contactTypes: Array<string>;
modalOpen: boolean = false;
postLoading: boolean = false;
selected: any;
current: number = 0;
loading: boolean;
tocken: any;
doctors: Array<any> = [];
constructor(
private globalS: GlobalService,
private clientS: ClientService,
private staffS: StaffService,
private timeS: TimeSheetService,
private sharedS: ShareService,
private listS: ListService,
private formBuilder: FormBuilder,
private cd: ChangeDetectorRef,
private http: HttpClient,
private titleCase: TitleCasePipe
) { }
ngOnInit(): void {
this.user = this.sharedS.getPicked();
this.buildForm();
}
| (changes: SimpleChanges) {
for (let property in changes) {
console.log('run contacts')
this.searchKin(this.user);
}
}
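// When the selected doctor changes, populate the form with that doctor's contact details (clearing it first if no match is found).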
doctorChangeEvent(data: any){
var doc = this.doctors.filter(x => x.name == data).shift();
if(!doc){
this.inputForm.patchValue({
address1: '',
address2: '',
phone1: '',
phone2:'',
email: '',
mobile: '',
fax: '',
name: ''
})
return;
}
this.inputForm.patchValue({
address1: doc.address1,
address2: doc.address2,
phone1: doc.phone1,
phone2:doc.phone2,
email: doc.email,
mobile: doc.mobile,
fax: doc.fax,
name: doc.name
})
}
populate(){
this.listS.getdoctorinformation().subscribe(data => {
console.log(data);
this.doctors = data;
})
}
buildForm(): void {
this.kindetailsGroup = this.formBuilder.group({
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [''],
suburb: [''],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null,
subType: ''
});
this.inputForm = this.formBuilder.group({
group: [''],
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [null],
suburb: [''],
state: [],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null
})
this.inputForm.get('group').valueChanges.pipe(
switchMap(x => {
if(!x)
return EMPTY;
console.log(x);
return this.listS.gettypeother(x) })
).subscribe(data => {
this.contactTypes = data;
});
}
ngAfterViewInit(): void{
}
ngOnDestroy(): void{
this.unsubscribe.next();
this.unsubscribe.complete();
}
searchKin(token: ProfileInterface){
this.loading = true;
console.log(token)
if (token.view == view.recipient) {
this.timeS.getcontactskinrecipient(token.id)
.subscribe(data => {
this.kinsArray = data.list;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
if (token.view == view.staff) {
this.timeS.getcontactskinstaff(token.code)
.subscribe(data => {
this.kinsArray = data;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
}
showDetails(kin: any) {
this.timeS.getcontactskinstaffdetails(kin.recordNumber)
.subscribe(data => {
this.kindetailsGroup.patchValue({
address1: data.address1,
address2: data.address2,
name: data.contactName,
type: data.subType,
email: data.email,
fax: data.fax,
mobile: data.mobile,
notes: data.notes,
phone1: data.phone1,
phone2: data.phone2,
suburbcode: (data.postcode != '') ? (data.postcode || '').trim() + ' ' + (data.suburb || '').trim() : '',
suburb: data.suburb,
postcode: data.postcode,
listOrder: '',
oni1: (data.equipmentCode || '').toUpperCase() == 'PERSON1',
oni2: (data.equipmentCode || '').toUpperCase() == 'PERSON2',
recordNumber: data.recordNumber,
// subType: data.subType
})
})
}
//From ControlValueAccessor interface
writeValue(value: any) {
if (value != null) {
console.log(value)
this.innerValue = value;
this.searchKin(this.innerValue);
}
}
//From ControlValueAccessor interface
registerOnChange(fn: any) {
this.onChangeCallback = fn;
}
//From ControlValueAccessor interface
registerOnTouched(fn: any) {
this.onTouchedCallback = fn;
}
save() {
if (this.user.view === view.staff)
{
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinstaffdetails(
details,
details.recordNumber
).subscribe(data => {
// this.searchKin(this.user);
this.globalS.sToast('Success', 'Contact Updated');
});
}
if (this.user.view === view.recipient)
{
console.log('recipient');
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinrecipientdetails(details,details.recordNumber)
.subscribe(data => {
// this.searchKin(this.user);
this.handleCancel();
this.globalS.sToast('Success', 'Contact Updated');
});
}
}
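// Splits the combined "<postcode> <suburb>" string built in showDetails() back into its postcode and suburb parts.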
getPostCodeAndSuburb(address: any): any {
const rs = address;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0] : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].split(',')[0] : "";
return {
pcode: pcode.trim() || '',
suburb: suburb.trim() || ''
}
}
get nextRequired() {
const { group, type, name } = this.inputForm.value;
if (this.current == 0 && this.globalS.isEmpty(group)) {
return false;
}
if (this.current == 1 | ngOnChanges | identifier_name |
jobs_contracts.go | deploy.Gas = useDefault(deploy.Gas, do.DefaultGas)
// assemble contract
var contractPath string
if _, err := os.Stat(deploy.Contract); err != nil {
if _, secErr := os.Stat(filepath.Join(do.BinPath, deploy.Contract)); secErr != nil {
if _, thirdErr := os.Stat(filepath.Join(do.BinPath, filepath.Base(deploy.Contract))); thirdErr != nil {
return "", fmt.Errorf("Could not find contract in\n* primary path: %v\n* binary path: %v\n* tertiary path: %v", deploy.Contract, filepath.Join(do.BinPath, deploy.Contract), filepath.Join(do.BinPath, filepath.Base(deploy.Contract)))
} else {
contractPath = filepath.Join(do.BinPath, filepath.Base(deploy.Contract))
}
} else {
contractPath = filepath.Join(do.BinPath, deploy.Contract)
}
} else {
contractPath = deploy.Contract
}
// compile
if filepath.Ext(deploy.Contract) == ".bin" {
log.Info("Binary file detected. Using binary deploy sequence.")
log.WithField("=>", contractPath).Info("Binary path")
binaryResponse, err := compilers.RequestBinaryLinkage(contractPath, deploy.Libraries)
if err != nil {
return "", fmt.Errorf("Something went wrong with your binary deployment: %v", err)
}
if binaryResponse.Error != "" {
return "", fmt.Errorf("Something went wrong when you were trying to link your binaries: %v", binaryResponse.Error)
}
contractCode := binaryResponse.Binary
tx, err := deployTx(do, deploy, contractName, string(contractCode))
if err != nil {
return "could not deploy binary contract", err
}
result, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy from path %s: %v", contractPath, err)
}
return result.String(), err
} else {
contractPath = deploy.Contract
log.WithField("=>", contractPath).Info("Contract path")
// normal compilation/deploy sequence
resp, err := compilers.RequestCompile(contractPath, false, deploy.Libraries)
if err != nil {
log.Errorln("Error compiling contracts: Compilers error:")
return "", err
} else if resp.Error != "" {
log.Errorln("Error compiling contracts: Language error:")
return "", fmt.Errorf("%v", resp.Error)
} else if resp.Warning != "" {
log.WithField("=>", resp.Warning).Warn("Warning during contract compilation")
}
// loop through objects returned from compiler
switch {
case len(resp.Objects) == 1:
log.WithField("path", contractPath).Info("Deploying the only contract in file")
response := resp.Objects[0]
log.WithField("=>", response.ABI).Info("Abi")
log.WithField("=>", response.Bytecode).Info("Bin")
if response.Bytecode != "" {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
case deploy.Instance == "all":
log.WithField("path", contractPath).Info("Deploying all contracts")
var baseObj string
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
if strings.ToLower(response.Objectname) == strings.ToLower(strings.TrimSuffix(filepath.Base(deploy.Contract), filepath.Ext(filepath.Base(deploy.Contract)))) {
baseObj = result
}
}
if baseObj != "" {
result = baseObj
}
default:
log.WithField("contract", deploy.Instance).Info("Deploying a single contract")
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
if matchInstanceName(response.Objectname, deploy.Instance) {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
}
}
}
return result, nil
}
func | (objectName, deployInstance string) bool {
if objectName == "" {
return false
}
// Ignore the filename component that newer versions of Solidity include in object name
objectNameParts := strings.Split(objectName, ":")
return strings.ToLower(objectNameParts[len(objectNameParts)-1]) == strings.ToLower(deployInstance)
}
// TODO [rj] refactor to remove [contractPath] from the function's signature => it is only used in a single error throw.
func deployContract(deploy *def.Deploy, do *def.Packages, compilersResponse compilers.ResponseItem) (string, error) {
log.WithField("=>", string(compilersResponse.ABI)).Debug("ABI Specification (From Compilers)")
contractCode := compilersResponse.Bytecode
// Save ABI
if _, err := os.Stat(do.ABIPath); os.IsNotExist(err) {
if err := os.Mkdir(do.ABIPath, 0775); err != nil {
return "", err
}
}
if _, err := os.Stat(do.BinPath); os.IsNotExist(err) {
if err := os.Mkdir(do.BinPath, 0775); err != nil {
return "", err
}
}
// saving contract/library abi
var abiLocation string
if compilersResponse.Objectname != "" {
abiLocation = filepath.Join(do.ABIPath, compilersResponse.Objectname)
log.WithField("=>", abiLocation).Warn("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
} else {
log.Debug("Objectname from compilers is blank. Not saving abi.")
}
// additional data may be sent along with the contract
// these are naively added to the end of the contract code using standard
// mint packing
if deploy.Data != nil {
_, callDataArray, err := util.PreProcessInputData(compilersResponse.Objectname, deploy.Data, do, true)
if err != nil {
return "", err
}
packedBytes, err := abi.ReadAbiFormulateCall(compilersResponse.Objectname, "", callDataArray, do)
if err != nil {
return "", err
}
callData := hex.EncodeToString(packedBytes)
contractCode = contractCode + callData
}
tx, err := deployTx(do, deploy, compilersResponse.Objectname, contractCode)
if err != nil {
return "", err
}
// Sign, broadcast, display
contractAddress, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy %s: %v", deploy.Contract, err)
}
// saving contract/library abi at abi/address
if contractAddress != nil {
abiLocation := filepath.Join(do.ABIPath, contractAddress.String())
log.WithField("=>", abiLocation).Debug("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
// saving binary
if deploy.SaveBinary {
contractName := filepath.Join(do.BinPath, fmt.Sprintf("%s.bin", compilersResponse.Objectname))
log.WithField("=>", contractName).Warn("Saving Binary")
if err := ioutil.WriteFile(contractName, []byte(contractCode), 0664); err != nil {
return "", err
}
} else {
log.Debug("Not saving binary.")
}
return contractAddress.String(), nil
} else {
// we shouldn't reach this point because we should have an error before this.
return "", fmt.Errorf("The contract did not deploy. Unable to save abi to abi/contractAddress.")
}
}
func deployTx(do *def.Packages, deploy *def.Deploy, contractName, contractCode string) (*payload.CallTx, error) {
// Deploy contract
log.WithFields(log.Fields{
"name": contractName,
}).Warn("Deploying Contract")
log.WithFields(log.Fields{
"source": deploy.Source,
"code": contractCode,
"chain-url": do.ChainURL,
}).Info()
return do.Call(def.CallArg{
Input: deploy.Source,
Amount: deploy.Amount,
Fee: deploy.Fee,
Gas: deploy.Gas,
Data: contractCode,
Sequence: deploy.Sequence,
})
}
func CallJob(call *def.Call, do *def.Packages) (string, []*def.Variable, error) {
var err error
var callData string
var callDataArray []string
// Preprocess variables
call.Source, _ = util.PreProcess(call.Source, do)
call.Destination, _ = util.PreProcess(call.Destination, do)
//todo: find a way to call the fallback function here
call.Function, callDataArray, err = util.PreProcessInputData(call.Function, call.Data, do, false)
if err != nil {
return "", nil, | matchInstanceName | identifier_name |
jobs_contracts.go | )
deploy.Gas = useDefault(deploy.Gas, do.DefaultGas)
// assemble contract
var contractPath string
if _, err := os.Stat(deploy.Contract); err != nil {
if _, secErr := os.Stat(filepath.Join(do.BinPath, deploy.Contract)); secErr != nil {
if _, thirdErr := os.Stat(filepath.Join(do.BinPath, filepath.Base(deploy.Contract))); thirdErr != nil {
return "", fmt.Errorf("Could not find contract in\n* primary path: %v\n* binary path: %v\n* tertiary path: %v", deploy.Contract, filepath.Join(do.BinPath, deploy.Contract), filepath.Join(do.BinPath, filepath.Base(deploy.Contract)))
} else {
contractPath = filepath.Join(do.BinPath, filepath.Base(deploy.Contract))
}
} else {
contractPath = filepath.Join(do.BinPath, deploy.Contract)
}
} else {
contractPath = deploy.Contract
}
// compile
if filepath.Ext(deploy.Contract) == ".bin" {
log.Info("Binary file detected. Using binary deploy sequence.")
log.WithField("=>", contractPath).Info("Binary path")
binaryResponse, err := compilers.RequestBinaryLinkage(contractPath, deploy.Libraries)
if err != nil {
return "", fmt.Errorf("Something went wrong with your binary deployment: %v", err)
}
if binaryResponse.Error != "" {
return "", fmt.Errorf("Something went wrong when you were trying to link your binaries: %v", binaryResponse.Error)
}
contractCode := binaryResponse.Binary
tx, err := deployTx(do, deploy, contractName, string(contractCode))
if err != nil {
return "could not deploy binary contract", err
}
result, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy from path %s: %v", contractPath, err)
}
return result.String(), err
} else {
contractPath = deploy.Contract
log.WithField("=>", contractPath).Info("Contract path")
// normal compilation/deploy sequence
resp, err := compilers.RequestCompile(contractPath, false, deploy.Libraries)
if err != nil {
log.Errorln("Error compiling contracts: Compilers error:")
return "", err
} else if resp.Error != "" {
log.Errorln("Error compiling contracts: Language error:")
return "", fmt.Errorf("%v", resp.Error)
} else if resp.Warning != "" {
log.WithField("=>", resp.Warning).Warn("Warning during contract compilation") | response := resp.Objects[0]
log.WithField("=>", response.ABI).Info("Abi")
log.WithField("=>", response.Bytecode).Info("Bin")
if response.Bytecode != "" {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
case deploy.Instance == "all":
log.WithField("path", contractPath).Info("Deploying all contracts")
var baseObj string
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
if strings.ToLower(response.Objectname) == strings.ToLower(strings.TrimSuffix(filepath.Base(deploy.Contract), filepath.Ext(filepath.Base(deploy.Contract)))) {
baseObj = result
}
}
if baseObj != "" {
result = baseObj
}
default:
log.WithField("contract", deploy.Instance).Info("Deploying a single contract")
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
if matchInstanceName(response.Objectname, deploy.Instance) {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
}
}
}
return result, nil
}
func matchInstanceName(objectName, deployInstance string) bool {
if objectName == "" {
return false
}
// Ignore the filename component that newer versions of Solidity include in object name
objectNameParts := strings.Split(objectName, ":")
return strings.ToLower(objectNameParts[len(objectNameParts)-1]) == strings.ToLower(deployInstance)
}
// TODO [rj] refactor to remove [contractPath] from the function's signature => it is only used in a single error throw.
func deployContract(deploy *def.Deploy, do *def.Packages, compilersResponse compilers.ResponseItem) (string, error) {
log.WithField("=>", string(compilersResponse.ABI)).Debug("ABI Specification (From Compilers)")
contractCode := compilersResponse.Bytecode
// Save ABI
if _, err := os.Stat(do.ABIPath); os.IsNotExist(err) {
if err := os.Mkdir(do.ABIPath, 0775); err != nil {
return "", err
}
}
if _, err := os.Stat(do.BinPath); os.IsNotExist(err) {
if err := os.Mkdir(do.BinPath, 0775); err != nil {
return "", err
}
}
// saving contract/library abi
var abiLocation string
if compilersResponse.Objectname != "" {
abiLocation = filepath.Join(do.ABIPath, compilersResponse.Objectname)
log.WithField("=>", abiLocation).Warn("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
} else {
log.Debug("Objectname from compilers is blank. Not saving abi.")
}
// additional data may be sent along with the contract
// these are naively added to the end of the contract code using standard
// mint packing
if deploy.Data != nil {
_, callDataArray, err := util.PreProcessInputData(compilersResponse.Objectname, deploy.Data, do, true)
if err != nil {
return "", err
}
packedBytes, err := abi.ReadAbiFormulateCall(compilersResponse.Objectname, "", callDataArray, do)
if err != nil {
return "", err
}
callData := hex.EncodeToString(packedBytes)
contractCode = contractCode + callData
}
tx, err := deployTx(do, deploy, compilersResponse.Objectname, contractCode)
if err != nil {
return "", err
}
// Sign, broadcast, display
contractAddress, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy %s: %v", deploy.Contract, err)
}
// saving contract/library abi at abi/address
if contractAddress != nil {
abiLocation := filepath.Join(do.ABIPath, contractAddress.String())
log.WithField("=>", abiLocation).Debug("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
// saving binary
if deploy.SaveBinary {
contractName := filepath.Join(do.BinPath, fmt.Sprintf("%s.bin", compilersResponse.Objectname))
log.WithField("=>", contractName).Warn("Saving Binary")
if err := ioutil.WriteFile(contractName, []byte(contractCode), 0664); err != nil {
return "", err
}
} else {
log.Debug("Not saving binary.")
}
return contractAddress.String(), nil
} else {
// we shouldn't reach this point because we should have an error before this.
return "", fmt.Errorf("The contract did not deploy. Unable to save abi to abi/contractAddress.")
}
}
func deployTx(do *def.Packages, deploy *def.Deploy, contractName, contractCode string) (*payload.CallTx, error) {
// Deploy contract
log.WithFields(log.Fields{
"name": contractName,
}).Warn("Deploying Contract")
log.WithFields(log.Fields{
"source": deploy.Source,
"code": contractCode,
"chain-url": do.ChainURL,
}).Info()
return do.Call(def.CallArg{
Input: deploy.Source,
Amount: deploy.Amount,
Fee: deploy.Fee,
Gas: deploy.Gas,
Data: contractCode,
Sequence: deploy.Sequence,
})
}
func CallJob(call *def.Call, do *def.Packages) (string, []*def.Variable, error) {
var err error
var callData string
var callDataArray []string
// Preprocess variables
call.Source, _ = util.PreProcess(call.Source, do)
call.Destination, _ = util.PreProcess(call.Destination, do)
//todo: find a way to call the fallback function here
call.Function, callDataArray, err = util.PreProcessInputData(call.Function, call.Data, do, false)
if err != nil {
return "", nil, | }
// loop through objects returned from compiler
switch {
case len(resp.Objects) == 1:
log.WithField("path", contractPath).Info("Deploying the only contract in file") | random_line_split |
jobs_contracts.go | deploy.Gas = useDefault(deploy.Gas, do.DefaultGas)
// assemble contract
var contractPath string
if _, err := os.Stat(deploy.Contract); err != nil {
if _, secErr := os.Stat(filepath.Join(do.BinPath, deploy.Contract)); secErr != nil {
if _, thirdErr := os.Stat(filepath.Join(do.BinPath, filepath.Base(deploy.Contract))); thirdErr != nil {
return "", fmt.Errorf("Could not find contract in\n* primary path: %v\n* binary path: %v\n* tertiary path: %v", deploy.Contract, filepath.Join(do.BinPath, deploy.Contract), filepath.Join(do.BinPath, filepath.Base(deploy.Contract)))
} else {
contractPath = filepath.Join(do.BinPath, filepath.Base(deploy.Contract))
}
} else {
contractPath = filepath.Join(do.BinPath, deploy.Contract)
}
} else {
contractPath = deploy.Contract
}
// compile
if filepath.Ext(deploy.Contract) == ".bin" {
log.Info("Binary file detected. Using binary deploy sequence.")
log.WithField("=>", contractPath).Info("Binary path")
binaryResponse, err := compilers.RequestBinaryLinkage(contractPath, deploy.Libraries)
if err != nil {
return "", fmt.Errorf("Something went wrong with your binary deployment: %v", err)
}
if binaryResponse.Error != "" {
return "", fmt.Errorf("Something went wrong when you were trying to link your binaries: %v", binaryResponse.Error)
}
contractCode := binaryResponse.Binary
tx, err := deployTx(do, deploy, contractName, string(contractCode))
if err != nil {
return "could not deploy binary contract", err
}
result, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy from path %s: %v", contractPath, err)
}
return result.String(), err
} else {
contractPath = deploy.Contract
log.WithField("=>", contractPath).Info("Contract path")
// normal compilation/deploy sequence
resp, err := compilers.RequestCompile(contractPath, false, deploy.Libraries)
if err != nil {
log.Errorln("Error compiling contracts: Compilers error:")
return "", err
} else if resp.Error != "" {
log.Errorln("Error compiling contracts: Language error:")
return "", fmt.Errorf("%v", resp.Error)
} else if resp.Warning != "" {
log.WithField("=>", resp.Warning).Warn("Warning during contract compilation")
}
// loop through objects returned from compiler
switch {
case len(resp.Objects) == 1:
log.WithField("path", contractPath).Info("Deploying the only contract in file")
response := resp.Objects[0]
log.WithField("=>", response.ABI).Info("Abi")
log.WithField("=>", response.Bytecode).Info("Bin")
if response.Bytecode != "" {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
case deploy.Instance == "all":
log.WithField("path", contractPath).Info("Deploying all contracts")
var baseObj string
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
if strings.ToLower(response.Objectname) == strings.ToLower(strings.TrimSuffix(filepath.Base(deploy.Contract), filepath.Ext(filepath.Base(deploy.Contract)))) {
baseObj = result
}
}
if baseObj != "" {
result = baseObj
}
default:
log.WithField("contract", deploy.Instance).Info("Deploying a single contract")
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
if matchInstanceName(response.Objectname, deploy.Instance) {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
}
}
}
return result, nil
}
func matchInstanceName(objectName, deployInstance string) bool {
if objectName == "" {
return false
}
// Ignore the filename component that newer versions of Solidity include in object name
objectNameParts := strings.Split(objectName, ":")
return strings.ToLower(objectNameParts[len(objectNameParts)-1]) == strings.ToLower(deployInstance)
}
// TODO [rj] refactor to remove [contractPath] from the function's signature => it is only used in a single error throw.
func deployContract(deploy *def.Deploy, do *def.Packages, compilersResponse compilers.ResponseItem) (string, error) | log.WithField("=>", abiLocation).Warn("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
} else {
log.Debug("Objectname from compilers is blank. Not saving abi.")
}
// additional data may be sent along with the contract
// these are naively added to the end of the contract code using standard
// mint packing
if deploy.Data != nil {
_, callDataArray, err := util.PreProcessInputData(compilersResponse.Objectname, deploy.Data, do, true)
if err != nil {
return "", err
}
packedBytes, err := abi.ReadAbiFormulateCall(compilersResponse.Objectname, "", callDataArray, do)
if err != nil {
return "", err
}
callData := hex.EncodeToString(packedBytes)
contractCode = contractCode + callData
}
tx, err := deployTx(do, deploy, compilersResponse.Objectname, contractCode)
if err != nil {
return "", err
}
// Sign, broadcast, display
contractAddress, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy %s: %v", deploy.Contract, err)
}
// saving contract/library abi at abi/address
if contractAddress != nil {
abiLocation := filepath.Join(do.ABIPath, contractAddress.String())
log.WithField("=>", abiLocation).Debug("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
// saving binary
if deploy.SaveBinary {
contractName := filepath.Join(do.BinPath, fmt.Sprintf("%s.bin", compilersResponse.Objectname))
log.WithField("=>", contractName).Warn("Saving Binary")
if err := ioutil.WriteFile(contractName, []byte(contractCode), 0664); err != nil {
return "", err
}
} else {
log.Debug("Not saving binary.")
}
return contractAddress.String(), nil
} else {
// we shouldn't reach this point because we should have an error before this.
return "", fmt.Errorf("The contract did not deploy. Unable to save abi to abi/contractAddress.")
}
}
func deployTx(do *def.Packages, deploy *def.Deploy, contractName, contractCode string) (*payload.CallTx, error) {
// Deploy contract
log.WithFields(log.Fields{
"name": contractName,
}).Warn("Deploying Contract")
log.WithFields(log.Fields{
"source": deploy.Source,
"code": contractCode,
"chain-url": do.ChainURL,
}).Info()
return do.Call(def.CallArg{
Input: deploy.Source,
Amount: deploy.Amount,
Fee: deploy.Fee,
Gas: deploy.Gas,
Data: contractCode,
Sequence: deploy.Sequence,
})
}
func CallJob(call *def.Call, do *def.Packages) (string, []*def.Variable, error) {
var err error
var callData string
var callDataArray []string
// Preprocess variables
call.Source, _ = util.PreProcess(call.Source, do)
call.Destination, _ = util.PreProcess(call.Destination, do)
//todo: find a way to call the fallback function here
call.Function, callDataArray, err = util.PreProcessInputData(call.Function, call.Data, do, false)
if err != nil {
return "", nil, | {
log.WithField("=>", string(compilersResponse.ABI)).Debug("ABI Specification (From Compilers)")
contractCode := compilersResponse.Bytecode
// Save ABI
if _, err := os.Stat(do.ABIPath); os.IsNotExist(err) {
if err := os.Mkdir(do.ABIPath, 0775); err != nil {
return "", err
}
}
if _, err := os.Stat(do.BinPath); os.IsNotExist(err) {
if err := os.Mkdir(do.BinPath, 0775); err != nil {
return "", err
}
}
// saving contract/library abi
var abiLocation string
if compilersResponse.Objectname != "" {
abiLocation = filepath.Join(do.ABIPath, compilersResponse.Objectname) | identifier_body |
jobs_contracts.go | deploy.Gas = useDefault(deploy.Gas, do.DefaultGas)
// assemble contract
var contractPath string
if _, err := os.Stat(deploy.Contract); err != nil {
if _, secErr := os.Stat(filepath.Join(do.BinPath, deploy.Contract)); secErr != nil {
if _, thirdErr := os.Stat(filepath.Join(do.BinPath, filepath.Base(deploy.Contract))); thirdErr != nil {
return "", fmt.Errorf("Could not find contract in\n* primary path: %v\n* binary path: %v\n* tertiary path: %v", deploy.Contract, filepath.Join(do.BinPath, deploy.Contract), filepath.Join(do.BinPath, filepath.Base(deploy.Contract)))
} else {
contractPath = filepath.Join(do.BinPath, filepath.Base(deploy.Contract))
}
} else {
contractPath = filepath.Join(do.BinPath, deploy.Contract)
}
} else {
contractPath = deploy.Contract
}
// compile
if filepath.Ext(deploy.Contract) == ".bin" {
log.Info("Binary file detected. Using binary deploy sequence.")
log.WithField("=>", contractPath).Info("Binary path")
binaryResponse, err := compilers.RequestBinaryLinkage(contractPath, deploy.Libraries)
if err != nil {
return "", fmt.Errorf("Something went wrong with your binary deployment: %v", err)
}
if binaryResponse.Error != "" {
return "", fmt.Errorf("Something went wrong when you were trying to link your binaries: %v", binaryResponse.Error)
}
contractCode := binaryResponse.Binary
tx, err := deployTx(do, deploy, contractName, string(contractCode))
if err != nil {
return "could not deploy binary contract", err
}
result, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy from path %s: %v", contractPath, err)
}
return result.String(), err
} else {
contractPath = deploy.Contract
log.WithField("=>", contractPath).Info("Contract path")
// normal compilation/deploy sequence
resp, err := compilers.RequestCompile(contractPath, false, deploy.Libraries)
if err != nil {
log.Errorln("Error compiling contracts: Compilers error:")
return "", err
} else if resp.Error != "" {
log.Errorln("Error compiling contracts: Language error:")
return "", fmt.Errorf("%v", resp.Error)
} else if resp.Warning != "" {
log.WithField("=>", resp.Warning).Warn("Warning during contract compilation")
}
// loop through objects returned from compiler
switch {
case len(resp.Objects) == 1:
log.WithField("path", contractPath).Info("Deploying the only contract in file")
response := resp.Objects[0]
log.WithField("=>", response.ABI).Info("Abi")
log.WithField("=>", response.Bytecode).Info("Bin")
if response.Bytecode != "" {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
case deploy.Instance == "all":
log.WithField("path", contractPath).Info("Deploying all contracts")
var baseObj string
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
if strings.ToLower(response.Objectname) == strings.ToLower(strings.TrimSuffix(filepath.Base(deploy.Contract), filepath.Ext(filepath.Base(deploy.Contract)))) {
baseObj = result
}
}
if baseObj != "" {
result = baseObj
}
default:
log.WithField("contract", deploy.Instance).Info("Deploying a single contract")
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
if matchInstanceName(response.Objectname, deploy.Instance) {
result, err = deployContract(deploy, do, response)
if err != nil |
}
}
}
}
return result, nil
}
func matchInstanceName(objectName, deployInstance string) bool {
if objectName == "" {
return false
}
// Ignore the filename component that newer versions of Solidity include in object name
objectNameParts := strings.Split(objectName, ":")
return strings.ToLower(objectNameParts[len(objectNameParts)-1]) == strings.ToLower(deployInstance)
}
// TODO [rj] refactor to remove [contractPath] from the function's signature => it is only used in a single error throw.
func deployContract(deploy *def.Deploy, do *def.Packages, compilersResponse compilers.ResponseItem) (string, error) {
log.WithField("=>", string(compilersResponse.ABI)).Debug("ABI Specification (From Compilers)")
contractCode := compilersResponse.Bytecode
// Save ABI
if _, err := os.Stat(do.ABIPath); os.IsNotExist(err) {
if err := os.Mkdir(do.ABIPath, 0775); err != nil {
return "", err
}
}
if _, err := os.Stat(do.BinPath); os.IsNotExist(err) {
if err := os.Mkdir(do.BinPath, 0775); err != nil {
return "", err
}
}
// saving contract/library abi
var abiLocation string
if compilersResponse.Objectname != "" {
abiLocation = filepath.Join(do.ABIPath, compilersResponse.Objectname)
log.WithField("=>", abiLocation).Warn("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
} else {
log.Debug("Objectname from compilers is blank. Not saving abi.")
}
// additional data may be sent along with the contract
// these are naively added to the end of the contract code using standard
// mint packing
if deploy.Data != nil {
_, callDataArray, err := util.PreProcessInputData(compilersResponse.Objectname, deploy.Data, do, true)
if err != nil {
return "", err
}
packedBytes, err := abi.ReadAbiFormulateCall(compilersResponse.Objectname, "", callDataArray, do)
if err != nil {
return "", err
}
callData := hex.EncodeToString(packedBytes)
contractCode = contractCode + callData
}
tx, err := deployTx(do, deploy, compilersResponse.Objectname, contractCode)
if err != nil {
return "", err
}
// Sign, broadcast, display
contractAddress, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy %s: %v", deploy.Contract, err)
}
// saving contract/library abi at abi/address
if contractAddress != nil {
abiLocation := filepath.Join(do.ABIPath, contractAddress.String())
log.WithField("=>", abiLocation).Debug("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
// saving binary
if deploy.SaveBinary {
contractName := filepath.Join(do.BinPath, fmt.Sprintf("%s.bin", compilersResponse.Objectname))
log.WithField("=>", contractName).Warn("Saving Binary")
if err := ioutil.WriteFile(contractName, []byte(contractCode), 0664); err != nil {
return "", err
}
} else {
log.Debug("Not saving binary.")
}
return contractAddress.String(), nil
} else {
// we shouldn't reach this point because we should have an error before this.
return "", fmt.Errorf("The contract did not deploy. Unable to save abi to abi/contractAddress.")
}
}
func deployTx(do *def.Packages, deploy *def.Deploy, contractName, contractCode string) (*payload.CallTx, error) {
// Deploy contract
log.WithFields(log.Fields{
"name": contractName,
}).Warn("Deploying Contract")
log.WithFields(log.Fields{
"source": deploy.Source,
"code": contractCode,
"chain-url": do.ChainURL,
}).Info()
return do.Call(def.CallArg{
Input: deploy.Source,
Amount: deploy.Amount,
Fee: deploy.Fee,
Gas: deploy.Gas,
Data: contractCode,
Sequence: deploy.Sequence,
})
}
func CallJob(call *def.Call, do *def.Packages) (string, []*def.Variable, error) {
var err error
var callData string
var callDataArray []string
// Preprocess variables
call.Source, _ = util.PreProcess(call.Source, do)
call.Destination, _ = util.PreProcess(call.Destination, do)
//todo: find a way to call the fallback function here
call.Function, callDataArray, err = util.PreProcessInputData(call.Function, call.Data, do, false)
if err != nil {
return "", nil | {
return "", err
} | conditional_block |
client.d.ts | offline
status: string, //"ok", "async", "failed"
data: object | null,
error?: RetError | null,
}
//////////
export interface VipInfo {
readonly user_id?: number,
readonly nickname?: string,
readonly level?: number,
readonly level_speed?: number,
readonly vip_level?: number,
readonly vip_growth_speed?: number,
readonly vip_growth_total?: string,
}
export interface StrangerInfo {
readonly user_id?: number,
readonly nickname?: string,
readonly sex?: string,
readonly age?: number,
readonly area?: string,
readonly signature?: string,
readonly description?: string,
readonly group_id?: number,
}
export interface FriendInfo extends StrangerInfo {
readonly remark?: string
}
export interface GroupInfo {
readonly group_id?: number,
readonly group_name?: string,
readonly member_count?: number,
readonly max_member_count?: number,
readonly owner_id?: number,
readonly last_join_time?: number,
readonly last_sent_time?: number,
readonly shutup_time_whole?: number, // time when the whole-group mute expires
readonly shutup_time_me?: number, // time when my own mute expires
readonly create_time?: number,
readonly grade?: number,
readonly max_admin_count?: number,
readonly active_member_count?: number,
readonly update_time?: number, // last update time of this group's profile data
}
export interface MemberInfo {
readonly group_id?: number,
readonly user_id?: number,
readonly nickname?: string,
readonly card?: string,
readonly sex?: string,
readonly age?: number,
readonly area?: string,
readonly join_time?: number,
readonly last_sent_time?: number,
readonly level?: number,
readonly rank?: string,
readonly role?: string,
readonly unfriendly?: boolean,
readonly title?: string,
readonly title_expire_time?: number,
readonly card_changeable?: boolean,
readonly shutup_time?: number, // time when this member's mute expires
readonly update_time?: number, // last update time of this member's profile data
}
export interface MessageId {
message_id: string
}
//////////
export interface RetStrangerList extends RetCommon {
data: ReadonlyMap<number, StrangerInfo>
}
export interface RetFriendList extends RetCommon {
data: ReadonlyMap<number, FriendInfo>
}
export interface RetGroupList extends RetCommon {
data: ReadonlyMap<number, GroupInfo>
}
export interface RetMemberList extends RetCommon {
data: ReadonlyMap<number, MemberInfo> | null
}
export interface RetStrangerInfo extends RetCommon {
data: StrangerInfo | null
}
export interface RetGroupInfo extends RetCommon {
data: GroupInfo | null
}
export interface RetMemberInfo extends RetCommon {
data: MemberInfo | null
}
export interface RetSendMsg extends RetCommon {
data: MessageId | null
}
export interface RetStatus extends RetCommon {
data: Status
}
export interface RetLoginInfo extends RetCommon {
data: LoginInfo
}
//////////
/**
* @see https://github.com/howmanybots/onebot/blob/master/v11/specs/message/segment.md
*/
export interface MessageElem {
type: string,
data?: object,
}
export interface Anonymous {
id: number,
name: string,
flag: string,
}
export interface EventData {
self_id: number,
time: number,
post_type: string,
system_type?: string,
request_type?: string,
message_type?: string,
notice_type?: string,
sub_type?: string,
image?: Buffer,
url?: string,
message?: MessageElem | string,
raw_message?: string,
message_id?: string,
user_id?: number,
nickname?: string,
group_id?: number,
group_name?: string,
discuss_id?: number,
discuss_name?: string,
font?: string,
anonymous?: Anonymous | null,
sender?: FriendInfo & MemberInfo,
member?: MemberInfo,
auto_reply?: boolean,
flag?: string,
comment?: string,
source?: string,
role?: string,
inviter_id?: number,
operator_id?: number,
duration?: number,
set?: boolean,
dismiss?: boolean,
signature?: string,
title?: string,
content?: string,
action?: string,
suffix?: string,
enable_guest?: boolean,
enable_anonymous?: boolean,
enable_upload_album?: boolean,
enable_upload_file?: boolean,
enable_temp_chat?: boolean,
enable_new_group?: boolean,
enable_show_honor?: boolean,
enable_show_level?: boolean,
enable_show_title?: boolean,
enable_confess?: boolean,
}
//////////
export class Client extends events.EventEmitter {
private constructor();
readonly uin: number;
readonly password_md5: Buffer;
readonly nickname: string;
readonly sex: string;
readonly age: number;
readonly online_status: number;
readonly fl: ReadonlyMap<number, FriendInfo>;
readonly sl: ReadonlyMap<number, StrangerInfo>;
readonly gl: ReadonlyMap<number, GroupInfo>;
readonly gml: ReadonlyMap<number, ReadonlyMap<number, MemberInfo>>;
readonly logger: log4js.Logger;
readonly dir: string;
readonly config: ConfBot;
readonly stat: Statistics;
login(password?: Buffer | string): void; // the password may be plaintext or an md5 hash
captchaLogin(captcha: string): void;
terminate(): void; // close the connection immediately
logout(): Promise<void>; // go offline first, then close the connection
isOnline(): boolean;
setOnlineStatus(status: number): Promise<RetCommon>; // 11 online, 31 away, 41 invisible, 50 busy, 60 "Q me", 70 do not disturb
getFriendList(): RetFriendList;
getStrangerList(): RetStrangerList;
getGroupList(): RetGroupList;
getGroupMemberList(group_id: Uin, no_cache?: boolean): Promise<RetMemberList>;
getStrangerInfo(user_id: Uin, no_cache?: boolean): Promise<RetStrangerInfo>;
getGroupInfo(group_id: Uin, no_cache?: boolean): Promise<RetGroupInfo>;
getGroupMemberInfo(group_id: Uin, user_id: Uin, no_cache?: boolean): Promise<RetMemberInfo>;
sendPrivateMsg(user_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetSendMsg>;
sendGroupMsg(group_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetSendMsg>;
sendDiscussMsg(discuss_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetCommon>;
deleteMsg(message_id: string): Promise<RetCommon>;
getMsg(message_id: string): Promise<RetCommon>;
sendGroupNotice(group_id: Uin, content: string): Promise<RetCommon>;
setGroupName(group_id: Uin, group_name: string): Promise<RetCommon>;
setGroupAnonymous(group_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupWholeBan(group_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupAdmin(group_id: Uin, user_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupSpecialTitle(group_id: Uin, user_id: Uin, special_title?: string, duration?: number): Promise<RetCommon>;
setGroupCard(group_id: Uin, user_id: Uin, card?: string): Promise<RetCommon>;
setGroupKick(group_id: Uin, user_id: Uin, reject_add_request?: boolean): Promise<RetCommon>;
setGroupBan(group_id: Uin, user_id: Uin, duration?: number): Promise<RetCommon>;
setGroupLeave(group_id: Uin, is_dismiss?: boolean): Promise<RetCommon>;
sendGroupPoke(group_id: Uin, user_id: Uin): Promise<RetCommon>; // when group_id is a friend's id, the poke can be sent in a private chat
setFriendAddRequest(flag: string, approve?: boolean, remark?: string, block?: boolean): Promise<RetCommon>;
setGroupAddRequest(flag: string, approve?: boolean, reason?: string, block?: boolean): Promise<RetCommon>;
addGroup(group_id: Uin, comment?: string): Promise<RetCommon>;
addFriend(group_id: Uin, user_id: Uin, comment?: string): Promise<RetCommon>;
deleteFriend(user_id: Uin, block?: boolean): Promise<RetCommon>;
inviteFriend(group_id: Uin, user_id: Uin): Promise<RetCommon>;
sendLike(user_id: Uin, times?: number): Promise<RetCommon>;
setNickname(nickname: string): Promise<RetCommon>;
setGender(gender: 0 | 1 | 2): Promise<RetCommon>; // 0 unknown, 1 male, 2 female
setBirthday(birthday: string | number): Promise<RetCommon>; // in the form 20110202 (YYYYMMDD)
setDescription(description?: string): Promise<RetCommon>;
setSignature(signature?: string): Promise<RetCommon>;
setPortrait(file: Buffer | string): Promise<RetCommon>; // same format as the file field of an image CQ code
setGroupPortrait(group_id: Uin, file: Buffer | string): Promise<RetCommon>; |
getCookies(domain?: string): Promise<RetCommon>;
getCsrfToken(): Promise<RetCommon>; | random_line_split |
|
client.d.ts | 秒)
// a momentary drop-and-reconnect will not trigger this event; it normally fires only when the machine has genuinely lost network connectivity
// set to 0 to disable automatic reconnection; you can then listen for this event and handle it yourself
reconn_interval?: number,
// manually specify the ip and port
// by default msfwifi.3g.qq.com:8080 is used for the connection; if you need to change it, prefer repointing that domain in your hosts file rather than hard-coding an ip here
// @link https://site.ip138.com/msfwifi.3g.qq.com/ the following four ports are usually open: 80, 443, 8080, 14000
remote_ip?: string,
remote_port?: number,
}
export interface Statistics {
readonly start_time: number,
readonly lost_times: number,
readonly recv_pkt_cnt: number,
readonly sent_pkt_cnt: number,
readonly lost_pkt_cnt: number, // packets that timed out without a response
readonly recv_msg_cnt: number,
readonly sent_msg_cnt: number,
}
export interface Status {
online: boolean,
status: number,
remote_ip?: number,
remote_port?: number,
msg_cnt_per_min: number,
statistics: Statistics,
config: ConfBot,
}
export type LoginInfo = StrangerInfo & VipInfo;
//////////
export interface RetError {
code?: number,
message?: string,
}
export interface RetCommon {
retcode: number, // 0 ok, 1 async, 100 error, 102 failed, 103 timeout, 104 offline
status: string, //"ok", "async", "failed"
data: object | null,
error?: RetError | null,
}
//////////
export interface VipInfo {
readonly user_id?: number,
readonly nickname?: string,
readonly level?: number,
readonly level_speed?: number,
readonly vip_level?: number,
readonly vip_growth_speed?: number,
readonly vip_growth_total?: string,
}
export interface StrangerInfo {
readonly user_id?: number,
readonly nickname?: string,
readonly sex?: string,
readonly age?: number,
readonly area?: string,
readonly signature?: string,
readonly description?: string,
readonly group_id?: number,
}
export interface FriendInfo extends StrangerInfo {
readonly remark?: string
}
export interface GroupInfo {
readonly group_id?: number,
readonly group_name?: string,
readonly member_count?: number,
readonly max_member_count?: number,
readonly owner_id?: number,
readonly last_join_time?: number,
readonly last_sent_time?: number,
readonly shutup_time_whole?: number, // time when the whole-group mute expires
readonly shutup_time_me?: number, // time when my own mute expires
readonly create_time?: number,
readonly grade?: number,
readonly max_admin_count?: number,
readonly active_member_count?: number,
readonly update_time?: number, // last update time of this group's profile data
}
export interface MemberInfo {
readonly group_id?: number,
readonly user_id?: number,
readonly nickname?: string,
readonly card?: string,
readonly sex?: string,
readonly age?: number,
readonly area?: string,
readonly join_time?: number,
readonly last_sent_time?: number,
readonly level?: number,
readonly rank?: string,
readonly role?: string,
readonly unfriendly?: boolean,
readonly title?: string,
readonly title_expire_time?: number,
readonly card_changeable?: boolean,
readonly shutup_time?: number, // time when this member's mute expires
readonly update_time?: number, // last update time of this member's profile data
}
export interface MessageId {
message_id: string
}
//////////
export interface RetStrangerList extends RetCommon {
data: ReadonlyMap<number, StrangerInfo>
}
export interface RetFriendList extends RetCommon {
data: ReadonlyMap<number, FriendInfo>
}
export interface RetGroupList extends RetCommon {
data: ReadonlyMap<number, GroupInfo>
}
export interface RetMemberList extends RetCommon {
data: ReadonlyMap<number, MemberInfo> | null
}
export interface RetStrangerInfo extends RetCommon {
data: StrangerInfo | null
}
export interface RetGroupInfo extends RetCommon {
data: GroupInfo | null
}
export interface RetMemberInfo extends RetCommon {
data: MemberInfo | null
}
export interface RetSendMsg extends RetCommon {
data: MessageId | null
}
export interface RetStatus extends RetCommon {
data: Status
}
export interface RetLoginInfo extends RetCommon {
data: LoginInfo
}
//////////
/**
* @see https://github.com/howmanybots/onebot/blob/master/v11/specs/message/segment.md
*/
export interface MessageElem {
type: string,
data?: object,
}
export interface Anonymous {
id: number,
name: string,
flag: string,
}
export interface EventData {
self_id: number,
time: number,
post_type: string,
system_type?: string,
request_type?: string,
message_type?: string,
notice_type?: string,
sub_type?: string,
image?: Buffer,
url?: string,
message?: MessageElem | string,
raw_message?: string,
message_id?: string,
user_id?: number,
nickname?: string,
group_id?: number,
group_name?: string,
discuss_id?: number,
discuss_name?: string,
font?: string,
anonymous?: Anonymous | null,
sender?: FriendInfo & MemberInfo,
member?: MemberInfo,
auto_reply?: boolean,
flag?: string,
comment?: string,
source?: string,
role?: string,
inviter_id?: number,
operator_id?: number,
duration?: number,
set?: boolean,
dismiss?: boolean,
signature?: string,
title?: string,
content?: string,
action?: string,
suffix?: string,
enable_guest?: boolean,
enable_anonymous?: boolean,
enable_upload_album?: boolean,
enable_upload_file?: boolean,
enable_temp_chat?: boolean,
enable_new_group?: boolean,
enable_show_honor?: boolean,
enable_show_level?: boolean,
enable_show_title?: boolean,
enable_confess?: boolean,
}
//////////
export class Client extends events.EventEmitter {
private constructor();
readonly uin: number;
readonly password_md5: Buffer;
readonly nickname: string;
readonly sex: string;
readonly age: number;
readonly online_status: number;
readonly fl: ReadonlyMap<number, FriendInfo>;
readonly sl: ReadonlyMap<number, StrangerInfo>;
readonly gl: ReadonlyMap<number, GroupInfo>;
readonly gml: ReadonlyMap<number, ReadonlyMap<number, MemberInfo>>;
readonly logger: log4js.Logger;
readonly dir: string;
readonly config: ConfBot;
readonly stat: Statistics;
login(password?: Buffer | string): void; // the password may be plaintext or an md5 hash
captchaLogin(capt | tring): void;
terminate(): void; // close the connection immediately
logout(): Promise<void>; // go offline first, then close the connection
isOnline(): boolean;
setOnlineStatus(status: number): Promise<RetCommon>; // 11 online, 31 away, 41 invisible, 50 busy, 60 "Q me!", 70 do not disturb
getFriendList(): RetFriendList;
getStrangerList(): RetStrangerList;
getGroupList(): RetGroupList;
getGroupMemberList(group_id: Uin, no_cache?: boolean): Promise<RetMemberList>;
getStrangerInfo(user_id: Uin, no_cache?: boolean): Promise<RetStrangerInfo>;
getGroupInfo(group_id: Uin, no_cache?: boolean): Promise<RetGroupInfo>;
getGroupMemberInfo(group_id: Uin, user_id: Uin, no_cache?: boolean): Promise<RetMemberInfo>;
sendPrivateMsg(user_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetSendMsg>;
sendGroupMsg(group_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetSendMsg>;
sendDiscussMsg(discuss_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetCommon>;
deleteMsg(message_id: string): Promise<RetCommon>;
getMsg(message_id: string): Promise<RetCommon>;
sendGroupNotice(group_id: Uin, content: string): Promise<RetCommon>;
setGroupName(group_id: Uin, group_name: string): Promise<RetCommon>;
setGroupAnonymous(group_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupWholeBan(group_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupAdmin(group_id: Uin, user_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupSpecialTitle(group_id: Uin, user_id: Uin, special_title?: string, duration?: number): Promise<RetCommon>;
setGroupCard(group_id: Uin, user_id: Uin, card?: string): Promise<RetCommon>;
setGroupKick(group_id: Uin, user_id: Uin, reject_add_request?: boolean): Promise<RetCommon>;
setGroupBan(group_id: Uin, user_id: Uin, duration?: number): Promise<RetCommon>;
setGroupLeave(group_id: Uin, is_dismiss?: boolean): Promise<RetCommon>;
sendGroupPoke(group_id | cha: s | identifier_name |
utils.py | able
IMmean_ibn = th.tensor([0.502, 0.4588, 0.4078])
IMstd_ibn = th.tensor([0.0039, 0.0039, 0.0039])
def renorm(im): return im.sub(IMmean[:, None, None].to(
im.device)).div(IMstd[:, None, None].to(im.device))
def renorm_ibn(im): return im.sub(IMmean_ibn[:, None, None].to(
im.device)).div(IMstd_ibn[:, None, None].to(im.device))[:, range(3)[::-1], :, :]
def denorm(im): return im.mul(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def denorm_ibn(im): return im[:, range(3)[::-1], :, :].mul(IMstd_ibn[:, None, None].to(
im.device)).add(IMmean_ibn[:, None, None].to(im.device))
def xdnorm(im): return im.div(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def chw2hwc(im): return im.transpose((0, 2, 3, 1)) if len(
im.shape) == 4 else im.transpose((1, 2, 0))
def metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor, ncls: int) -> float:
'''
wrapper with a CUDA-OOM (out of memory) guard
'''
try:
nmi = __metric_get_nmi(valvecs, vallabs, ncls)
except RuntimeError as e:
print('! FAISS(GPU) Triggered CUDA OOM. Falling back to CPU clustering...')
os.putenv('FAISS_CPU', '1')
nmi = __metric_get_nmi(valvecs, vallabs, ncls, use_cuda=False)
return nmi
def __metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor,
ncls: int, use_cuda: bool = True) -> float:
'''
Compute the NMI score
'''
use_cuda: bool = use_cuda and th.cuda.is_available() \
and hasattr(faiss, 'StandardGpuResources')
if int(os.getenv('FAISS_CPU', 0)) > 0:
use_cuda = False
npvecs = valvecs.detach().cpu().numpy().astype(np.float32)
nplabs = vallabs.detach().cpu().view(-1).numpy().astype(np.float32)
# a weird dispatcher but it works.
if 'faiss' in globals():
if use_cuda:
gpu_resource = faiss.StandardGpuResources()
cluster_idx = faiss.IndexFlatL2(npvecs.shape[1])
if not th.distributed.is_initialized():
cluster_idx = faiss.index_cpu_to_gpu(
gpu_resource, 0, cluster_idx)
else:
cluster_idx = faiss.index_cpu_to_gpu(gpu_resource,
th.distributed.get_rank(), cluster_idx)
kmeans = faiss.Clustering(npvecs.shape[1], ncls)
kmeans.verbose = False
kmeans.train(npvecs, cluster_idx)
_, pred = cluster_idx.search(npvecs, 1)
pred = pred.flatten()
else:
kmeans = faiss.Kmeans(
npvecs.shape[1], ncls, seed=123, verbose=False)
kmeans.train(npvecs)
_, pred = kmeans.index.search(npvecs, 1)
pred = pred.flatten()
nmi = __nmi(nplabs, pred)
elif 'KMeans' in globals():
kmeans = KMeans(n_clusters=ncls, random_state=0).fit(npvecs)
nmi = __nmi(nplabs, kmeans.labels_)
else:
raise NotImplementedError(
'please provide at least one kmeans implementation for the NMI metric.')
return nmi
def metric_get_rank(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, ks: list) -> tuple:
'''
Flexibly get the rank of the topmost item in the same class
dist = [dist(anchor,x) for x in validation_set]
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# [important] argsort(...)[:,1] for skipping the diagonal (R@1=1.0)
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
rank = th.where(vallabels[argsort] == label)[0].min().item()
return (rank,) + tuple(rank < k for k in ks)
def test_metric_get_rank():
N = 32
dist = th.arange(N) / N
label = 1
labels = th.zeros(N)
labels[[0,1]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (0, True, True))
labels = th.zeros(N)
labels[[0, 2]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (1, False, True))
def metric_get_ap(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor) -> float:
'''
Get the overall average precision
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
argwhere1 = th.where(vallabels[argsort] == label)[0] + 1
ap = ((th.arange(len(argwhere1)).float() + 1).to(argwhere1.device) /
argwhere1).sum().item() / len(argwhere1)
return ap
def metric_get_ap_r(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, rs: list) -> tuple:
'''
computes the mAP@R metric following
"A metric learning reality check", eccv 2020
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:].cpu()
mask = (vallabels[argsort] == label).cpu()
cmask = mask.cumsum(dim=0)
mapr = []
for r in rs:
tmp = (cmask[:r] / (th.arange(r) + 1))[mask[:r]].sum() / r
mapr.append(tmp.item())
return tuple(mapr)
def test_metric_get_ap_r():
def et1e_4(a, b):
assert(abs(a - b) < 1e-4)
N = 101
dist = th.arange(N) / N
label = 1
#
labels = th.zeros(N)
labels[[0,1]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.1)
#
labels = th.zeros(N)
labels[[0,1,10]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.12)
#
labels = th.zeros(N)
labels[[0,1,2]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.20)
#
labels = th.zeros(N)
labels[th.arange(11)] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 1.00)
def rjson(j: object) -> str:
'''
Render/Highlight the JSON code for pretty print
'''
if isinstance(j, str):
'''
let's assume it's a json string
'''
code = j
elif any(isinstance(j, x) for x in (str, list, dict, float, int)):
'''
let's first serialize it into json then render
'''
code = json.dumps(j)
else:
|
return highlight(code, PythonLexer | raise ValueError('does not know how to deal with such datatype') | conditional_block |
utils.py | able
IMmean_ibn = th.tensor([0.502, 0.4588, 0.4078])
IMstd_ibn = th.tensor([0.0039, 0.0039, 0.0039])
def renorm(im): return im.sub(IMmean[:, None, None].to(
im.device)).div(IMstd[:, None, None].to(im.device))
def renorm_ibn(im): return im.sub(IMmean_ibn[:, None, None].to(
im.device)).div(IMstd_ibn[:, None, None].to(im.device))[:, range(3)[::-1], :, :]
def denorm(im): return im.mul(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def denorm_ibn(im): |
def xdnorm(im): return im.div(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def chw2hwc(im): return im.transpose((0, 2, 3, 1)) if len(
im.shape) == 4 else im.transpose((1, 2, 0))
def metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor, ncls: int) -> float:
'''
wrapper with a CUDA-OOM (out of memory) guard
'''
try:
nmi = __metric_get_nmi(valvecs, vallabs, ncls)
except RuntimeError as e:
print('! FAISS(GPU) Triggered CUDA OOM. Falling back to CPU clustering...')
os.putenv('FAISS_CPU', '1')
nmi = __metric_get_nmi(valvecs, vallabs, ncls, use_cuda=False)
return nmi
def __metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor,
ncls: int, use_cuda: bool = True) -> float:
'''
Compute the NMI score
'''
use_cuda: bool = use_cuda and th.cuda.is_available() \
and hasattr(faiss, 'StandardGpuResources')
if int(os.getenv('FAISS_CPU', 0)) > 0:
use_cuda = False
npvecs = valvecs.detach().cpu().numpy().astype(np.float32)
nplabs = vallabs.detach().cpu().view(-1).numpy().astype(np.float32)
# a weird dispatcher but it works.
if 'faiss' in globals():
if use_cuda:
gpu_resource = faiss.StandardGpuResources()
cluster_idx = faiss.IndexFlatL2(npvecs.shape[1])
if not th.distributed.is_initialized():
cluster_idx = faiss.index_cpu_to_gpu(
gpu_resource, 0, cluster_idx)
else:
cluster_idx = faiss.index_cpu_to_gpu(gpu_resource,
th.distributed.get_rank(), cluster_idx)
kmeans = faiss.Clustering(npvecs.shape[1], ncls)
kmeans.verbose = False
kmeans.train(npvecs, cluster_idx)
_, pred = cluster_idx.search(npvecs, 1)
pred = pred.flatten()
else:
kmeans = faiss.Kmeans(
npvecs.shape[1], ncls, seed=123, verbose=False)
kmeans.train(npvecs)
_, pred = kmeans.index.search(npvecs, 1)
pred = pred.flatten()
nmi = __nmi(nplabs, pred)
elif 'KMeans' in globals():
kmeans = KMeans(n_clusters=ncls, random_state=0).fit(npvecs)
nmi = __nmi(nplabs, kmeans.labels_)
else:
raise NotImplementedError(
'please provide at least one kmeans implementation for the NMI metric.')
return nmi
def metric_get_rank(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, ks: list) -> tuple:
'''
Flexibly get the rank of the topmost item in the same class
dist = [dist(anchor,x) for x in validation_set]
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# [important] argsort(...)[:,1] for skipping the diagonal (R@1=1.0)
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
rank = th.where(vallabels[argsort] == label)[0].min().item()
return (rank,) + tuple(rank < k for k in ks)
def test_metric_get_rank():
N = 32
dist = th.arange(N) / N
label = 1
labels = th.zeros(N)
labels[[0,1]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (0, True, True))
labels = th.zeros(N)
labels[[0, 2]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (1, False, True))
def metric_get_ap(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor) -> float:
'''
Get the overall average precision
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
argwhere1 = th.where(vallabels[argsort] == label)[0] + 1
ap = ((th.arange(len(argwhere1)).float() + 1).to(argwhere1.device) /
argwhere1).sum().item() / len(argwhere1)
return ap
def metric_get_ap_r(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, rs: list) -> tuple:
'''
computes the mAP@R metric following
"A metric learning reality check", eccv 2020
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:].cpu()
mask = (vallabels[argsort] == label).cpu()
cmask = mask.cumsum(dim=0)
mapr = []
for r in rs:
tmp = (cmask[:r] / (th.arange(r) + 1))[mask[:r]].sum() / r
mapr.append(tmp.item())
return tuple(mapr)
def test_metric_get_ap_r():
def et1e_4(a, b):
assert(abs(a - b) < 1e-4)
N = 101
dist = th.arange(N) / N
label = 1
#
labels = th.zeros(N)
labels[[0,1]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.1)
#
labels = th.zeros(N)
labels[[0,1,10]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.12)
#
labels = th.zeros(N)
labels[[0,1,2]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.20)
#
labels = th.zeros(N)
labels[th.arange(11)] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 1.00)
def rjson(j: object) -> str:
'''
Render/Highlight the JSON code for pretty print
'''
if isinstance(j, str):
'''
let's assume it's a json string
'''
code = j
elif any(isinstance(j, x) for x in (str, list, dict, float, int)):
'''
let's first serialize it into json then render
'''
code = json.dumps(j)
else:
raise ValueError('does not know how to deal with such datatype')
return highlight(code, Python | return im[:, range(3)[::-1], :, :].mul(IMmean_ibn[:, None, None].to(
im.device)).add(IMstd_ibn[:, None, None].to(im.device)) | identifier_body |
utils.py | able
IMmean_ibn = th.tensor([0.502, 0.4588, 0.4078])
IMstd_ibn = th.tensor([0.0039, 0.0039, 0.0039])
def renorm(im): return im.sub(IMmean[:, None, None].to(
im.device)).div(IMstd[:, None, None].to(im.device))
def renorm_ibn(im): return im.sub(IMmean_ibn[:, None, None].to(
im.device)).div(IMstd_ibn[:, None, None].to(im.device))[:, range(3)[::-1], :, :]
def denorm(im): return im.mul(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def | (im): return im[:, range(3)[::-1], :, :].mul(IMmean_ibn[:, None, None].to(
im.device)).add(IMstd_ibn[:, None, None].to(im.device))
def xdnorm(im): return im.div(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def chw2hwc(im): return im.transpose((0, 2, 3, 1)) if len(
im.shape) == 4 else im.transpose((1, 2, 0))
def metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor, ncls: int) -> float:
'''
wrapper with a CUDA-OOM (out of memory) guard
'''
try:
nmi = __metric_get_nmi(valvecs, vallabs, ncls)
except RuntimeError as e:
print('! FAISS(GPU) Triggered CUDA OOM. Falling back to CPU clustering...')
os.putenv('FAISS_CPU', '1')
nmi = __metric_get_nmi(valvecs, vallabs, ncls, use_cuda=False)
return nmi
def __metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor,
ncls: int, use_cuda: bool = True) -> float:
'''
Compute the NMI score
'''
use_cuda: bool = use_cuda and th.cuda.is_available() \
and hasattr(faiss, 'StandardGpuResources')
if int(os.getenv('FAISS_CPU', 0)) > 0:
use_cuda = False
npvecs = valvecs.detach().cpu().numpy().astype(np.float32)
nplabs = vallabs.detach().cpu().view(-1).numpy().astype(np.float32)
# a weird dispatcher but it works.
if 'faiss' in globals():
if use_cuda:
gpu_resource = faiss.StandardGpuResources()
cluster_idx = faiss.IndexFlatL2(npvecs.shape[1])
if not th.distributed.is_initialized():
cluster_idx = faiss.index_cpu_to_gpu(
gpu_resource, 0, cluster_idx)
else:
cluster_idx = faiss.index_cpu_to_gpu(gpu_resource,
th.distributed.get_rank(), cluster_idx)
kmeans = faiss.Clustering(npvecs.shape[1], ncls)
kmeans.verbose = False
kmeans.train(npvecs, cluster_idx)
_, pred = cluster_idx.search(npvecs, 1)
pred = pred.flatten()
else:
kmeans = faiss.Kmeans(
npvecs.shape[1], ncls, seed=123, verbose=False)
kmeans.train(npvecs)
_, pred = kmeans.index.search(npvecs, 1)
pred = pred.flatten()
nmi = __nmi(nplabs, pred)
elif 'KMeans' in globals():
kmeans = KMeans(n_clusters=ncls, random_state=0).fit(npvecs)
nmi = __nmi(nplabs, kmeans.labels_)
else:
raise NotImplementedError(
'please provide at least one kmeans implementation for the NMI metric.')
return nmi
def metric_get_rank(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, ks: list) -> tuple:
'''
Flexibly get the rank of the topmost item in the same class
dist = [dist(anchor,x) for x in validation_set]
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# [important] argsort(...)[:,1] for skipping the diagonal (R@1=1.0)
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
rank = th.where(vallabels[argsort] == label)[0].min().item()
return (rank,) + tuple(rank < k for k in ks)
def test_metric_get_rank():
N = 32
dist = th.arange(N) / N
label = 1
labels = th.zeros(N)
labels[[0,1]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (0, True, True))
labels = th.zeros(N)
labels[[0, 2]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (1, False, True))
def metric_get_ap(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor) -> float:
'''
Get the overall average precision
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
argwhere1 = th.where(vallabels[argsort] == label)[0] + 1
ap = ((th.arange(len(argwhere1)).float() + 1).to(argwhere1.device) /
argwhere1).sum().item() / len(argwhere1)
return ap
def metric_get_ap_r(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, rs: list) -> tuple:
'''
computes the mAP@R metric following
"A metric learning reality check", eccv 2020
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:].cpu()
mask = (vallabels[argsort] == label).cpu()
cmask = mask.cumsum(dim=0)
mapr = []
for r in rs:
tmp = (cmask[:r] / (th.arange(r) + 1))[mask[:r]].sum() / r
mapr.append(tmp.item())
return tuple(mapr)
def test_metric_get_ap_r():
def et1e_4(a, b):
assert(abs(a - b) < 1e-4)
N = 101
dist = th.arange(N) / N
label = 1
#
labels = th.zeros(N)
labels[[0,1]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.1)
#
labels = th.zeros(N)
labels[[0,1,10]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.12)
#
labels = th.zeros(N)
labels[[0,1,2]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.20)
#
labels = th.zeros(N)
labels[th.arange(11)] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 1.00)
def rjson(j: object) -> str:
'''
Render/Highlight the JSON code for pretty print
'''
if isinstance(j, str):
'''
let's assume it's a json string
'''
code = j
elif any(isinstance(j, x) for x in (str, list, dict, float, int)):
'''
let's first serialize it into json then render
'''
code = json.dumps(j)
else:
raise ValueError('does not know how to deal with such datatype')
return highlight(code, PythonLexer | denorm_ibn | identifier_name |
utils.py | if 'faiss' in globals():
if use_cuda:
gpu_resource = faiss.StandardGpuResources()
cluster_idx = faiss.IndexFlatL2(npvecs.shape[1])
if not th.distributed.is_initialized():
cluster_idx = faiss.index_cpu_to_gpu(
gpu_resource, 0, cluster_idx)
else:
cluster_idx = faiss.index_cpu_to_gpu(gpu_resource,
th.distributed.get_rank(), cluster_idx)
kmeans = faiss.Clustering(npvecs.shape[1], ncls)
kmeans.verbose = False
kmeans.train(npvecs, cluster_idx)
_, pred = cluster_idx.search(npvecs, 1)
pred = pred.flatten()
else:
kmeans = faiss.Kmeans(
npvecs.shape[1], ncls, seed=123, verbose=False)
kmeans.train(npvecs)
_, pred = kmeans.index.search(npvecs, 1)
pred = pred.flatten()
nmi = __nmi(nplabs, pred)
elif 'KMeans' in globals():
kmeans = KMeans(n_clusters=ncls, random_state=0).fit(npvecs)
nmi = __nmi(nplabs, kmeans.labels_)
else:
raise NotImplementedError(
'please provide at least one kmeans implementation for the NMI metric.')
return nmi
def metric_get_rank(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, ks: list) -> tuple:
'''
Flexibly get the rank of the topmost item in the same class
dist = [dist(anchor,x) for x in validation_set]
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# [important] argsort(...)[:,1] for skipping the diagonal (R@1=1.0)
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
rank = th.where(vallabels[argsort] == label)[0].min().item()
return (rank,) + tuple(rank < k for k in ks)
def test_metric_get_rank():
N = 32
dist = th.arange(N) / N
label = 1
labels = th.zeros(N)
labels[[0,1]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (0, True, True))
labels = th.zeros(N)
labels[[0, 2]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (1, False, True))
def metric_get_ap(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor) -> float:
'''
Get the overall average precision
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
argwhere1 = th.where(vallabels[argsort] == label)[0] + 1
ap = ((th.arange(len(argwhere1)).float() + 1).to(argwhere1.device) /
argwhere1).sum().item() / len(argwhere1)
return ap
def metric_get_ap_r(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, rs: list) -> tuple:
'''
computes the mAP@R metric following
"A metric learning reality check", eccv 2020
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:].cpu()
mask = (vallabels[argsort] == label).cpu()
cmask = mask.cumsum(dim=0)
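# cmask[k] counts how many of the k+1 nearest neighbours share the query's label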
mapr = []
for r in rs:
tmp = (cmask[:r] / (th.arange(r) + 1))[mask[:r]].sum() / r
mapr.append(tmp.item())
return tuple(mapr)
def test_metric_get_ap_r():
def et1e_4(a, b):
assert(abs(a - b) < 1e-4)
N = 101
dist = th.arange(N) / N
label = 1
#
labels = th.zeros(N)
labels[[0,1]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.1)
#
labels = th.zeros(N)
labels[[0,1,10]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.12)
#
labels = th.zeros(N)
labels[[0,1,2]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.20)
#
labels = th.zeros(N)
labels[th.arange(11)] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 1.00)
def rjson(j: object) -> str:
'''
Render/Highlight the JSON code for pretty print
'''
if isinstance(j, str):
'''
let's assume it's a json string
'''
code = j
elif any(isinstance(j, x) for x in (str, list, dict, float, int)):
'''
let's first serialize it into json then render
'''
code = json.dumps(j)
else:
raise ValueError('does not know how to deal with such datatype')
return highlight(code, PythonLexer(), TerminalFormatter())
def pdist(repres: th.Tensor, metric: str) -> th.Tensor:
'''
Helper: compute pairwise distance matrix.
https://github.com/pytorch/pytorch/issues/48306
'''
assert(len(repres.shape) == 2)
with th.no_grad():
if metric == 'C':
# 1. th.nn.functional.cosine_similarity(x[:,:,None],
# x.t()[None,:,:])
repres = th.nn.functional.normalize(repres, dim=-1)
pdist = 1.0 - th.mm(repres, repres.t())
elif metric in ('E', 'N'):
if metric == 'N':
repres = th.nn.functional.normalize(repres, dim=-1)
# Memory efficient pairwise euclidean distance matrix
# 1. th.nn.functional.pairwise_distance(x[:,:,None], x.t()[None,:,:])
# 2. th.cdist(x,x)
prod = th.mm(repres, repres.t())
norm = prod.diag().unsqueeze(1).expand_as(prod)
pdist = (norm + norm.t() - 2 * prod).sqrt()
else:
raise ValueError(f'illegal metric {metric}')
return pdist
def orthogonalRegularization(model, loss):
losses = []
for m in model.modules():
if isinstance(m, th.nn.Linear):
w = m.weight
mat = th.matmul(w, w.t())
diff = mat - th.diag(th.diag(mat))
loss = th.mean(th.pow(diff, 2))
losses.append(loss)
return th.sum(th.stack(losses))
@contextlib.contextmanager
def openlock(*args, **kwargs):
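# open the file and hold an exclusive fcntl lock for the lifetime of the with-block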
lock = open(*args, **kwargs)
fcntl.lockf(lock, fcntl.LOCK_EX)
try:
yield lock
finally:
fcntl.lockf(lock, fcntl.LOCK_UN)
lock.close()
def nsort(L: list, R: str):
'''
sort list L by the key:int matched from regex R, descending.
'''
assert(all(re.match(R, item) for item in L))
nL = [(int(re.match(R, item).groups()[0]), item) for item in L]
nL = sorted(nL, key=lambda x: x[0], reverse=True)
return [x[-1] for x in nL]
def test_nsort():
x = [x.strip() for x in '''
version_0
version_2
version_10
version_3
version_1
'''.strip().split('\n')]
y = [y.strip() for y in '''
epoch=0.ckpt
epoch=10.ckpt | epoch=2.ckpt | random_line_split |
|
tree_based_retrieval.py | 0, mode = 'FAN_AVG', uniform = True)
class TreeModel():
| query, doc = input_fields[0], input_fields[1]
else:
query, doc = input_fields[0][0], input_fields[1][0]
query_vec = self.vector_generation(query, 'Q')
doc_id = self.tree_index.lookup(doc)
doc_vec = self.vector_generation(doc, 'D', doc_id)
return query_vec, doc_vec, doc_id
elif mode == tf.contrib.learn.ModeKeys.INFER:
if FLAGS.use_mstf_ops:
query = input_fields[0]
else:
query = input_fields[0][0]
query_vec = self.vector_generation(query,'Q')
return [query_vec] + self.search(query_vec)
def search(self, query_vec):
#[batch_size,vec_dim]
batch_size = tf.shape(query_vec)[0]
query_vec = tf.expand_dims(query_vec, axis = 1)
start_layer = math.floor(math.log(FLAGS.top_k)/math.log(2)) + 2
#[batch_size,N]
top_index = tf.tile(tf.expand_dims(tf.range(pow(2,start_layer-2), pow(2,start_layer-1)),axis=0),[batch_size,1])
#top_index = tf.range(pow(2, start_layer-1),pow(2,start_layer))
#return top_index
for i in range(start_layer, self.tree_height):
#[batch_size,2N]
eval_index = tf.concat([tf.cast(top_index * 2,tf.int32), tf.cast(top_index * 2 + 1,tf.int32)],axis = 1)
#return eval_index
#[batch_size,2N,vec_dim]
eval_emb = tf.gather(self.layer_embedding, eval_index)
#return tf.shape(eval_emb)
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
#return tf.shape(query_vec)
#[batch_size,2N] hope so....
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
#return eval_score
#return tf.shape(eval_score)
#Select Top N
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
#top_index = tf.expand_dims(top_index, axis=2)
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
top_index = tf.gather_nd(eval_index,expand_index)
#return top_index,eval_index, what
res = top_index * 2 - pow(2, self.tree_height - 1)
res = tf.concat([res, res+1],axis=1)
values = tf.concat([values, values], axis=1)
if not FLAGS.leaf_content_emb:
eval_emb = tf.gather(self.leaf_embedding, tf.cast(res - pow(2, self.tree_height - 1),tf.int32))
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
res = tf.gather_nd(res,expand_index)
return [res,values]
def vector_generation(self, text, model_prefix, doc_id = None):
if model_prefix == "D" and not FLAGS.leaf_content_emb:
return tf.nn.l2_normalize(tf.gather(self.leaf_embedding, doc_id),dim = 1)
dims = parse_dims(FLAGS.semantic_model_dims)
text_vecs, step_mask, sequence_length = xletter_feature_extractor(text, model_prefix, self.op_dict, FLAGS.xletter_cnt, FLAGS.xletter_win_size, FLAGS.dim_xletter_emb)
maxpooling_vec = mask_maxpool(tf.nn.tanh(text_vecs),step_mask)
dim_input = FLAGS.dim_xletter_emb
input_vec = maxpooling_vec
for i, dim in enumerate(self.dims):
dim_output = dim
random_range = math.sqrt(6.0/(dim_input+dim_output))
with tf.variable_scope("semantic_layer{:}".format(i),reuse=tf.AUTO_REUSE):
weight = tf.get_variable("weight_" + model_prefix, shape = [dim_input, dim_output], initializer = tf.random_uniform_initializer(-random_range, random_range))
output_vec = tf.matmul(input_vec, weight)
output_vec = tf.nn.tanh(output_vec)
input_vec = output_vec
normalized_vec = tf.nn.l2_normalize(output_vec, dim = 1)
return normalized_vec
def calc_loss(self, inference_res):
query_vec, doc_vec, doc_id = inference_res
batch_size = tf.shape(query_vec)[0]
#Leaf Layer Loss
posCos = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
allCos = [posCos]
for i in range(0, FLAGS.negative_sample):
random_indices = (tf.range(batch_size) + tf.random_uniform([batch_size],1,batch_size, tf.int32)) % batch_size
negCos = tf.reduce_sum(tf.multiply(query_vec, tf.gather(doc_vec, random_indices)),axis=1)
allCos.append(tf.where(tf.equal(negCos,1),tf.zeros_like(negCos),negCos))
allCos = tf.stack(allCos, axis=1)
softmax = tf.nn.softmax(allCos * FLAGS.softmax_gamma, dim=1)
leafLoss = tf.reduce_sum(-tf.log(softmax[:,0]))
#Node Layer Loss
doc_idx = doc_id + pow(2, self.tree_height - 1)
nodeLoss = tf.zeros_like(leafLoss,dtype=tf.float32)
for i in range(4, self.tree_height):
cosines = []
posIdx = tf.cast(doc_idx // pow(2, self.tree_height - i),tf.int32)
posVec = tf.nn.l2_normalize(tf.gather(self.layer_embedding, posIdx),dim = 1)
cosines.append(tf.reduce_sum(tf.multiply(query_vec, posVec), axis = 1))
nodeCnt = pow(2,i-1)
for j in range(0, FLAGS.layer_negative_sample):
random_idx = nodeCnt + (posIdx - nodeCnt + tf.random_uniform([batch_size],1,nodeCnt,tf.int32)) % nodeCnt
cosines.append(tf.reduce_sum(tf.multiply(query_vec, tf.nn.l2_normalize(tf.gather(self.layer_embedding, random_idx),dim=1)),axis=1))
cosines = tf.stack(cosines, axis=1)
softmax = tf.nn.softmax(cosines * FLAGS.softmax_gamma, dim = 1)
nodeLoss += tf.reduce_sum(-tf.log(softmax[:,0]))
weight = batch_size
loss = tf.cast(leafLoss + FLAGS.layer_weight * nodeLoss,tf.float32)
tf.summary.scalar('softmax_losses',loss)
return [loss, leafLoss, nodeLoss], weight
def calc | def __init__(self):
TreeHeight = lambda x: int(math.log(x-1)/math.log(2)) + 2
indexCnt = count_idx(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file)
self.tree_height = TreeHeight(indexCnt+1)
self.tree_index = lookup_ops.index_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = indexCnt)
self.reverse_tree_index = lookup_ops.index_to_string_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = '<unk>')
self.dims = parse_dims(FLAGS.semantic_model_dims)
self.layer_embedding = tf.get_variable(name='tree_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if not FLAGS.leaf_content_emb:
self.leaf_embedding = tf.get_variable(name='leaf_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if FLAGS.use_mstf_ops == 1:
self.op_dict = mstf.dssm_dict(FLAGS.xletter_dict)
elif FLAGS.use_mstf_ops == -1:
self.op_dict = XletterPreprocessor(FLAGS.xletter_dict, FLAGS.xletter_win_size)
else:
self.op_dict = None
def inference(self, input_fields, mode):
if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
if FLAGS.use_mstf_ops: | identifier_body |
tree_based_retrieval.py | 0, mode = 'FAN_AVG', uniform = True)
class TreeModel():
def __init__(self):
TreeHeight = lambda x: int(math.log(x-1)/math.log(2)) + 2
indexCnt = count_idx(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file)
self.tree_height = TreeHeight(indexCnt+1)
self.tree_index = lookup_ops.index_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = indexCnt)
self.reverse_tree_index = lookup_ops.index_to_string_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = '<unk>')
self.dims = parse_dims(FLAGS.semantic_model_dims)
self.layer_embedding = tf.get_variable(name='tree_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if not FLAGS.leaf_content_emb:
|
if FLAGS.use_mstf_ops == 1:
self.op_dict = mstf.dssm_dict(FLAGS.xletter_dict)
elif FLAGS.use_mstf_ops == -1:
self.op_dict = XletterPreprocessor(FLAGS.xletter_dict, FLAGS.xletter_win_size)
else:
self.op_dict = None
def inference(self, input_fields, mode):
if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
if FLAGS.use_mstf_ops:
query, doc = input_fields[0], input_fields[1]
else:
query, doc = input_fields[0][0], input_fields[1][0]
query_vec = self.vector_generation(query, 'Q')
doc_id = self.tree_index.lookup(doc)
doc_vec = self.vector_generation(doc, 'D', doc_id)
return query_vec, doc_vec, doc_id
elif mode == tf.contrib.learn.ModeKeys.INFER:
if FLAGS.use_mstf_ops:
query = input_fields[0]
else:
query = input_fields[0][0]
query_vec = self.vector_generation(query,'Q')
return [query_vec] + self.search(query_vec)
def search(self, query_vec):
#[batch_size,vec_dim]
batch_size = tf.shape(query_vec)[0]
query_vec = tf.expand_dims(query_vec, axis = 1)
start_layer = math.floor(math.log(FLAGS.top_k)/math.log(2)) + 2
#[batch_size,N]
top_index = tf.tile(tf.expand_dims(tf.range(pow(2,start_layer-2), pow(2,start_layer-1)),axis=0),[batch_size,1])
#top_index = tf.range(pow(2, start_layer-1),pow(2,start_layer))
#return top_index
for i in range(start_layer, self.tree_height):
#[batch_size,2N]
eval_index = tf.concat([tf.cast(top_index * 2,tf.int32), tf.cast(top_index * 2 + 1,tf.int32)],axis = 1)
#return eval_index
#[batch_size,2N,vec_dim]
eval_emb = tf.gather(self.layer_embedding, eval_index)
#return tf.shape(eval_emb)
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
#return tf.shape(query_vec)
#[batch_size,2N] hope so....
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
#return eval_score
#return tf.shape(eval_score)
#Select Top N
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
#top_index = tf.expand_dims(top_index, axis=2)
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
top_index = tf.gather_nd(eval_index,expand_index)
#return top_index,eval_index, what
res = top_index * 2 - pow(2, self.tree_height - 1)
res = tf.concat([res, res+1],axis=1)
values = tf.concat([values, values], axis=1)
if not FLAGS.leaf_content_emb:
eval_emb = tf.gather(self.leaf_embedding, tf.cast(res - pow(2, self.tree_height - 1),tf.int32))
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
res = tf.gather_nd(res,expand_index)
return [res,values]
def vector_generation(self, text, model_prefix, doc_id = None):
if model_prefix == "D" and not FLAGS.leaf_content_emb:
return tf.nn.l2_normalize(tf.gather(self.leaf_embedding, doc_id),dim = 1)
dims = parse_dims(FLAGS.semantic_model_dims)
text_vecs, step_mask, sequence_length = xletter_feature_extractor(text, model_prefix, self.op_dict, FLAGS.xletter_cnt, FLAGS.xletter_win_size, FLAGS.dim_xletter_emb)
maxpooling_vec = mask_maxpool(tf.nn.tanh(text_vecs),step_mask)
dim_input = FLAGS.dim_xletter_emb
input_vec = maxpooling_vec
for i, dim in enumerate(self.dims):
dim_output = dim
random_range = math.sqrt(6.0/(dim_input+dim_output))
with tf.variable_scope("semantic_layer{:}".format(i),reuse=tf.AUTO_REUSE):
weight = tf.get_variable("weight_" + model_prefix, shape = [dim_input, dim_output], initializer = tf.random_uniform_initializer(-random_range, random_range))
output_vec = tf.matmul(input_vec, weight)
output_vec = tf.nn.tanh(output_vec)
input_vec = output_vec
normalized_vec = tf.nn.l2_normalize(output_vec, dim = 1)
return normalized_vec
def calc_loss(self, inference_res):
query_vec, doc_vec, doc_id = inference_res
batch_size = tf.shape(query_vec)[0]
#Leaf Layer Loss
posCos = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
allCos = [posCos]
for i in range(0, FLAGS.negative_sample):
random_indices = (tf.range(batch_size) + tf.random_uniform([batch_size],1,batch_size, tf.int32)) % batch_size
negCos = tf.reduce_sum(tf.multiply(query_vec, tf.gather(doc_vec, random_indices)),axis=1)
allCos.append(tf.where(tf.equal(negCos,1),tf.zeros_like(negCos),negCos))
allCos = tf.stack(allCos, axis=1)
softmax = tf.nn.softmax(allCos * FLAGS.softmax_gamma, dim=1)
leafLoss = tf.reduce_sum(-tf.log(softmax[:,0]))
#Node Layer Loss
doc_idx = doc_id + pow(2, self.tree_height - 1)
nodeLoss = tf.zeros_like(leafLoss,dtype=tf.float32)
for i in range(4, self.tree_height):
cosines = []
posIdx = tf.cast(doc_idx // pow(2, self.tree_height - i),tf.int32)
posVec = tf.nn.l2_normalize(tf.gather(self.layer_embedding, posIdx),dim = 1)
cosines.append(tf.reduce_sum(tf.multiply(query_vec, posVec), axis = 1))
nodeCnt = pow(2,i-1)
for j in range(0, FLAGS.layer_negative_sample):
random_idx = nodeCnt + (posIdx - nodeCnt + tf.random_uniform([batch_size],1,nodeCnt,tf.int32)) % nodeCnt
cosines.append(tf.reduce_sum(tf.multiply(query_vec, tf.nn.l2_normalize(tf.gather(self.layer_embedding, random_idx),dim=1)),axis=1))
cosines = tf.stack(cosines, axis=1)
softmax = tf.nn.softmax(cosines * FLAGS.softmax_gamma, dim = 1)
nodeLoss += tf.reduce_sum(-tf.log(softmax[:,0]))
weight = batch_size
loss = tf.cast(leafLoss + FLAGS.layer_weight * nodeLoss,tf.float32)
tf.summary.scalar('softmax_losses',loss)
return [loss, leafLoss, nodeLoss], weight
def | self.leaf_embedding = tf.get_variable(name='leaf_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]]) | conditional_block |
tree_based_retrieval.py | mode = 'FAN_AVG', uniform = True)
class TreeModel():
def __init__(self):
TreeHeight = lambda x: int(math.log(x-1)/math.log(2)) + 2
indexCnt = count_idx(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file)
self.tree_height = TreeHeight(indexCnt+1)
self.tree_index = lookup_ops.index_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = indexCnt)
self.reverse_tree_index = lookup_ops.index_to_string_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = '<unk>')
self.dims = parse_dims(FLAGS.semantic_model_dims)
self.layer_embedding = tf.get_variable(name='tree_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if not FLAGS.leaf_content_emb:
self.leaf_embedding = tf.get_variable(name='leaf_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if FLAGS.use_mstf_ops == 1:
self.op_dict = mstf.dssm_dict(FLAGS.xletter_dict)
elif FLAGS.use_mstf_ops == -1:
self.op_dict = XletterPreprocessor(FLAGS.xletter_dict, FLAGS.xletter_win_size)
else:
self.op_dict = None
def inference(self, input_fields, mode):
if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
if FLAGS.use_mstf_ops:
query, doc = input_fields[0], input_fields[1]
else:
query, doc = input_fields[0][0], input_fields[1][0]
query_vec = self.vector_generation(query, 'Q')
doc_id = self.tree_index.lookup(doc)
doc_vec = self.vector_generation(doc, 'D', doc_id)
return query_vec, doc_vec, doc_id
elif mode == tf.contrib.learn.ModeKeys.INFER:
if FLAGS.use_mstf_ops:
query = input_fields[0]
else:
query = input_fields[0][0]
query_vec = self.vector_generation(query,'Q')
return [query_vec] + self.search(query_vec)
def search(self, query_vec):
#[batch_size,vec_dim]
batch_size = tf.shape(query_vec)[0]
query_vec = tf.expand_dims(query_vec, axis = 1)
start_layer = math.floor(math.log(FLAGS.top_k)/math.log(2)) + 2
#[batch_size,N]
top_index = tf.tile(tf.expand_dims(tf.range(pow(2,start_layer-2), pow(2,start_layer-1)),axis=0),[batch_size,1])
#top_index = tf.range(pow(2, start_layer-1),pow(2,start_layer))
#return top_index
for i in range(start_layer, self.tree_height):
#[batch_size,2N]
eval_index = tf.concat([tf.cast(top_index * 2,tf.int32), tf.cast(top_index * 2 + 1,tf.int32)],axis = 1)
#return eval_index
#[batch_size,2N,vec_dim]
eval_emb = tf.gather(self.layer_embedding, eval_index)
#return tf.shape(eval_emb)
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
#return tf.shape(query_vec)
#[batch_size,2N] hope so....
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
#return eval_score
#return tf.shape(eval_score)
#Select Top N
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
#top_index = tf.expand_dims(top_index, axis=2)
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
top_index = tf.gather_nd(eval_index,expand_index)
#return top_index,eval_index, what
res = top_index * 2 - pow(2, self.tree_height - 1)
res = tf.concat([res, res+1],axis=1)
values = tf.concat([values, values], axis=1)
if not FLAGS.leaf_content_emb:
eval_emb = tf.gather(self.leaf_embedding, tf.cast(res - pow(2, self.tree_height - 1),tf.int32))
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
res = tf.gather_nd(res,expand_index)
return [res,values]
def vector_generation(self, text, model_prefix, doc_id = None):
if model_prefix == "D" and not FLAGS.leaf_content_emb:
return tf.nn.l2_normalize(tf.gather(self.leaf_embedding, doc_id),dim = 1)
dims = parse_dims(FLAGS.semantic_model_dims)
text_vecs, step_mask, sequence_length = xletter_feature_extractor(text, model_prefix, self.op_dict, FLAGS.xletter_cnt, FLAGS.xletter_win_size, FLAGS.dim_xletter_emb)
maxpooling_vec = mask_maxpool(tf.nn.tanh(text_vecs),step_mask)
dim_input = FLAGS.dim_xletter_emb
input_vec = maxpooling_vec
for i, dim in enumerate(self.dims):
dim_output = dim
random_range = math.sqrt(6.0/(dim_input+dim_output))
with tf.variable_scope("semantic_layer{:}".format(i),reuse=tf.AUTO_REUSE):
weight = tf.get_variable("weight_" + model_prefix, shape = [dim_input, dim_output], initializer = tf.random_uniform_initializer(-random_range, random_range))
output_vec = tf.matmul(input_vec, weight)
output_vec = tf.nn.tanh(output_vec)
input_vec = output_vec
normalized_vec = tf.nn.l2_normalize(output_vec, dim = 1)
return normalized_vec
def calc_loss(self, inference_res):
query_vec, doc_vec, doc_id = inference_res
batch_size = tf.shape(query_vec)[0]
#Leaf Layer Loss
posCos = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
allCos = [posCos]
for i in range(0, FLAGS.negative_sample):
random_indices = (tf.range(batch_size) + tf.random_uniform([batch_size],1,batch_size, tf.int32)) % batch_size
negCos = tf.reduce_sum(tf.multiply(query_vec, tf.gather(doc_vec, random_indices)),axis=1)
allCos.append(tf.where(tf.equal(negCos,1),tf.zeros_like(negCos),negCos))
allCos = tf.stack(allCos, axis=1)
softmax = tf.nn.softmax(allCos * FLAGS.softmax_gamma, dim=1)
leafLoss = tf.reduce_sum(-tf.log(softmax[:,0]))
#Node Layer Loss
doc_idx = doc_id + pow(2, self.tree_height - 1)
nodeLoss = tf.zeros_like(leafLoss,dtype=tf.float32)
for i in range(4, self.tree_height):
cosines = []
posIdx = tf.cast(doc_idx // pow(2, self.tree_height - i),tf.int32)
posVec = tf.nn.l2_normalize(tf.gather(self.layer_embedding, posIdx),dim = 1)
cosines.append(tf.reduce_sum(tf.multiply(query_vec, posVec), axis = 1))
nodeCnt = pow(2,i-1)
for j in range(0, FLAGS.layer_negative_sample):
random_idx = nodeCnt + (posIdx - nodeCnt + tf.random_uniform([batch_size],1,nodeCnt,tf.int32)) % nodeCnt
cosines.append(tf.reduce_sum(tf.multiply(query_vec, tf.nn.l2_normalize(tf.gather(self.layer_embedding, random_idx),dim=1)),axis=1))
cosines = tf.stack(cosines, axis=1)
softmax = tf.nn.softmax(cosines * FLAGS.softmax_gamma, dim = 1)
nodeLoss += tf.reduce_sum(-tf.log(softmax[:,0]))
weight = batch_size
loss = tf.cast(leafLoss + FLAGS.layer_weight * nodeLoss,tf.float32)
tf.summary.scalar('softmax_losses',loss)
return [loss, leafLoss, nodeLoss], weight
def | calc_score | identifier_name |
|
tree_based_retrieval.py | 0, mode = 'FAN_AVG', uniform = True) | TreeHeight = lambda x: int(math.log(x-1)/math.log(2)) + 2
indexCnt = count_idx(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file)
self.tree_height = TreeHeight(indexCnt+1)
self.tree_index = lookup_ops.index_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = indexCnt)
self.reverse_tree_index = lookup_ops.index_to_string_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = '<unk>')
self.dims = parse_dims(FLAGS.semantic_model_dims)
self.layer_embedding = tf.get_variable(name='tree_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if not FLAGS.leaf_content_emb:
self.leaf_embedding = tf.get_variable(name='leaf_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if FLAGS.use_mstf_ops == 1:
self.op_dict = mstf.dssm_dict(FLAGS.xletter_dict)
elif FLAGS.use_mstf_ops == -1:
self.op_dict = XletterPreprocessor(FLAGS.xletter_dict, FLAGS.xletter_win_size)
else:
self.op_dict = None
def inference(self, input_fields, mode):
if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
if FLAGS.use_mstf_ops:
query, doc = input_fields[0], input_fields[1]
else:
query, doc = input_fields[0][0], input_fields[1][0]
query_vec = self.vector_generation(query, 'Q')
doc_id = self.tree_index.lookup(doc)
doc_vec = self.vector_generation(doc, 'D', doc_id)
return query_vec, doc_vec, doc_id
elif mode == tf.contrib.learn.ModeKeys.INFER:
if FLAGS.use_mstf_ops:
query = input_fields[0]
else:
query = input_fields[0][0]
query_vec = self.vector_generation(query,'Q')
return [query_vec] + self.search(query_vec)
def search(self, query_vec):
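# layer-wise beam search down the binary tree: score the children of the current
# top_k nodes against the query vector and keep the best top_k at every level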
#[batch_size,vec_dim]
batch_size = tf.shape(query_vec)[0]
query_vec = tf.expand_dims(query_vec, axis = 1)
start_layer = math.floor(math.log(FLAGS.top_k)/math.log(2)) + 2
#[batch_size,N]
top_index = tf.tile(tf.expand_dims(tf.range(pow(2,start_layer-2), pow(2,start_layer-1)),axis=0),[batch_size,1])
#top_index = tf.range(pow(2, start_layer-1),pow(2,start_layer))
#return top_index
for i in range(start_layer, self.tree_height):
#[batch_size,2N]
eval_index = tf.concat([tf.cast(top_index * 2,tf.int32), tf.cast(top_index * 2 + 1,tf.int32)],axis = 1)
#return eval_index
#[batch_size,2N,vec_dim]
eval_emb = tf.gather(self.layer_embedding, eval_index)
#return tf.shape(eval_emb)
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
#return tf.shape(query_vec)
#[batch_size,2N] hope so....
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
#return eval_score
#return tf.shape(eval_score)
#Select Top N
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
#top_index = tf.expand_dims(top_index, axis=2)
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
top_index = tf.gather_nd(eval_index,expand_index)
#return top_index,eval_index, what
res = top_index * 2 - pow(2, self.tree_height - 1)
res = tf.concat([res, res+1],axis=1)
values = tf.concat([values, values], axis=1)
if not FLAGS.leaf_content_emb:
eval_emb = tf.gather(self.leaf_embedding, tf.cast(res - pow(2, self.tree_height - 1),tf.int32))
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
res = tf.gather_nd(res,expand_index)
return [res,values]
def vector_generation(self, text, model_prefix, doc_id = None):
if model_prefix == "D" and not FLAGS.leaf_content_emb:
return tf.nn.l2_normalize(tf.gather(self.leaf_embedding, doc_id),dim = 1)
dims = parse_dims(FLAGS.semantic_model_dims)
text_vecs, step_mask, sequence_length = xletter_feature_extractor(text, model_prefix, self.op_dict, FLAGS.xletter_cnt, FLAGS.xletter_win_size, FLAGS.dim_xletter_emb)
maxpooling_vec = mask_maxpool(tf.nn.tanh(text_vecs),step_mask)
dim_input = FLAGS.dim_xletter_emb
input_vec = maxpooling_vec
for i, dim in enumerate(self.dims):
dim_output = dim
random_range = math.sqrt(6.0/(dim_input+dim_output))
with tf.variable_scope("semantic_layer{:}".format(i),reuse=tf.AUTO_REUSE):
weight = tf.get_variable("weight_" + model_prefix, shape = [dim_input, dim_output], initializer = tf.random_uniform_initializer(-random_range, random_range))
output_vec = tf.matmul(input_vec, weight)
output_vec = tf.nn.tanh(output_vec)
input_vec = output_vec
normalized_vec = tf.nn.l2_normalize(output_vec, dim = 1)
return normalized_vec
def calc_loss(self, inference_res):
query_vec, doc_vec, doc_id = inference_res
batch_size = tf.shape(query_vec)[0]
#Leaf Layer Loss
posCos = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
allCos = [posCos]
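# in-batch negative sampling: a random circular shift pairs each query with other docs from the same batch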
for i in range(0, FLAGS.negative_sample):
random_indices = (tf.range(batch_size) + tf.random_uniform([batch_size],1,batch_size, tf.int32)) % batch_size
negCos = tf.reduce_sum(tf.multiply(query_vec, tf.gather(doc_vec, random_indices)),axis=1)
allCos.append(tf.where(tf.equal(negCos,1),tf.zeros_like(negCos),negCos))
allCos = tf.stack(allCos, axis=1)
softmax = tf.nn.softmax(allCos * FLAGS.softmax_gamma, dim=1)
leafLoss = tf.reduce_sum(-tf.log(softmax[:,0]))
#Node Layer Loss
doc_idx = doc_id + pow(2, self.tree_height - 1)
nodeLoss = tf.zeros_like(leafLoss,dtype=tf.float32)
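# tree-level loss: at each level, the ancestor of the positive leaf is contrasted against randomly sampled nodes of the same level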
for i in range(4, self.tree_height):
cosines = []
posIdx = tf.cast(doc_idx // pow(2, self.tree_height - i),tf.int32)
posVec = tf.nn.l2_normalize(tf.gather(self.layer_embedding, posIdx),dim = 1)
cosines.append(tf.reduce_sum(tf.multiply(query_vec, posVec), axis = 1))
nodeCnt = pow(2,i-1)
for j in range(0, FLAGS.layer_negative_sample):
random_idx = nodeCnt + (posIdx - nodeCnt + tf.random_uniform([batch_size],1,nodeCnt,tf.int32)) % nodeCnt
cosines.append(tf.reduce_sum(tf.multiply(query_vec, tf.nn.l2_normalize(tf.gather(self.layer_embedding, random_idx),dim=1)),axis=1))
cosines = tf.stack(cosines, axis=1)
softmax = tf.nn.softmax(cosines * FLAGS.softmax_gamma, dim = 1)
nodeLoss += tf.reduce_sum(-tf.log(softmax[:,0]))
weight = batch_size
loss = tf.cast(leafLoss + FLAGS.layer_weight * nodeLoss,tf.float32)
tf.summary.scalar('softmax_losses',loss)
return [loss, leafLoss, nodeLoss], weight
def |
class TreeModel():
def __init__(self): | random_line_split |
run.py | accu_prob_N1 = K.cumsum(prob_item_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
mask1N = K.cumsum(true_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
maskN1 = K.cumsum(true_ratings, axis=2)
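# ordinal cost as in CF-NADE: compare each rating level's probability against the
# cumulative mass from both directions (1..K and K..1), restricted by the masks to
# the correct side of the true rating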
cost_ordinal_1N = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_1N)) * mask1N, axis=2)
cost_ordinal_N1 = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_N1)) * maskN1, axis=2)
cost_ordinal = cost_ordinal_1N + cost_ordinal_N1
nll_item_ratings = K.sum(
-(true_ratings * K.log(prob_item_ratings)), axis=2)
nll = std * K.sum(
nll_item_ratings, axis=1) * 1.0 * D / (D - d + 1e-6) + alpha * K.sum(
cost_ordinal, axis=1) * 1.0 * D / (D - d + 1e-6)
cost = K.mean(nll)
cost = K.expand_dims(cost, 0)
return cost
class EvaluationCallback(Callback):
def __init__(self, data_set, new_items, training_set):
self.data_set = data_set
self.rmses = []
self.rate_score = np.array([1, 2, 3, 4, 5], np.float32)
self.new_items = new_items
self.training_set = training_set
def eval_rmse(self):
squared_error = []
n_samples = []
for i, batch in enumerate(self.data_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = self.model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (
pred_batch * self.rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
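# positions with no observed ratings fall back to a neutral prediction of 3 (midpoint of the 1-5 scale)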
pred_r[:, self.new_items] = 3
mask = out_r.sum(axis=2)
# debug aid (disabled): print the first sample's true vs. predicted ratings
# if i == 0:
#     print([true_r[0][j] for j in np.nonzero(true_r[0] * mask[0])[0]])
#     print([pred_r[0][j] for j in np.nonzero(pred_r[0] * mask[0])[0]])
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
return rmse
def on_epoch_end(self, epoch, logs={}):
score = self.eval_rmse()
if self.training_set:
|
else:
print("validation set RMSE for epoch %d is %f" % (epoch, score))
self.rmses.append(score)
def _train(args):
if K.backend() != 'tensorflow':
print("This repository only support tensorflow backend.")
raise NotImplementedError()
batch_size = 64
num_users = 6040
num_items = 3706
data_sample = 1.0
input_dim0 = 6040
input_dim1 = 5
std = 0.0
alpha = 1.0
print('Loading data...')
train_file_list = sorted(
glob.glob(os.path.join(('data/train_set'), 'part*')))
val_file_list = sorted(glob.glob(os.path.join(('data/val_set/'), 'part*')))
test_file_list = sorted(
glob.glob(os.path.join(('data/test_set/'), 'part*')))
train_file_list = [
dfile for dfile in train_file_list if os.stat(dfile).st_size != 0
]
val_file_list = [
dfile for dfile in val_file_list if os.stat(dfile).st_size != 0
]
test_file_list = [
dfile for dfile in test_file_list if os.stat(dfile).st_size != 0
]
random.shuffle(train_file_list)
random.shuffle(val_file_list)
random.shuffle(test_file_list)
train_file_list = train_file_list[:max(
int(len(train_file_list) * data_sample), 1)]
train_set = DataSet(
train_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=0)
val_set = DataSet(
val_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=1)
test_set = DataSet(
test_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=2)
rating_freq = np.zeros((6040, 5))
init_b = np.zeros((6040, 5))
for batch in val_set.generate(max_iters=1):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
rating_freq += inp_r.sum(axis=0)
log_rating_freq = np.log(rating_freq + 1e-8)
log_rating_freq_diff = np.diff(log_rating_freq, axis=1)
init_b[:, 1:] = log_rating_freq_diff
init_b[:, 0] = log_rating_freq[:, 0]
new_items = np.where(rating_freq.sum(axis=1) == 0)[0]
input_layer = Input(shape=(input_dim0, input_dim1), name='input_ratings')
output_ratings = Input(
shape=(input_dim0, input_dim1), name='output_ratings')
input_masks = Input(shape=(input_dim0, ), name='input_masks')
output_masks = Input(shape=(input_dim0, ), name='output_masks')
# from keras import backend as K
# print(K.tensorflow_backend._get_available_gpus())
# input('Enter >>')
# nade_layer = Dropout(0.0)(input_layer)
nade_layer = input_layer
nade_layer = NADE(
hidden_dim=args.hidden_dim,
activation='tanh',
bias=True,
W_regularizer=keras.regularizers.l2(0.02),
V_regularizer=keras.regularizers.l2(0.02),
b_regularizer=keras.regularizers.l2(0.02),
c_regularizer=keras.regularizers.l2(0.02),
args=args)(nade_layer)
predicted_ratings = Lambda(
prediction_layer,
output_shape=prediction_output_shape,
name='predicted_ratings')(nade_layer)
d = Lambda(d_layer, output_shape=d_output_shape, name='d')(input_masks)
sum_masks = add([input_masks, output_masks])
D = Lambda(D_layer, output_shape=D_output_shape, name='D')(sum_masks)
loss_out = Lambda(
rating_cost_lambda_func, output_shape=(1, ), name='nade_loss')(
[nade_layer, output_ratings, input_masks, output_masks, D, d])
cf_nade_model = Model(
inputs=[input_layer, output_ratings, input_masks, output_masks],
outputs=[loss_out, predicted_ratings])
cf_nade_model.summary()
adam = Adam(lr=args.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
cf_nade_model.compile(
loss={'nade_loss': lambda y_true, y_pred: y_pred}, optimizer=adam)
train_evaluation_callback = EvaluationCallback(
data_set=train_set, new_items=new_items, training_set=True)
valid_evaluation_callback = EvaluationCallback(
data_set=val_set, new_items=new_items, training_set=False)
print('Training...')
cf_nade_model.fit_generator(
train_set.generate(),
steps_per_epoch=(train_set.get_corpus_size() // batch_size),
epochs=30,
validation_data=val_set.generate(),
validation_steps=(val_set.get_corpus_size() // batch_size),
shuffle=True,
callbacks=[
train_set, val_set, train_evaluation_callback,
valid_evaluation_callback
],
verbose=1)
print('Testing...')
rmses = []
rate_score = np.array([1, 2, 3, 4, 5], np.float32)
new_items = new_items
squared_error = []
n_samples = []
for i, batch in enumerate(test_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[ | print("training set RMSE for epoch %d is %f" % (epoch, score)) | conditional_block |
run.py | (x):
# x.shape = (?,6040,5)
x_cumsum = K.cumsum(x, axis=2)
# x_cumsum.shape = (?,6040,5)
output = K.softmax(x_cumsum)
# output = (?,6040,5)
return output
def prediction_output_shape(input_shape):
return input_shape
def d_layer(x):
return K.sum(x, axis=1)
def d_output_shape(input_shape):
return (input_shape[0], )
def D_layer(x):
return K.sum(x, axis=1)
def D_output_shape(input_shape):
return (input_shape[0], )
def rating_cost_lambda_func(args):
alpha = 1.
std = 0.01
pred_score, true_ratings, input_masks, output_masks, D, d = args
pred_score_cum = K.cumsum(pred_score, axis=2)
prob_item_ratings = K.softmax(pred_score_cum)
accu_prob_1N = K.cumsum(prob_item_ratings, axis=2)
accu_prob_N1 = K.cumsum(prob_item_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
mask1N = K.cumsum(true_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
maskN1 = K.cumsum(true_ratings, axis=2)
cost_ordinal_1N = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_1N)) * mask1N, axis=2)
cost_ordinal_N1 = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_N1)) * maskN1, axis=2)
cost_ordinal = cost_ordinal_1N + cost_ordinal_N1
nll_item_ratings = K.sum(
-(true_ratings * K.log(prob_item_ratings)), axis=2)
nll = std * K.sum(
nll_item_ratings, axis=1) * 1.0 * D / (D - d + 1e-6) + alpha * K.sum(
cost_ordinal, axis=1) * 1.0 * D / (D - d + 1e-6)
cost = K.mean(nll)
cost = K.expand_dims(cost, 0)
return cost
class EvaluationCallback(Callback):
def __init__(self, data_set, new_items, training_set):
self.data_set = data_set
self.rmses = []
self.rate_score = np.array([1, 2, 3, 4, 5], np.float32)
self.new_items = new_items
self.training_set = training_set
def eval_rmse(self):
squared_error = []
n_samples = []
for i, batch in enumerate(self.data_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = self.model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (
pred_batch * self.rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
pred_r[:, self.new_items] = 3
mask = out_r.sum(axis=2)
# '''
# if i == 0:
# print [true_r[0][j] for j in np.nonzero(true_r[0]* mask[0])[0]]
# print [pred_r[0][j] for j in np.nonzero(pred_r[0]* mask[0])[0]]
# '''
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
return rmse
def on_epoch_end(self, epoch, logs={}):
score = self.eval_rmse()
if self.training_set:
print("training set RMSE for epoch %d is %f" % (epoch, score))
else:
print("validation set RMSE for epoch %d is %f" % (epoch, score))
self.rmses.append(score)
def _train(args):
if K.backend() != 'tensorflow':
print("This repository only support tensorflow backend.")
raise NotImplementedError()
batch_size = 64
num_users = 6040
num_items = 3706
data_sample = 1.0
input_dim0 = 6040
input_dim1 = 5
std = 0.0
alpha = 1.0
print('Loading data...')
train_file_list = sorted(
glob.glob(os.path.join(('data/train_set'), 'part*')))
val_file_list = sorted(glob.glob(os.path.join(('data/val_set/'), 'part*')))
test_file_list = sorted(
glob.glob(os.path.join(('data/test_set/'), 'part*')))
train_file_list = [
dfile for dfile in train_file_list if os.stat(dfile).st_size != 0
]
val_file_list = [
dfile for dfile in val_file_list if os.stat(dfile).st_size != 0
]
test_file_list = [
dfile for dfile in test_file_list if os.stat(dfile).st_size != 0
]
random.shuffle(train_file_list)
random.shuffle(val_file_list)
random.shuffle(test_file_list)
train_file_list = train_file_list[:max(
int(len(train_file_list) * data_sample), 1)]
train_set = DataSet(
train_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=0)
val_set = DataSet(
val_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=1)
test_set = DataSet(
test_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=2)
rating_freq = np.zeros((6040, 5))
init_b = np.zeros((6040, 5))
for batch in val_set.generate(max_iters=1):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
rating_freq += inp_r.sum(axis=0)
log_rating_freq = np.log(rating_freq + 1e-8)
log_rating_freq_diff = np.diff(log_rating_freq, axis=1)
init_b[:, 1:] = log_rating_freq_diff
init_b[:, 0] = log_rating_freq[:, 0]
new_items = np.where(rating_freq.sum(axis=1) == 0)[0]
input_layer = Input(shape=(input_dim0, input_dim1), name='input_ratings')
output_ratings = Input(
shape=(input_dim0, input_dim1), name='output_ratings')
input_masks = Input(shape=(input_dim0, ), name='input_masks')
output_masks = Input(shape=(input_dim0, ), name='output_masks')
# from keras import backend as K
# print(K.tensorflow_backend._get_available_gpus())
# input('Enter >>')
# nade_layer = Dropout(0.0)(input_layer)
nade_layer = input_layer
nade_layer = NADE(
hidden_dim=args.hidden_dim,
activation='tanh',
bias=True,
W_regularizer=keras.regularizers.l2(0.02),
V_regularizer=keras.regularizers.l2(0.02),
b_regularizer=keras.regularizers.l2(0.02),
c_regularizer=keras.regularizers.l2(0.02),
args=args)(nade_layer)
predicted_ratings = Lambda(
prediction_layer,
output_shape=prediction_output_shape,
name='predicted_ratings')(nade_layer)
d = Lambda(d_layer, output_shape=d_output_shape, name='d')(input_masks)
sum_masks = add([input_masks, output_masks])
D = Lambda(D_layer, output_shape=D_output_shape, name='D')(sum_masks)
loss_out = Lambda(
rating_cost_lambda_func, output_shape=(1, ), name='nade_loss')(
[nade_layer, output_ratings, input_masks, output_masks, D, d])
cf_nade_model = Model(
inputs=[input_layer, output_ratings, input_masks, output_masks],
outputs=[loss_out, predicted_ratings])
cf_nade_model.summary()
adam = Adam(lr=args.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
cf_nade_model.compile(
loss={'nade_loss': lambda y_true, y_pred: y_pred}, optimizer=adam)
train_evaluation_callback = EvaluationCallback(
data_set=train_set | prediction_layer | identifier_name |
|
run.py |
def d_output_shape(input_shape):
return (input_shape[0], )
def D_layer(x):
return K.sum(x, axis=1)
def D_output_shape(input_shape):
return (input_shape[0], )
def rating_cost_lambda_func(args):
alpha = 1.
std = 0.01
pred_score, true_ratings, input_masks, output_masks, D, d = args
pred_score_cum = K.cumsum(pred_score, axis=2)
prob_item_ratings = K.softmax(pred_score_cum)
accu_prob_1N = K.cumsum(prob_item_ratings, axis=2)
accu_prob_N1 = K.cumsum(prob_item_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
mask1N = K.cumsum(true_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
maskN1 = K.cumsum(true_ratings, axis=2)
cost_ordinal_1N = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_1N)) * mask1N, axis=2)
cost_ordinal_N1 = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_N1)) * maskN1, axis=2)
cost_ordinal = cost_ordinal_1N + cost_ordinal_N1
nll_item_ratings = K.sum(
-(true_ratings * K.log(prob_item_ratings)), axis=2)
nll = std * K.sum(
nll_item_ratings, axis=1) * 1.0 * D / (D - d + 1e-6) + alpha * K.sum(
cost_ordinal, axis=1) * 1.0 * D / (D - d + 1e-6)
cost = K.mean(nll)
cost = K.expand_dims(cost, 0)
return cost
class EvaluationCallback(Callback):
def __init__(self, data_set, new_items, training_set):
self.data_set = data_set
self.rmses = []
self.rate_score = np.array([1, 2, 3, 4, 5], np.float32)
self.new_items = new_items
self.training_set = training_set
def eval_rmse(self):
squared_error = []
n_samples = []
for i, batch in enumerate(self.data_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = self.model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (
pred_batch * self.rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
pred_r[:, self.new_items] = 3
mask = out_r.sum(axis=2)
# '''
# if i == 0:
# print [true_r[0][j] for j in np.nonzero(true_r[0]* mask[0])[0]]
# print [pred_r[0][j] for j in np.nonzero(pred_r[0]* mask[0])[0]]
# '''
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
return rmse
def on_epoch_end(self, epoch, logs={}):
score = self.eval_rmse()
if self.training_set:
print("training set RMSE for epoch %d is %f" % (epoch, score))
else:
print("validation set RMSE for epoch %d is %f" % (epoch, score))
self.rmses.append(score)
def _train(args):
if K.backend() != 'tensorflow':
print("This repository only support tensorflow backend.")
raise NotImplementedError()
batch_size = 64
num_users = 6040
num_items = 3706
data_sample = 1.0
input_dim0 = 6040
input_dim1 = 5
std = 0.0
alpha = 1.0
print('Loading data...')
train_file_list = sorted(
glob.glob(os.path.join(('data/train_set'), 'part*')))
val_file_list = sorted(glob.glob(os.path.join(('data/val_set/'), 'part*')))
test_file_list = sorted(
glob.glob(os.path.join(('data/test_set/'), 'part*')))
train_file_list = [
dfile for dfile in train_file_list if os.stat(dfile).st_size != 0
]
val_file_list = [
dfile for dfile in val_file_list if os.stat(dfile).st_size != 0
]
test_file_list = [
dfile for dfile in test_file_list if os.stat(dfile).st_size != 0
]
random.shuffle(train_file_list)
random.shuffle(val_file_list)
random.shuffle(test_file_list)
train_file_list = train_file_list[:max(
int(len(train_file_list) * data_sample), 1)]
train_set = DataSet(
train_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=0)
val_set = DataSet(
val_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=1)
test_set = DataSet(
test_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=2)
rating_freq = np.zeros((6040, 5))
init_b = np.zeros((6040, 5))
for batch in val_set.generate(max_iters=1):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
rating_freq += inp_r.sum(axis=0)
log_rating_freq = np.log(rating_freq + 1e-8)
log_rating_freq_diff = np.diff(log_rating_freq, axis=1)
init_b[:, 1:] = log_rating_freq_diff
init_b[:, 0] = log_rating_freq[:, 0]
new_items = np.where(rating_freq.sum(axis=1) == 0)[0]
input_layer = Input(shape=(input_dim0, input_dim1), name='input_ratings')
output_ratings = Input(
shape=(input_dim0, input_dim1), name='output_ratings')
input_masks = Input(shape=(input_dim0, ), name='input_masks')
output_masks = Input(shape=(input_dim0, ), name='output_masks')
# from keras import backend as K
# print(K.tensorflow_backend._get_available_gpus())
# input('Enter >>')
# nade_layer = Dropout(0.0)(input_layer)
nade_layer = input_layer
nade_layer = NADE(
hidden_dim=args.hidden_dim,
activation='tanh',
bias=True,
W_regularizer=keras.regularizers.l2(0.02),
V_regularizer=keras.regularizers.l2(0.02),
b_regularizer=keras.regularizers.l2(0.02),
c_regularizer=keras.regularizers.l2(0.02),
args=args)(nade_layer)
predicted_ratings = Lambda(
prediction_layer,
output_shape=prediction_output_shape,
name='predicted_ratings')(nade_layer)
d = Lambda(d_layer, output_shape=d_output_shape, name='d')(input_masks)
sum_masks = add([input_masks, output_masks])
D = Lambda(D_layer, output_shape=D_output_shape, name='D')(sum_masks)
loss_out = Lambda(
rating_cost_lambda_func, output_shape=(1, ), name='nade_loss')(
[nade_layer, output_ratings, input_masks, output_masks, D, d])
cf_nade_model = Model(
inputs=[input_layer, output_ratings, input_masks, output_masks],
outputs=[loss_out, predicted_ratings])
cf_nade_model.summary()
adam = Adam(lr=args.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
cf_nade_model.compile(
loss={'nade_loss': lambda y_true, y_pred: y_pred}, optimizer=adam)
train_evaluation_callback = EvaluationCallback(
data_set=train_set, new_items=new_items, training_set=True)
valid_evaluation_callback = EvaluationCallback(
data_set=val_set, new_items=new_items, training_set=False)
print('Training...')
cf_nade_model.fit_generator(
train_set.generate(),
steps_per_epoch=(train_set.get_corpus_size() // batch_size),
epochs=30,
validation_data=val_set.generate(),
validation_steps=(val_set.get_corpus_size | return K.sum(x, axis=1) | identifier_body |
|
run.py | epoch, score))
else:
print("validation set RMSE for epoch %d is %f" % (epoch, score))
self.rmses.append(score)
def _train(args):
if K.backend() != 'tensorflow':
print("This repository only support tensorflow backend.")
raise NotImplementedError()
batch_size = 64
num_users = 6040
num_items = 3706
data_sample = 1.0
input_dim0 = 6040
input_dim1 = 5
std = 0.0
alpha = 1.0
print('Loading data...')
train_file_list = sorted(
glob.glob(os.path.join(('data/train_set'), 'part*')))
val_file_list = sorted(glob.glob(os.path.join(('data/val_set/'), 'part*')))
test_file_list = sorted(
glob.glob(os.path.join(('data/test_set/'), 'part*')))
train_file_list = [
dfile for dfile in train_file_list if os.stat(dfile).st_size != 0
]
val_file_list = [
dfile for dfile in val_file_list if os.stat(dfile).st_size != 0
]
test_file_list = [
dfile for dfile in test_file_list if os.stat(dfile).st_size != 0
]
random.shuffle(train_file_list)
random.shuffle(val_file_list)
random.shuffle(test_file_list)
train_file_list = train_file_list[:max(
int(len(train_file_list) * data_sample), 1)]
train_set = DataSet(
train_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=0)
val_set = DataSet(
val_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=1)
test_set = DataSet(
test_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=2)
rating_freq = np.zeros((6040, 5))
init_b = np.zeros((6040, 5))
for batch in val_set.generate(max_iters=1):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
rating_freq += inp_r.sum(axis=0)
log_rating_freq = np.log(rating_freq + 1e-8)
log_rating_freq_diff = np.diff(log_rating_freq, axis=1)
init_b[:, 1:] = log_rating_freq_diff
init_b[:, 0] = log_rating_freq[:, 0]
new_items = np.where(rating_freq.sum(axis=1) == 0)[0]
input_layer = Input(shape=(input_dim0, input_dim1), name='input_ratings')
output_ratings = Input(
shape=(input_dim0, input_dim1), name='output_ratings')
input_masks = Input(shape=(input_dim0, ), name='input_masks')
output_masks = Input(shape=(input_dim0, ), name='output_masks')
# from keras import backend as K
# print(K.tensorflow_backend._get_available_gpus())
# input('Enter >>')
# nade_layer = Dropout(0.0)(input_layer)
nade_layer = input_layer
nade_layer = NADE(
hidden_dim=args.hidden_dim,
activation='tanh',
bias=True,
W_regularizer=keras.regularizers.l2(0.02),
V_regularizer=keras.regularizers.l2(0.02),
b_regularizer=keras.regularizers.l2(0.02),
c_regularizer=keras.regularizers.l2(0.02),
args=args)(nade_layer)
predicted_ratings = Lambda(
prediction_layer,
output_shape=prediction_output_shape,
name='predicted_ratings')(nade_layer)
d = Lambda(d_layer, output_shape=d_output_shape, name='d')(input_masks)
sum_masks = add([input_masks, output_masks])
D = Lambda(D_layer, output_shape=D_output_shape, name='D')(sum_masks)
loss_out = Lambda(
rating_cost_lambda_func, output_shape=(1, ), name='nade_loss')(
[nade_layer, output_ratings, input_masks, output_masks, D, d])
cf_nade_model = Model(
inputs=[input_layer, output_ratings, input_masks, output_masks],
outputs=[loss_out, predicted_ratings])
cf_nade_model.summary()
adam = Adam(lr=args.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
cf_nade_model.compile(
loss={'nade_loss': lambda y_true, y_pred: y_pred}, optimizer=adam)
train_evaluation_callback = EvaluationCallback(
data_set=train_set, new_items=new_items, training_set=True)
valid_evaluation_callback = EvaluationCallback(
data_set=val_set, new_items=new_items, training_set=False)
print('Training...')
cf_nade_model.fit_generator(
train_set.generate(),
steps_per_epoch=(train_set.get_corpus_size() // batch_size),
epochs=30,
validation_data=val_set.generate(),
validation_steps=(val_set.get_corpus_size() // batch_size),
shuffle=True,
callbacks=[
train_set, val_set, train_evaluation_callback,
valid_evaluation_callback
],
verbose=1)
print('Testing...')
rmses = []
rate_score = np.array([1, 2, 3, 4, 5], np.float32)
new_items = new_items
squared_error = []
n_samples = []
for i, batch in enumerate(test_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = cf_nade_model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (pred_batch * rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
pred_r[:, new_items] = 3
mask = out_r.sum(axis=2)
# '''
# if i == 0:
# print [true_r[0][j] for j in np.nonzero(true_r[0]* mask[0])[0]]
# print [pred_r[0][j] for j in np.nonzero(pred_r[0]* mask[0])[0]]
# '''
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
print("test set RMSE is %f" % (rmse))
def main():
import argparse
parser = argparse.ArgumentParser(description='CFNADE-keras')
parser.add_argument(
'--hidden_dim',
type=int,
default=500,
help='Iteration unit for validation')
# experimenting with a 500-unit model on keras-1 and a 250-unit model on keras-2...
parser.add_argument(
'--normalize_1st_layer',
type=bool,
default=False,
help='normalize 1st layer')
parser.add_argument(
'--learning_rate',
type=float,
default=1e-3,
help='learning rate for optimizer.')
# parser.add_argument(
# '--iter_validation',
# type=int,
# default=10,
# help='Iteration unit for validation')
# parser.add_argument(
# '--max_iter', type=int, default=10000000, help='Max Iteration')
# parser.add_argument(
# '--n_hidden_unit',
# type=int,
# default=500,
# help='The number of hidden unit')
# parser.add_argument(
# '--parameter_sharing',
# type=bool,
# default=False,
# help='parameter sharing')
# parser.add_argument(
# '--lambda_1',
# type=float,
# default=0.015,
# help='lambda for weight decay.')
# parser.add_argument(
# '--lambda_2',
# type=float,
# default=0.015,
# help='lambda for weight decay.')
# parser.add_argument(
# '--dropout_rate', type=float, default=0., help='dropout_rate')
# parser.add_argument(
# '--iter_early_stop',
# type=int,
# default=10000,
# help='the number of iteration for early stop.')
# parser.add_argument( | # '--data_seed', type=int, default=1, help='the seed for dataset')
args = parser.parse_args() | random_line_split |
|
service.go | /zap"
"github.com/open-telemetry/opentelemetry-collector/component"
"github.com/open-telemetry/opentelemetry-collector/config"
"github.com/open-telemetry/opentelemetry-collector/config/configcheck"
"github.com/open-telemetry/opentelemetry-collector/config/configmodels"
"github.com/open-telemetry/opentelemetry-collector/extension"
"github.com/open-telemetry/opentelemetry-collector/service/builder"
)
// Application represents a collector application
type Application struct {
info ApplicationStartInfo
rootCmd *cobra.Command
v *viper.Viper
logger *zap.Logger
exporters builder.Exporters
builtReceivers builder.Receivers
builtPipelines builder.BuiltPipelines
factories config.Factories
config *configmodels.Config
extensions []extension.ServiceExtension
// stopTestChan is used to terminate the application in end to end tests.
stopTestChan chan struct{}
// readyChan is used in tests to indicate that the application is ready.
readyChan chan struct{}
// asyncErrorChannel is used to signal a fatal error from any component.
asyncErrorChannel chan error
}
// Command returns Application's root command.
func (app *Application) Command() *cobra.Command {
return app.rootCmd
}
// ApplicationStartInfo is the information that is logged at the application start.
// This information can be overridden in custom builds.
type ApplicationStartInfo struct {
// Executable file name, e.g. "otelcol".
ExeName string
// Long name, used e.g. in the logs.
LongName string
// Version string.
Version string
// Git hash of the source code.
GitHash string
}
var _ component.Host = (*Application)(nil)
// Context returns a context provided by the host to be used on the receiver
// operations.
func (app *Application) Context() context.Context {
// For now simply the background context.
return context.Background()
}
// New creates and returns a new instance of Application.
func New(
factories config.Factories,
appInfo ApplicationStartInfo,
) (*Application, error) {
if err := configcheck.ValidateConfigFromFactories(factories); err != nil {
return nil, err
}
app := &Application{
info: appInfo,
v: viper.New(),
readyChan: make(chan struct{}),
factories: factories,
}
rootCmd := &cobra.Command{
Use: appInfo.ExeName,
Long: appInfo.LongName,
Run: func(cmd *cobra.Command, args []string) {
app.init()
app.execute()
},
}
// TODO: coalesce this code and expose this information to other components.
flagSet := new(flag.FlagSet)
addFlagsFns := []func(*flag.FlagSet){
telemetryFlags,
builder.Flags,
loggerFlags,
}
for _, addFlags := range addFlagsFns {
addFlags(flagSet)
}
rootCmd.Flags().AddGoFlagSet(flagSet)
app.rootCmd = rootCmd
return app, nil
}
// ReportFatalError is used to report to the host that the receiver encountered
// a fatal error (i.e.: an error that the instance can't recover from) after
// its start function has already returned.
func (app *Application) ReportFatalError(err error) {
app.asyncErrorChannel <- err
}
func (app *Application) init() {
file := builder.GetConfigFile()
if file == "" {
log.Fatalf("Config file not specified")
}
app.v.SetConfigFile(file)
err := app.v.ReadInConfig()
if err != nil {
log.Fatalf("Error loading config file %q: %v", file, err)
}
app.logger, err = newLogger()
if err != nil {
log.Fatalf("Failed to get logger: %v", err)
}
}
func (app *Application) setupTelemetry(ballastSizeBytes uint64) {
app.logger.Info("Setting up own telemetry...")
err := AppTelemetry.init(app.asyncErrorChannel, ballastSizeBytes, app.logger)
if err != nil {
app.logger.Error("Failed to initialize telemetry", zap.Error(err))
os.Exit(1)
}
}
// runAndWaitForShutdownEvent waits for one of the shutdown events that can happen.
func (app *Application) runAndWaitForShutdownEvent() {
app.logger.Info("Everything is ready. Begin running and processing data.")
// Plug SIGTERM signal into a channel.
signalsChannel := make(chan os.Signal, 1)
signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
// set the channel to stop testing.
app.stopTestChan = make(chan struct{})
// notify tests that it is ready.
close(app.readyChan)
select {
case err := <-app.asyncErrorChannel:
app.logger.Error("Asynchronous error received, terminating process", zap.Error(err))
case s := <-signalsChannel:
app.logger.Info("Received signal from OS", zap.String("signal", s.String()))
case <-app.stopTestChan:
app.logger.Info("Received stop test request")
}
}
func (app *Application) setupConfigurationComponents() {
// Load configuration.
app.logger.Info("Loading configuration...")
cfg, err := config.Load(app.v, app.factories, app.logger)
if err != nil {
log.Fatalf("Cannot load configuration: %v", err)
}
app.config = cfg
app.logger.Info("Applying configuration...")
if err := app.setupExtensions(); err != nil {
log.Fatalf("Cannot setup extensions: %v", err)
}
app.setupPipelines()
}
func (app *Application) setupExtensions() error {
for _, extName := range app.config.Service.Extensions {
extCfg, exists := app.config.Extensions[extName]
if !exists {
return fmt.Errorf("extension %q is not configured", extName)
}
factory, exists := app.factories.Extensions[extCfg.Type()]
if !exists {
return fmt.Errorf("extension factory for type %q is not configured", extCfg.Type())
}
ext, err := factory.CreateExtension(app.logger, extCfg)
if err != nil {
return fmt.Errorf("failed to create extension %q: %v", extName, err)
}
// Check if the factory really created the extension.
if ext == nil {
return fmt.Errorf("factory for %q produced a nil extension", extName)
}
if err := ext.Start(app); err != nil {
return fmt.Errorf("error starting extension %q: %v", extName, err)
}
app.extensions = append(app.extensions, ext)
}
return nil
}
func (app *Application) setupPipelines() {
// Pipeline is built backwards, starting from exporters, so that we create objects
// which are referenced before objects which reference them.
// First create exporters.
var err error
app.exporters, err = builder.NewExportersBuilder(app.logger, app.config, app.factories.Exporters).Build()
if err != nil {
log.Fatalf("Cannot build exporters: %v", err)
}
app.logger.Info("Starting exporters...")
err = app.exporters.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start exporters: %v", err)
}
// Create pipelines and their processors and plug exporters to the
// end of the pipelines.
app.builtPipelines, err = builder.NewPipelinesBuilder(app.logger, app.config, app.exporters, app.factories.Processors).Build()
if err != nil {
log.Fatalf("Cannot build pipelines: %v", err)
}
app.logger.Info("Starting processors...")
err = app.builtPipelines.StartProcessors(app.logger, app)
if err != nil {
log.Fatalf("Cannot start processors: %v", err)
}
// Create receivers and plug them into the start of the pipelines.
app.builtReceivers, err = builder.NewReceiversBuilder(app.logger, app.config, app.builtPipelines, app.factories.Receivers).Build()
if err != nil {
log.Fatalf("Cannot build receivers: %v", err)
}
app.logger.Info("Starting receivers...")
err = app.builtReceivers.StartAll(app.logger, app)
if err != nil |
}
func (app *Application) notifyPipelineReady() {
for i, ext := range app.extensions {
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.Ready(); err != nil {
log.Fatalf(
"Error notifying extension %q that the pipeline was started: %v",
app.config.Service.Extensions[i],
err,
)
}
}
}
}
func (app *Application) notifyPipelineNotReady() {
// Notify on reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.NotReady(); err != nil {
app.logger.Warn(
"Error notifying extension that the pipeline was shutdown",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
}
func (app *Application) shutdownPipelines() {
// Shutdown order is the reverse of building: first receivers, then flushing pipelines
// giving senders a chance to send all their data. This may take time, the allowed
// time should be part of configuration.
app.logger | {
log.Fatalf("Cannot start receivers: %v", err)
} | conditional_block |
service.go | .org/zap"
"github.com/open-telemetry/opentelemetry-collector/component"
"github.com/open-telemetry/opentelemetry-collector/config"
"github.com/open-telemetry/opentelemetry-collector/config/configcheck"
"github.com/open-telemetry/opentelemetry-collector/config/configmodels"
"github.com/open-telemetry/opentelemetry-collector/extension"
"github.com/open-telemetry/opentelemetry-collector/service/builder"
)
// Application represents a collector application
type Application struct {
info ApplicationStartInfo
rootCmd *cobra.Command
v *viper.Viper
logger *zap.Logger
exporters builder.Exporters
builtReceivers builder.Receivers
builtPipelines builder.BuiltPipelines
factories config.Factories
config *configmodels.Config
extensions []extension.ServiceExtension
// stopTestChan is used to terminate the application in end to end tests.
stopTestChan chan struct{}
// readyChan is used in tests to indicate that the application is ready.
readyChan chan struct{}
// asyncErrorChannel is used to signal a fatal error from any component.
asyncErrorChannel chan error
}
// Command returns Application's root command.
func (app *Application) Command() *cobra.Command {
return app.rootCmd
}
// ApplicationStartInfo is the information that is logged at the application start.
// This information can be overridden in custom builds.
type ApplicationStartInfo struct {
// Executable file name, e.g. "otelcol".
ExeName string
// Long name, used e.g. in the logs.
LongName string
// Version string.
Version string
|
// Context returns a context provided by the host to be used on the receiver
// operations.
func (app *Application) Context() context.Context {
// For now simply the background context.
return context.Background()
}
// New creates and returns a new instance of Application.
func New(
factories config.Factories,
appInfo ApplicationStartInfo,
) (*Application, error) {
if err := configcheck.ValidateConfigFromFactories(factories); err != nil {
return nil, err
}
app := &Application{
info: appInfo,
v: viper.New(),
readyChan: make(chan struct{}),
factories: factories,
}
rootCmd := &cobra.Command{
Use: appInfo.ExeName,
Long: appInfo.LongName,
Run: func(cmd *cobra.Command, args []string) {
app.init()
app.execute()
},
}
// TODO: coalesce this code and expose this information to other components.
flagSet := new(flag.FlagSet)
addFlagsFns := []func(*flag.FlagSet){
telemetryFlags,
builder.Flags,
loggerFlags,
}
for _, addFlags := range addFlagsFns {
addFlags(flagSet)
}
rootCmd.Flags().AddGoFlagSet(flagSet)
app.rootCmd = rootCmd
return app, nil
}
// ReportFatalError is used to report to the host that the receiver encountered
// a fatal error (i.e.: an error that the instance can't recover from) after
// its start function has already returned.
func (app *Application) ReportFatalError(err error) {
app.asyncErrorChannel <- err
}
func (app *Application) init() {
file := builder.GetConfigFile()
if file == "" {
log.Fatalf("Config file not specified")
}
app.v.SetConfigFile(file)
err := app.v.ReadInConfig()
if err != nil {
log.Fatalf("Error loading config file %q: %v", file, err)
}
app.logger, err = newLogger()
if err != nil {
log.Fatalf("Failed to get logger: %v", err)
}
}
func (app *Application) setupTelemetry(ballastSizeBytes uint64) {
app.logger.Info("Setting up own telemetry...")
err := AppTelemetry.init(app.asyncErrorChannel, ballastSizeBytes, app.logger)
if err != nil {
app.logger.Error("Failed to initialize telemetry", zap.Error(err))
os.Exit(1)
}
}
// runAndWaitForShutdownEvent waits for one of the shutdown events that can happen.
func (app *Application) runAndWaitForShutdownEvent() {
app.logger.Info("Everything is ready. Begin running and processing data.")
// Plug SIGTERM signal into a channel.
signalsChannel := make(chan os.Signal, 1)
signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
// set the channel to stop testing.
app.stopTestChan = make(chan struct{})
// notify tests that it is ready.
close(app.readyChan)
select {
case err := <-app.asyncErrorChannel:
app.logger.Error("Asynchronous error received, terminating process", zap.Error(err))
case s := <-signalsChannel:
app.logger.Info("Received signal from OS", zap.String("signal", s.String()))
case <-app.stopTestChan:
app.logger.Info("Received stop test request")
}
}
func (app *Application) setupConfigurationComponents() {
// Load configuration.
app.logger.Info("Loading configuration...")
cfg, err := config.Load(app.v, app.factories, app.logger)
if err != nil {
log.Fatalf("Cannot load configuration: %v", err)
}
app.config = cfg
app.logger.Info("Applying configuration...")
if err := app.setupExtensions(); err != nil {
log.Fatalf("Cannot setup extensions: %v", err)
}
app.setupPipelines()
}
func (app *Application) setupExtensions() error {
for _, extName := range app.config.Service.Extensions {
extCfg, exists := app.config.Extensions[extName]
if !exists {
return fmt.Errorf("extension %q is not configured", extName)
}
factory, exists := app.factories.Extensions[extCfg.Type()]
if !exists {
return fmt.Errorf("extension factory for type %q is not configured", extCfg.Type())
}
ext, err := factory.CreateExtension(app.logger, extCfg)
if err != nil {
return fmt.Errorf("failed to create extension %q: %v", extName, err)
}
// Check if the factory really created the extension.
if ext == nil {
return fmt.Errorf("factory for %q produced a nil extension", extName)
}
if err := ext.Start(app); err != nil {
return fmt.Errorf("error starting extension %q: %v", extName, err)
}
app.extensions = append(app.extensions, ext)
}
return nil
}
func (app *Application) setupPipelines() {
// Pipeline is built backwards, starting from exporters, so that we create objects
// which are referenced before objects which reference them.
// First create exporters.
var err error
app.exporters, err = builder.NewExportersBuilder(app.logger, app.config, app.factories.Exporters).Build()
if err != nil {
log.Fatalf("Cannot build exporters: %v", err)
}
app.logger.Info("Starting exporters...")
err = app.exporters.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start exporters: %v", err)
}
// Create pipelines and their processors and plug exporters to the
// end of the pipelines.
app.builtPipelines, err = builder.NewPipelinesBuilder(app.logger, app.config, app.exporters, app.factories.Processors).Build()
if err != nil {
log.Fatalf("Cannot build pipelines: %v", err)
}
app.logger.Info("Starting processors...")
err = app.builtPipelines.StartProcessors(app.logger, app)
if err != nil {
log.Fatalf("Cannot start processors: %v", err)
}
// Create receivers and plug them into the start of the pipelines.
app.builtReceivers, err = builder.NewReceiversBuilder(app.logger, app.config, app.builtPipelines, app.factories.Receivers).Build()
if err != nil {
log.Fatalf("Cannot build receivers: %v", err)
}
app.logger.Info("Starting receivers...")
err = app.builtReceivers.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start receivers: %v", err)
}
}
func (app *Application) notifyPipelineReady() {
for i, ext := range app.extensions {
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.Ready(); err != nil {
log.Fatalf(
"Error notifying extension %q that the pipeline was started: %v",
app.config.Service.Extensions[i],
err,
)
}
}
}
}
func (app *Application) notifyPipelineNotReady() {
// Notify on reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.NotReady(); err != nil {
app.logger.Warn(
"Error notifying extension that the pipeline was shutdown",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
}
func (app *Application) shutdownPipelines() {
// Shutdown order is the reverse of building: first receivers, then flushing pipelines
// giving senders a chance to send all their data. This may take time, the allowed
// time should be part of configuration.
app.logger | // Git hash of the source code.
GitHash string
}
var _ component.Host = (*Application)(nil) | random_line_split |
service.go | .org/zap"
"github.com/open-telemetry/opentelemetry-collector/component"
"github.com/open-telemetry/opentelemetry-collector/config"
"github.com/open-telemetry/opentelemetry-collector/config/configcheck"
"github.com/open-telemetry/opentelemetry-collector/config/configmodels"
"github.com/open-telemetry/opentelemetry-collector/extension"
"github.com/open-telemetry/opentelemetry-collector/service/builder"
)
// Application represents a collector application
type Application struct {
info ApplicationStartInfo
rootCmd *cobra.Command
v *viper.Viper
logger *zap.Logger
exporters builder.Exporters
builtReceivers builder.Receivers
builtPipelines builder.BuiltPipelines
factories config.Factories
config *configmodels.Config
extensions []extension.ServiceExtension
// stopTestChan is used to terminate the application in end to end tests.
stopTestChan chan struct{}
// readyChan is used in tests to indicate that the application is ready.
readyChan chan struct{}
// asyncErrorChannel is used to signal a fatal error from any component.
asyncErrorChannel chan error
}
// Command returns Application's root command.
func (app *Application) Command() *cobra.Command {
return app.rootCmd
}
// ApplicationStartInfo is the information that is logged at the application start.
// This information can be overridden in custom builds.
type ApplicationStartInfo struct {
// Executable file name, e.g. "otelcol".
ExeName string
// Long name, used e.g. in the logs.
LongName string
// Version string.
Version string
// Git hash of the source code.
GitHash string
}
var _ component.Host = (*Application)(nil)
// Context returns a context provided by the host to be used on the receiver
// operations.
func (app *Application) Context() context.Context {
// For now simply the background context.
return context.Background()
}
// New creates and returns a new instance of Application.
func New(
factories config.Factories,
appInfo ApplicationStartInfo,
) (*Application, error) {
if err := configcheck.ValidateConfigFromFactories(factories); err != nil {
return nil, err
}
app := &Application{
info: appInfo,
v: viper.New(),
readyChan: make(chan struct{}),
factories: factories,
}
rootCmd := &cobra.Command{
Use: appInfo.ExeName,
Long: appInfo.LongName,
Run: func(cmd *cobra.Command, args []string) {
app.init()
app.execute()
},
}
// TODO: coalesce this code and expose this information to other components.
flagSet := new(flag.FlagSet)
addFlagsFns := []func(*flag.FlagSet){
telemetryFlags,
builder.Flags,
loggerFlags,
}
for _, addFlags := range addFlagsFns {
addFlags(flagSet)
}
rootCmd.Flags().AddGoFlagSet(flagSet)
app.rootCmd = rootCmd
return app, nil
}
// ReportFatalError is used to report to the host that the receiver encountered
// a fatal error (i.e.: an error that the instance can't recover from) after
// its start function has already returned.
func (app *Application) ReportFatalError(err error) {
app.asyncErrorChannel <- err
}
func (app *Application) init() {
file := builder.GetConfigFile()
if file == "" {
log.Fatalf("Config file not specified")
}
app.v.SetConfigFile(file)
err := app.v.ReadInConfig()
if err != nil {
log.Fatalf("Error loading config file %q: %v", file, err)
}
app.logger, err = newLogger()
if err != nil {
log.Fatalf("Failed to get logger: %v", err)
}
}
func (app *Application) setupTelemetry(ballastSizeBytes uint64) {
app.logger.Info("Setting up own telemetry...")
err := AppTelemetry.init(app.asyncErrorChannel, ballastSizeBytes, app.logger)
if err != nil {
app.logger.Error("Failed to initialize telemetry", zap.Error(err))
os.Exit(1)
}
}
// runAndWaitForShutdownEvent waits for one of the shutdown events that can happen.
func (app *Application) runAndWaitForShutdownEvent() {
app.logger.Info("Everything is ready. Begin running and processing data.")
// Plug SIGTERM signal into a channel.
signalsChannel := make(chan os.Signal, 1)
signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
// set the channel to stop testing.
app.stopTestChan = make(chan struct{})
// notify tests that it is ready.
close(app.readyChan)
select {
case err := <-app.asyncErrorChannel:
app.logger.Error("Asynchronous error received, terminating process", zap.Error(err))
case s := <-signalsChannel:
app.logger.Info("Received signal from OS", zap.String("signal", s.String()))
case <-app.stopTestChan:
app.logger.Info("Received stop test request")
}
}
func (app *Application) setupConfigurationComponents() {
// Load configuration.
app.logger.Info("Loading configuration...")
cfg, err := config.Load(app.v, app.factories, app.logger)
if err != nil {
log.Fatalf("Cannot load configuration: %v", err)
}
app.config = cfg
app.logger.Info("Applying configuration...")
if err := app.setupExtensions(); err != nil {
log.Fatalf("Cannot setup extensions: %v", err)
}
app.setupPipelines()
}
func (app *Application) | () error {
for _, extName := range app.config.Service.Extensions {
extCfg, exists := app.config.Extensions[extName]
if !exists {
return fmt.Errorf("extension %q is not configured", extName)
}
factory, exists := app.factories.Extensions[extCfg.Type()]
if !exists {
return fmt.Errorf("extension factory for type %q is not configured", extCfg.Type())
}
ext, err := factory.CreateExtension(app.logger, extCfg)
if err != nil {
return fmt.Errorf("failed to create extension %q: %v", extName, err)
}
// Check if the factory really created the extension.
if ext == nil {
return fmt.Errorf("factory for %q produced a nil extension", extName)
}
if err := ext.Start(app); err != nil {
return fmt.Errorf("error starting extension %q: %v", extName, err)
}
app.extensions = append(app.extensions, ext)
}
return nil
}
func (app *Application) setupPipelines() {
// Pipeline is built backwards, starting from exporters, so that we create objects
// which are referenced before objects which reference them.
// First create exporters.
var err error
app.exporters, err = builder.NewExportersBuilder(app.logger, app.config, app.factories.Exporters).Build()
if err != nil {
log.Fatalf("Cannot build exporters: %v", err)
}
app.logger.Info("Starting exporters...")
err = app.exporters.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start exporters: %v", err)
}
// Create pipelines and their processors and plug exporters to the
// end of the pipelines.
app.builtPipelines, err = builder.NewPipelinesBuilder(app.logger, app.config, app.exporters, app.factories.Processors).Build()
if err != nil {
log.Fatalf("Cannot build pipelines: %v", err)
}
app.logger.Info("Starting processors...")
err = app.builtPipelines.StartProcessors(app.logger, app)
if err != nil {
log.Fatalf("Cannot start processors: %v", err)
}
// Create receivers and plug them into the start of the pipelines.
app.builtReceivers, err = builder.NewReceiversBuilder(app.logger, app.config, app.builtPipelines, app.factories.Receivers).Build()
if err != nil {
log.Fatalf("Cannot build receivers: %v", err)
}
app.logger.Info("Starting receivers...")
err = app.builtReceivers.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start receivers: %v", err)
}
}
func (app *Application) notifyPipelineReady() {
for i, ext := range app.extensions {
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.Ready(); err != nil {
log.Fatalf(
"Error notifying extension %q that the pipeline was started: %v",
app.config.Service.Extensions[i],
err,
)
}
}
}
}
func (app *Application) notifyPipelineNotReady() {
// Notify on reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.NotReady(); err != nil {
app.logger.Warn(
"Error notifying extension that the pipeline was shutdown",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
}
func (app *Application) shutdownPipelines() {
// Shutdown order is the reverse of building: first receivers, then flushing pipelines
// giving senders a chance to send all their data. This may take time, the allowed
// time should be part of configuration.
app.logger | setupExtensions | identifier_name |
service.go | .org/zap"
"github.com/open-telemetry/opentelemetry-collector/component"
"github.com/open-telemetry/opentelemetry-collector/config"
"github.com/open-telemetry/opentelemetry-collector/config/configcheck"
"github.com/open-telemetry/opentelemetry-collector/config/configmodels"
"github.com/open-telemetry/opentelemetry-collector/extension"
"github.com/open-telemetry/opentelemetry-collector/service/builder"
)
// Application represents a collector application
type Application struct {
info ApplicationStartInfo
rootCmd *cobra.Command
v *viper.Viper
logger *zap.Logger
exporters builder.Exporters
builtReceivers builder.Receivers
builtPipelines builder.BuiltPipelines
factories config.Factories
config *configmodels.Config
extensions []extension.ServiceExtension
// stopTestChan is used to terminate the application in end to end tests.
stopTestChan chan struct{}
// readyChan is used in tests to indicate that the application is ready.
readyChan chan struct{}
// asyncErrorChannel is used to signal a fatal error from any component.
asyncErrorChannel chan error
}
// Command returns Application's root command.
func (app *Application) Command() *cobra.Command {
return app.rootCmd
}
// ApplicationStartInfo is the information that is logged at the application start.
// This information can be overridden in custom builds.
type ApplicationStartInfo struct {
// Executable file name, e.g. "otelcol".
ExeName string
// Long name, used e.g. in the logs.
LongName string
// Version string.
Version string
// Git hash of the source code.
GitHash string
}
var _ component.Host = (*Application)(nil)
// Context returns a context provided by the host to be used on the receiver
// operations.
func (app *Application) Context() context.Context {
// For now simply the background context.
return context.Background()
}
// New creates and returns a new instance of Application.
func New(
factories config.Factories,
appInfo ApplicationStartInfo,
) (*Application, error) {
if err := configcheck.ValidateConfigFromFactories(factories); err != nil {
return nil, err
}
app := &Application{
info: appInfo,
v: viper.New(),
readyChan: make(chan struct{}),
factories: factories,
}
rootCmd := &cobra.Command{
Use: appInfo.ExeName,
Long: appInfo.LongName,
Run: func(cmd *cobra.Command, args []string) {
app.init()
app.execute()
},
}
// TODO: coalesce this code and expose this information to other components.
flagSet := new(flag.FlagSet)
addFlagsFns := []func(*flag.FlagSet){
telemetryFlags,
builder.Flags,
loggerFlags,
}
for _, addFlags := range addFlagsFns {
addFlags(flagSet)
}
rootCmd.Flags().AddGoFlagSet(flagSet)
app.rootCmd = rootCmd
return app, nil
}
// ReportFatalError is used to report to the host that the receiver encountered
// a fatal error (i.e.: an error that the instance can't recover from) after
// its start function has already returned.
func (app *Application) ReportFatalError(err error) {
app.asyncErrorChannel <- err
}
func (app *Application) init() {
file := builder.GetConfigFile()
if file == "" {
log.Fatalf("Config file not specified")
}
app.v.SetConfigFile(file)
err := app.v.ReadInConfig()
if err != nil {
log.Fatalf("Error loading config file %q: %v", file, err)
}
app.logger, err = newLogger()
if err != nil {
log.Fatalf("Failed to get logger: %v", err)
}
}
func (app *Application) setupTelemetry(ballastSizeBytes uint64) {
app.logger.Info("Setting up own telemetry...")
err := AppTelemetry.init(app.asyncErrorChannel, ballastSizeBytes, app.logger)
if err != nil {
app.logger.Error("Failed to initialize telemetry", zap.Error(err))
os.Exit(1)
}
}
// runAndWaitForShutdownEvent waits for one of the shutdown events that can happen.
func (app *Application) runAndWaitForShutdownEvent() {
app.logger.Info("Everything is ready. Begin running and processing data.")
// Plug SIGTERM signal into a channel.
signalsChannel := make(chan os.Signal, 1)
signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
// set the channel to stop testing.
app.stopTestChan = make(chan struct{})
// notify tests that it is ready.
close(app.readyChan)
select {
case err := <-app.asyncErrorChannel:
app.logger.Error("Asynchronous error received, terminating process", zap.Error(err))
case s := <-signalsChannel:
app.logger.Info("Received signal from OS", zap.String("signal", s.String()))
case <-app.stopTestChan:
app.logger.Info("Received stop test request")
}
}
func (app *Application) setupConfigurationComponents() {
// Load configuration.
app.logger.Info("Loading configuration...")
cfg, err := config.Load(app.v, app.factories, app.logger)
if err != nil {
log.Fatalf("Cannot load configuration: %v", err)
}
app.config = cfg
app.logger.Info("Applying configuration...")
if err := app.setupExtensions(); err != nil {
log.Fatalf("Cannot setup extensions: %v", err)
}
app.setupPipelines()
}
func (app *Application) setupExtensions() error {
for _, extName := range app.config.Service.Extensions {
extCfg, exists := app.config.Extensions[extName]
if !exists {
return fmt.Errorf("extension %q is not configured", extName)
}
factory, exists := app.factories.Extensions[extCfg.Type()]
if !exists {
return fmt.Errorf("extension factory for type %q is not configured", extCfg.Type())
}
ext, err := factory.CreateExtension(app.logger, extCfg)
if err != nil {
return fmt.Errorf("failed to create extension %q: %v", extName, err)
}
// Check if the factory really created the extension.
if ext == nil {
return fmt.Errorf("factory for %q produced a nil extension", extName)
}
if err := ext.Start(app); err != nil {
return fmt.Errorf("error starting extension %q: %v", extName, err)
}
app.extensions = append(app.extensions, ext)
}
return nil
}
func (app *Application) setupPipelines() | log.Fatalf("Cannot build pipelines: %v", err)
}
app.logger.Info("Starting processors...")
err = app.builtPipelines.StartProcessors(app.logger, app)
if err != nil {
log.Fatalf("Cannot start processors: %v", err)
}
// Create receivers and plug them into the start of the pipelines.
app.builtReceivers, err = builder.NewReceiversBuilder(app.logger, app.config, app.builtPipelines, app.factories.Receivers).Build()
if err != nil {
log.Fatalf("Cannot build receivers: %v", err)
}
app.logger.Info("Starting receivers...")
err = app.builtReceivers.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start receivers: %v", err)
}
}
func (app *Application) notifyPipelineReady() {
for i, ext := range app.extensions {
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.Ready(); err != nil {
log.Fatalf(
"Error notifying extension %q that the pipeline was started: %v",
app.config.Service.Extensions[i],
err,
)
}
}
}
}
func (app *Application) notifyPipelineNotReady() {
// Notify on reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.NotReady(); err != nil {
app.logger.Warn(
"Error notifying extension that the pipeline was shutdown",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
}
func (app *Application) shutdownPipelines() {
// Shutdown order is the reverse of building: first receivers, then flushing pipelines
// giving senders a chance to send all their data. This may take time, the allowed
// time should be part of configuration.
app.logger | {
// Pipeline is built backwards, starting from exporters, so that we create objects
// which are referenced before objects which reference them.
// First create exporters.
var err error
app.exporters, err = builder.NewExportersBuilder(app.logger, app.config, app.factories.Exporters).Build()
if err != nil {
log.Fatalf("Cannot build exporters: %v", err)
}
app.logger.Info("Starting exporters...")
err = app.exporters.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start exporters: %v", err)
}
// Create pipelines and their processors and plug exporters to the
// end of the pipelines.
app.builtPipelines, err = builder.NewPipelinesBuilder(app.logger, app.config, app.exporters, app.factories.Processors).Build()
if err != nil { | identifier_body |
NLP_V3_SVD_XGBM_HyperParamCV.py | L1(New)'].value_counts()
#Multi class NLP Classification
#create a column where each class has a unique id called category id
dataframe['category_id'] = dataframe['RMED FaultCode L1(New)'].factorize()[0]
category_id_dataframe = dataframe[['RMED FaultCode L1(New)', 'category_id']].drop_duplicates().sort_values('category_id')
category_to_id = dict(category_id_dataframe.values)
id_to_category = dict(category_id_dataframe[['category_id', 'RMED FaultCode L1(New)']].values)
dataframe.head()
x = dataframe.Text
y = dataframe['RMED FaultCode L1(New)']
from sklearn.model_selection import train_test_split
SEED = 2000
x_train, x_validation_and_test, y_train, y_validation_and_test = train_test_split(x, y, test_size=.02, random_state=SEED)
x_validation, x_test, y_validation, y_test = train_test_split(x_validation_and_test, y_validation_and_test, test_size=.5, random_state=SEED)
#######################################################
#https://bbengfort.github.io/tutorials/2016/05/19/text-classification-nltk-sckit-learn.html
#The difference is that a stem might not be an actual word, whereas a lemma is always an actual dictionary word.
#Lemmatization uses the WordNet corpus (plus a stop-word corpus) to produce the lemma, which makes it slower than stemming.
import re
from nltk.stem.porter import PorterStemmer
porter_stemmer = PorterStemmer()
def stemming_tokenizer(str_input):
words = re.sub(r"[^A-Za-z0-9\-]", " ", str_input).lower().split()
words = [porter_stemmer.stem(word) for word in words]
return words
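# Quick illustration of the stemming-vs-lemmatization note above; the sample sentence is
# made up. PorterStemmer needs no extra NLTK downloads, so this line is safe to run, and it
# shows that stems are lowercased and need not be real words (e.g. 'pumps' -> 'pump',
# 'failing' -> 'fail').
print(stemming_tokenizer("Pumps were failing repeatedly"))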
from sklearn.feature_extraction.text import TfidfVectorizer
#min_df = When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold.
#norm='l2' = The cosine similarity between two vectors is their dot product when l2 norm has been applied.
tfidf = TfidfVectorizer(sublinear_tf=True, norm='l2', encoding='latin-1', ngram_range=(1,1),stop_words='english',token_pattern=r'(?u)\b[A-Za-z]+\b', tokenizer=stemming_tokenizer)
tfidf.fit(x_train)
# encode document
xtrain_tfidf = tfidf.transform(x_train).toarray()
# summarize encoded vector
print(xtrain_tfidf.shape)
xvalid_tfidf = tfidf.transform(x_validation).toarray()
xtest_tfidf = tfidf.transform(x_test).toarray()
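# The norm='l2' comment above can be checked directly: every non-empty TF-IDF row has unit
# length, so the cosine similarity of two documents is just their dot product. Illustrative
# check only (assumes numpy is already imported as np earlier in the script, as the later
# sections rely on).
print(np.linalg.norm(xtrain_tfidf, axis=1)[:5])   # ~1.0 for non-empty documents
print(np.dot(xtrain_tfidf[0], xtrain_tfidf[1]))   # cosine similarity of the first two documents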
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(xtrain_tfidf, y_train)
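# SMOTE is applied here to counter the class imbalance inspected with value_counts() above;
# a quick before/after look at the label distribution (illustrative only, assumes pandas is
# already imported as pd earlier in the script).
print(pd.Series(y_train).value_counts())       # skewed counts per fault code
print(pd.Series(y_train_res).value_counts())   # balanced counts after oversampling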
##############################################################################
#from sklearn.linear_model import LogisticRegression
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.svm import LinearSVC
#from sklearn.naive_bayes import MultinomialNB
#from sklearn.model_selection import cross_val_score
#models = [
# RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
# LinearSVC(),
# MultinomialNB(),
# LogisticRegression(random_state=0),
#]
#CV = 5
#cv_df = pd.DataFrame(index=range(CV * len(models)))
#entries = []
#for model in models:
# model_name = model.__class__.__name__
# accuracies = cross_val_score(model, X_train_res, y_train_res, scoring='accuracy', cv=CV)
# for fold_idx, accuracy in enumerate(accuracies):
# entries.append((model_name, fold_idx, accuracy))
#cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
#
#
| #sns.boxplot(x='model_name', y='accuracy', data=cv_df)
#sns.stripplot(x='model_name', y='accuracy', data=cv_df,size=8, jitter=True, edgecolor="gray", linewidth=2)
#plt.show()
#
#cv_df.groupby('model_name').accuracy.mean()
#
##continue with the best model further
## may be due to imbalanced classes - balance them further
## confusion matrix and heat map to see what is predicted incorrectly
## major of the predictions end up on the diagonal (predicted label = actual label)
#from sklearn.model_selection import train_test_split
#model = LinearSVC()
##X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, dataframe.index, test_size=0.20, random_state=0)
#model.fit(X_train_res, y_train_res)
#y_pred = model.predict(xvalid_tfidf)
#from sklearn.metrics import confusion_matrix
#conf_mat = confusion_matrix(y_validation, y_pred)
#fig, ax = plt.subplots(figsize=(6,6))
#sns.heatmap(conf_mat, annot=True, fmt='d',xticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values, yticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values)
#plt.ylabel('Actual')
#plt.xlabel('Predicted')
#plt.show()
##there are misclassifications, and it is important to see what caused them:
#from IPython.display import display
#for predicted in category_id_dataframe.category_id:
# for actual in category_id_dataframe.category_id:
# if predicted != actual and conf_mat[actual, predicted] >= 5:
# print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted]))
# display(dataframe.loc[indices_test[(y_test == actual) & (y_pred == predicted)]][['RMED FaultCode L1(New)', 'Text']])
# print('')
#
##check the correlated unigram & bigrams in each target classification
#model.fit(features, labels)
#N = 10
#for dataframe['RMED FaultCode L1(New)'], category_id in sorted(category_to_id.items()):
# indices = np.argsort(model.coef_[category_id])
# feature_names = np.array(tfidf.get_feature_names())[indices]
# unigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 1][:N]
# bigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 2][:N]
## print("# '{}':".format(dataframe['RMED FaultCode L1(New)']))
# print(category_id)
# print(" . Top unigrams:\n . {}".format('\n . '.join(unigrams)))
# print(" . Top bigrams:\n . {}".format('\n . '.join(bigrams)))
#from sklearn import metrics
##print(metrics.classification_report(y_test, y_pred, target_names=dataframe['RMED FaultCode L1(New)'].unique()))
#from sklearn.metrics import confusion_matrix
#cm = confusion_matrix(y_validation, y_pred)
#acc2=metrics.accuracy_score(y_validation,y_pred)
###########################################
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components=200)
svd.fit(X_train_res)
svd_X_train_res = svd.transform(X_train_res)
svd_xvalid_tfidf = svd.transform(xvalid_tfidf)
svd_xtest_tfidf = svd.transform(xtest_tfidf)
####################### conda install -c anaconda py-xgboost ###########################
#########################################################################
# tuning hyper parameters, test with CV = 5
########################################################################
# from sklearn.grid_search import GridSearchCV
# The number of trees, tree depth and the learning rate are the most crucial parameters.
# n_estimators captures the number of trees that we add to the model. A high number of trees can be computationally expensive
# max_depth bounds the maximum depth of the tree
# The square root of features is usually a good starting point
# Subsample sets the fraction of samples to be used for fitting the individual base learners. Values lower than 1 generally lead to a reduction of variance and an increase in bias.
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
p_test3 = {'learning_rate':[0.15,0.1,0.05,0.01,0.005,0.001], 'n_estimators':[100,250,500,750,1000,1250,1500,1750]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(max_depth=4, min_samples_split=2, min_samples_leaf=1, subsample=1,max_features='sqrt', random_state=10),param_grid = p_test3, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'learning_rate': 0.1, 'n_estimators': 1250}, 0.9279073046083356)
# ({'learning_rate': 0.15, 'n_estimators': 1750}, 0.5295885100008811)
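# Hedged sketch (not in the original script): refit a single GradientBoosting
# model with the best learning_rate/n_estimators recorded above and score it on
# the held-out validation split. The parameter values are taken from the
# grid-search output noted in the comments and may differ on other data.
final_gbm = GradientBoostingClassifier(
    learning_rate=0.1, n_estimators=1250, max_depth=4, min_samples_split=2,
    min_samples_leaf=1, subsample=1, max_features='sqrt', random_state=10)
final_gbm.fit(svd_X_train_res, y_train_res)
print('validation accuracy:', final_gbm.score(svd_xvalid_tfidf, y_validation))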
p_test2 = {'max_depth':[2,3,4,5,6,7] }
tuning = GridSearchCV(est | #import seaborn as sns
| random_line_split |
NLP_V3_SVD_XGBM_HyperParamCV.py | L1(New)'].value_counts()
#Multi class NLP Classification
#create a column where each class has a unique id called category id
dataframe['category_id'] = dataframe['RMED FaultCode L1(New)'].factorize()[0]
category_id_dataframe = dataframe[['RMED FaultCode L1(New)', 'category_id']].drop_duplicates().sort_values('category_id')
category_to_id = dict(category_id_dataframe.values)
id_to_category = dict(category_id_dataframe[['category_id', 'RMED FaultCode L1(New)']].values)
dataframe.head()
x = dataframe.Text
y = dataframe['RMED FaultCode L1(New)']
from sklearn.model_selection import train_test_split
SEED = 2000
x_train, x_validation_and_test, y_train, y_validation_and_test = train_test_split(x, y, test_size=.02, random_state=SEED)
x_validation, x_test, y_validation, y_test = train_test_split(x_validation_and_test, y_validation_and_test, test_size=.5, random_state=SEED)
#######################################################
#https://bbengfort.github.io/tutorials/2016/05/19/text-classification-nltk-sckit-learn.html
#The difference is that a stem might not be an actual word, whereas a lemma is an actual language word.
#Lemmatization uses the WordNet corpus & a stop-word corpus to produce the lemma, which makes it slower than stemming.
import re
from nltk.stem.porter import PorterStemmer
porter_stemmer = PorterStemmer()
def stemming_tokenizer(str_input):
|
from sklearn.feature_extraction.text import TfidfVectorizer
#min_df = When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold.
#norm='l2' = The cosine similarity between two vectors is their dot product when l2 norm has been applied.
tfidf = TfidfVectorizer(sublinear_tf=True, norm='l2', encoding='latin-1', ngram_range=(1,1),stop_words='english',token_pattern=r'(?u)\b[A-Za-z]+\b', tokenizer=stemming_tokenizer)
tfidf.fit(x_train)
# encode document
xtrain_tfidf = tfidf.transform(x_train).toarray()
# summarize encoded vector
print(xtrain_tfidf.shape)
xvalid_tfidf = tfidf.transform(x_validation).toarray()
xtest_tfidf = tfidf.transform(x_test).toarray()
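# Hedged sketch (not part of the original script): a quick sanity check on the
# fitted vocabulary, to confirm the stemming + stop-word settings described in
# the comments above behave as expected.
print('vocabulary size:', len(tfidf.get_feature_names()))
print('sample features:', tfidf.get_feature_names()[:10])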
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(xtrain_tfidf, y_train)
##############################################################################
#from sklearn.linear_model import LogisticRegression
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.svm import LinearSVC
#from sklearn.naive_bayes import MultinomialNB
#from sklearn.model_selection import cross_val_score
#models = [
# RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
# LinearSVC(),
# MultinomialNB(),
# LogisticRegression(random_state=0),
#]
#CV = 5
#cv_df = pd.DataFrame(index=range(CV * len(models)))
#entries = []
#for model in models:
# model_name = model.__class__.__name__
# accuracies = cross_val_score(model, X_train_res, y_train_res, scoring='accuracy', cv=CV)
# for fold_idx, accuracy in enumerate(accuracies):
# entries.append((model_name, fold_idx, accuracy))
#cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
#
#
#import seaborn as sns
#sns.boxplot(x='model_name', y='accuracy', data=cv_df)
#sns.stripplot(x='model_name', y='accuracy', data=cv_df,size=8, jitter=True, edgecolor="gray", linewidth=2)
#plt.show()
#
#cv_df.groupby('model_name').accuracy.mean()
#
##continue with the best model further
## may be due to imbalanced classes - balance them further
## confusion matrix and heat map to see what is predicted incorrectly
## major of the predictions end up on the diagonal (predicted label = actual label)
#from sklearn.model_selection import train_test_split
#model = LinearSVC()
##X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, dataframe.index, test_size=0.20, random_state=0)
#model.fit(X_train_res, y_train_res)
#y_pred = model.predict(xvalid_tfidf)
#from sklearn.metrics import confusion_matrix
#conf_mat = confusion_matrix(y_validation, y_pred)
#fig, ax = plt.subplots(figsize=(6,6))
#sns.heatmap(conf_mat, annot=True, fmt='d',xticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values, yticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values)
#plt.ylabel('Actual')
#plt.xlabel('Predicted')
#plt.show()
##there are misclassifications, and it is important to see what caused them:
#from IPython.display import display
#for predicted in category_id_dataframe.category_id:
# for actual in category_id_dataframe.category_id:
# if predicted != actual and conf_mat[actual, predicted] >= 5:
# print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted]))
# display(dataframe.loc[indices_test[(y_test == actual) & (y_pred == predicted)]][['RMED FaultCode L1(New)', 'Text']])
# print('')
#
##check the correlated unigram & bigrams in each target classification
#model.fit(features, labels)
#N = 10
#for dataframe['RMED FaultCode L1(New)'], category_id in sorted(category_to_id.items()):
# indices = np.argsort(model.coef_[category_id])
# feature_names = np.array(tfidf.get_feature_names())[indices]
# unigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 1][:N]
# bigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 2][:N]
## print("# '{}':".format(dataframe['RMED FaultCode L1(New)']))
# print(category_id)
# print(" . Top unigrams:\n . {}".format('\n . '.join(unigrams)))
# print(" . Top bigrams:\n . {}".format('\n . '.join(bigrams)))
#from sklearn import metrics
##print(metrics.classification_report(y_test, y_pred, target_names=dataframe['RMED FaultCode L1(New)'].unique()))
#from sklearn.metrics import confusion_matrix
#cm = confusion_matrix(y_validation, y_pred)
#acc2=metrics.accuracy_score(y_validation,y_pred)
###########################################
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components=200)
svd.fit(X_train_res)
svd_X_train_res = svd.transform(X_train_res)
svd_xvalid_tfidf = svd.transform(xvalid_tfidf)
svd_xtest_tfidf = svd.transform(xtest_tfidf)
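# Hedged sketch (not in the original script): check how much of the tf-idf
# variance the 200 SVD components actually retain before feeding them to the
# downstream classifier.
print('explained variance retained:', svd.explained_variance_ratio_.sum())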
####################### conda install -c anaconda py-xgboost ###########################
#########################################################################
# tuning hyper parameters, test with CV = 5
########################################################################
# from sklearn.grid_search import GridSearchCV
# The number of trees, tree depth and the learning rate are the most crucial parameters.
# n_estimators captures the number of trees that we add to the model. A high number of trees can be computationally expensive
# max_depth bounds the maximum depth of the tree
# The square root of features is usually a good starting point
# Subsample sets the fraction of samples to be used for fitting the individual base learners. Values lower than 1 generally lead to a reduction of variance and an increase in bias.
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
p_test3 = {'learning_rate':[0.15,0.1,0.05,0.01,0.005,0.001], 'n_estimators':[100,250,500,750,1000,1250,1500,1750]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(max_depth=4, min_samples_split=2, min_samples_leaf=1, subsample=1,max_features='sqrt', random_state=10),param_grid = p_test3, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'learning_rate': 0.1, 'n_estimators': 1250}, 0.9279073046083356)
# ({'learning_rate': 0.15, 'n_estimators': 1750}, 0.5295885100008811)
p_test2 = {'max_depth':[2,3,4,5,6,7] }
tuning = GridSearchCV(estimator | words = re.sub(r"[^A-Za-z0-9\-]", " ", str_input).lower().split()
words = [porter_stemmer.stem(word) for word in words]
return words | identifier_body |
NLP_V3_SVD_XGBM_HyperParamCV.py | 1(New)'].value_counts()
#Multi class NLP Classification
#create a column where each class has a unique id called category id
dataframe['category_id'] = dataframe['RMED FaultCode L1(New)'].factorize()[0]
category_id_dataframe = dataframe[['RMED FaultCode L1(New)', 'category_id']].drop_duplicates().sort_values('category_id')
category_to_id = dict(category_id_dataframe.values)
id_to_category = dict(category_id_dataframe[['category_id', 'RMED FaultCode L1(New)']].values)
dataframe.head()
x = dataframe.Text
y = dataframe['RMED FaultCode L1(New)']
from sklearn.model_selection import train_test_split
SEED = 2000
x_train, x_validation_and_test, y_train, y_validation_and_test = train_test_split(x, y, test_size=.02, random_state=SEED)
x_validation, x_test, y_validation, y_test = train_test_split(x_validation_and_test, y_validation_and_test, test_size=.5, random_state=SEED)
#######################################################
#https://bbengfort.github.io/tutorials/2016/05/19/text-classification-nltk-sckit-learn.html
#The difference is that a stem might not be an actual word, whereas a lemma is an actual language word.
#Lemmatization uses the WordNet corpus & a stop-word corpus to produce the lemma, which makes it slower than stemming.
import re
from nltk.stem.porter import PorterStemmer
porter_stemmer = PorterStemmer()
def | (str_input):
words = re.sub(r"[^A-Za-z0-9\-]", " ", str_input).lower().split()
words = [porter_stemmer.stem(word) for word in words]
return words
from sklearn.feature_extraction.text import TfidfVectorizer
#min_df = When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold.
#norm='l2' = The cosine similarity between two vectors is their dot product when l2 norm has been applied.
tfidf = TfidfVectorizer(sublinear_tf=True, norm='l2', encoding='latin-1', ngram_range=(1,1),stop_words='english',token_pattern=r'(?u)\b[A-Za-z]+\b', tokenizer=stemming_tokenizer)
tfidf.fit(x_train)
# encode document
xtrain_tfidf = tfidf.transform(x_train).toarray()
# summarize encoded vector
print(xtrain_tfidf.shape)
xvalid_tfidf = tfidf.transform(x_validation).toarray()
xtest_tfidf = tfidf.transform(x_test).toarray()
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(xtrain_tfidf, y_train)
##############################################################################
#from sklearn.linear_model import LogisticRegression
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.svm import LinearSVC
#from sklearn.naive_bayes import MultinomialNB
#from sklearn.model_selection import cross_val_score
#models = [
# RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
# LinearSVC(),
# MultinomialNB(),
# LogisticRegression(random_state=0),
#]
#CV = 5
#cv_df = pd.DataFrame(index=range(CV * len(models)))
#entries = []
#for model in models:
# model_name = model.__class__.__name__
# accuracies = cross_val_score(model, X_train_res, y_train_res, scoring='accuracy', cv=CV)
# for fold_idx, accuracy in enumerate(accuracies):
# entries.append((model_name, fold_idx, accuracy))
#cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
#
#
#import seaborn as sns
#sns.boxplot(x='model_name', y='accuracy', data=cv_df)
#sns.stripplot(x='model_name', y='accuracy', data=cv_df,size=8, jitter=True, edgecolor="gray", linewidth=2)
#plt.show()
#
#cv_df.groupby('model_name').accuracy.mean()
#
##continue with the best model further
## may be due to imbalanced classes - balance them further
## confusion matrix and heat map to see what is predicted incorrectly
## major of the predictions end up on the diagonal (predicted label = actual label)
#from sklearn.model_selection import train_test_split
#model = LinearSVC()
##X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, dataframe.index, test_size=0.20, random_state=0)
#model.fit(X_train_res, y_train_res)
#y_pred = model.predict(xvalid_tfidf)
#from sklearn.metrics import confusion_matrix
#conf_mat = confusion_matrix(y_validation, y_pred)
#fig, ax = plt.subplots(figsize=(6,6))
#sns.heatmap(conf_mat, annot=True, fmt='d',xticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values, yticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values)
#plt.ylabel('Actual')
#plt.xlabel('Predicted')
#plt.show()
##there are misclassifications, and it is important to see what caused them:
#from IPython.display import display
#for predicted in category_id_dataframe.category_id:
# for actual in category_id_dataframe.category_id:
# if predicted != actual and conf_mat[actual, predicted] >= 5:
# print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted]))
# display(dataframe.loc[indices_test[(y_test == actual) & (y_pred == predicted)]][['RMED FaultCode L1(New)', 'Text']])
# print('')
#
##check the correlated unigram & bigrams in each target classification
#model.fit(features, labels)
#N = 10
#for dataframe['RMED FaultCode L1(New)'], category_id in sorted(category_to_id.items()):
# indices = np.argsort(model.coef_[category_id])
# feature_names = np.array(tfidf.get_feature_names())[indices]
# unigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 1][:N]
# bigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 2][:N]
## print("# '{}':".format(dataframe['RMED FaultCode L1(New)']))
# print(category_id)
# print(" . Top unigrams:\n . {}".format('\n . '.join(unigrams)))
# print(" . Top bigrams:\n . {}".format('\n . '.join(bigrams)))
#from sklearn import metrics
##print(metrics.classification_report(y_test, y_pred, target_names=dataframe['RMED FaultCode L1(New)'].unique()))
#from sklearn.metrics import confusion_matrix
#cm = confusion_matrix(y_validation, y_pred)
#acc2=metrics.accuracy_score(y_validation,y_pred)
###########################################
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components=200)
svd.fit(X_train_res)
svd_X_train_res = svd.transform(X_train_res)
svd_xvalid_tfidf = svd.transform(xvalid_tfidf)
svd_xtest_tfidf = svd.transform(xtest_tfidf)
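# Hedged sketch (not in the original script): how a new fault description would
# be pushed through the same tf-idf -> SVD pipeline before being scored.
# `final_model` is a placeholder name for whichever estimator ends up fitted on
# svd_X_train_res further below.
def transform_new_text(texts, tfidf_vectorizer, svd_model):
    dense = tfidf_vectorizer.transform(texts).toarray()
    return svd_model.transform(dense)
# Example usage (commented out because final_model is hypothetical here):
# new_features = transform_new_text(["pump pressure fault"], tfidf, svd)
# final_model.predict(new_features)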
####################### conda install -c anaconda py-xgboost ###########################
#########################################################################
# tuning hyper parameters, test with CV = 5
########################################################################
# from sklearn.grid_search import GridSearchCV
# The number of trees, tree depth and the learning rate are the most crucial parameters.
# n_estimators captures the number of trees that we add to the model. A high number of trees can be computationally expensive
# max_depth bounds the maximum depth of the tree
# The square root of features is usually a good starting point
# Subsample sets the fraction of samples to be used for fitting the individual base learners. Values lower than 1 generally lead to a reduction of variance and an increase in bias.
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
p_test3 = {'learning_rate':[0.15,0.1,0.05,0.01,0.005,0.001], 'n_estimators':[100,250,500,750,1000,1250,1500,1750]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(max_depth=4, min_samples_split=2, min_samples_leaf=1, subsample=1,max_features='sqrt', random_state=10),param_grid = p_test3, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'learning_rate': 0.1, 'n_estimators': 1250}, 0.9279073046083356)
# ({'learning_rate': 0.15, 'n_estimators': 1750}, 0.5295885100008811)
p_test2 = {'max_depth':[2,3,4,5,6,7] }
tuning = GridSearchCV(est | stemming_tokenizer | identifier_name |
manager.py | (
ApiKey,
ApiSecret,
ExchangeApiCredentials,
ExchangeAuthCredentials,
Location,
LocationEventMappingType,
)
from rotkehlchen.user_messages import MessagesAggregator
from .constants import SUPPORTED_EXCHANGES
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.exchanges.kraken import KrakenAccountType
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class ExchangeManager:
def __init__(self, msg_aggregator: MessagesAggregator) -> None:
self.connected_exchanges: dict[Location, list[ExchangeInterface]] = defaultdict(list)
self.msg_aggregator = msg_aggregator
@staticmethod
def _get_exchange_module_name(location: Location) -> str:
if location == Location.BINANCEUS:
return str(Location.BINANCE)
return str(location)
def connected_and_syncing_exchanges_num(self) -> int:
return len(list(self.iterate_exchanges()))
def get_exchange(self, name: str, location: Location) -> Optional[ExchangeInterface]:
"""Get the exchange object for an exchange with a given name and location
Returns None if it can not be found
"""
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return None
for exchange in exchanges_list:
if exchange.name == name:
return exchange
return None
def iterate_exchanges(self) -> Iterator[ExchangeInterface]:
"""Iterate all connected and syncing exchanges"""
with self.database.conn.read_ctx() as cursor:
excluded = self.database.get_settings(cursor).non_syncing_exchanges
for exchanges in self.connected_exchanges.values():
for exchange in exchanges:
# We are not yielding excluded exchanges
if exchange.location_id() not in excluded:
yield exchange
def edit_exchange(
self,
name: str,
location: Location,
new_name: Optional[str],
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
kraken_account_type: Optional['KrakenAccountType'],
binance_selected_trade_pairs: Optional[list[str]],
) -> tuple[bool, str]:
"""Edits both the exchange object and the database entry
Returns True if an entry was found and edited and False otherwise
"""
exchangeobj = self.get_exchange(name=name, location=location)
if not exchangeobj:
return False, f'Could not find {location!s} exchange {name} for editing'
# First validate exchange credentials
edited = exchangeobj.edit_exchange_credentials(ExchangeAuthCredentials(
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
))
if edited is True:
try:
credentials_are_valid, msg = exchangeobj.validate_api_key()
except Exception as e: # pylint: disable=broad-except
msg = str(e)
credentials_are_valid = False
if credentials_are_valid is False:
exchangeobj.reset_to_db_credentials()
return False, f'New credentials are invalid. {msg}'
# Then edit extra properties if needed
if isinstance(exchangeobj, ExchangeWithExtras):
success, msg = exchangeobj.edit_exchange_extras({
KRAKEN_ACCOUNT_TYPE_KEY: kraken_account_type,
BINANCE_MARKETS_KEY: binance_selected_trade_pairs,
})
if success is False:
exchangeobj.reset_to_db_credentials()
return False, f'Failed to edit exchange extras. {msg}'
try:
with self.database.user_write() as cursor:
self.database.edit_exchange(
cursor,
name=name,
location=location,
new_name=new_name,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
kraken_account_type=kraken_account_type,
binance_selected_trade_pairs=binance_selected_trade_pairs,
)
except InputError as e:
exchangeobj.reset_to_db_credentials() # DB is already rolled back at this point
if isinstance(exchangeobj, ExchangeWithExtras):
exchangeobj.reset_to_db_extras()
return False, f"Couldn't update exchange properties in the DB. {e!s}"
# Finally edit the name of the exchange object
if new_name is not None:
exchangeobj.name = new_name
return True, ''
def delete_exchange(self, name: str, location: Location) -> tuple[bool, str]:
"""
Deletes an exchange with the specified name + location from both connected_exchanges
and the DB.
"""
if self.get_exchange(name=name, location=location) is None:
return False, f'{location!s} exchange {name} is not registered'
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return False, f'{location!s} exchange {name} is not registered'
if len(exchanges_list) == 1: # if it is the last exchange of this location
self.connected_exchanges.pop(location)
else:
self.connected_exchanges[location] = [x for x in exchanges_list if x.name != name]
with self.database.user_write() as write_cursor: # Also remove it from the db
self.database.remove_exchange(write_cursor=write_cursor, name=name, location=location) # noqa: E501
self.database.delete_used_query_range_for_exchange(
write_cursor=write_cursor,
location=location,
exchange_name=name,
)
return True, ''
def delete_all_exchanges(self) -> None:
"""Deletes all exchanges from the manager. Not from the DB"""
self.connected_exchanges.clear()
def get_connected_exchanges_info(self) -> list[dict[str, Any]]:
exchange_info = []
for location, exchanges in self.connected_exchanges.items():
for exchangeobj in exchanges:
data = {'location': str(location), 'name': exchangeobj.name}
if location == Location.KRAKEN: # ignore type since we know this is kraken here
data[KRAKEN_ACCOUNT_TYPE_KEY] = str(exchangeobj.account_type) # type: ignore
exchange_info.append(data)
return exchange_info
def _get_exchange_module(self, location: Location) -> ModuleType:
module_name = self._get_exchange_module_name(location)
try:
module = import_module(f'rotkehlchen.exchanges.{module_name}')
except ModuleNotFoundError:
# This should never happen
raise AssertionError(
f'Tried to initialize unknown exchange {location!s}. Should not happen',
) from None
return module
def setup_exchange(
self,
name: str,
location: Location,
api_key: ApiKey,
api_secret: ApiSecret,
database: 'DBHandler',
passphrase: Optional[str] = None,
**kwargs: Any,
) -> tuple[bool, str]:
"""
Set up a new exchange with an API key and an API secret.
For some exchanges there are more attributes to add
"""
if location not in SUPPORTED_EXCHANGES: # also checked via marshmallow
return False, f'Attempted to register unsupported exchange {name}'
if self.get_exchange(name=name, location=location) is not None:
return False, f'{location!s} exchange {name} is already registered'
api_credentials = ExchangeApiCredentials(
name=name,
location=location,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
)
exchange = self.initialize_exchange(
module=self._get_exchange_module(location),
credentials=api_credentials,
database=database,
**kwargs,
)
try:
result, message = exchange.validate_api_key()
except Exception as e: # pylint: disable=broad-except
result = False
message = str(e)
if not result:
log.error(
f'Failed to validate API key for {location!s} exchange {name}'
f' due to {message}',
)
return False, message
self.connected_exchanges[location].append(exchange)
return True, ''
def initialize_exchange(
self,
module: ModuleType,
credentials: ExchangeApiCredentials,
database: 'DBHandler',
**kwargs: Any,
) -> ExchangeInterface:
maybe_exchange = self.get_exchange(name=credentials.name, location=credentials.location)
if maybe_exchange:
|
module_name = module.__name__.split('.')[-1]
exchange_ctor = getattr(module, module_name.capitalize())
if credentials.passphrase is not None:
kwargs['passphrase'] = credentials.passphrase
elif credentials.location == Location.BINANCE:
kwargs['uri'] = BINANCE_BASE_URL
elif credentials.location == Location.BINANCEUS:
kwargs['uri'] = BINANCEUS_BASE_URL
exchange_obj = exchange_ctor(
name=credentials.name,
api_key=credentials.api_key,
secret=credentials.api_secret,
database=database,
msg_aggregator=self.msg_aggregator,
# remove all empty kwargs
**{k: v for k, v in kwargs.items() if v is not None},
)
return exchange_obj
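# Hedged, generic illustration of the lookup convention used above (not part of
# rotkehlchen itself): the exchange class is expected to be the capitalized
# module name, so 'rotkehlchen.exchanges.kraken' should expose a Kraken class.
#
#   def load_exchange_class(module_path: str):
#       module = import_module(module_path)
#       return getattr(module, module_path.rsplit('.', 1)[-1].capitalize())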
def initialize_exchanges(
self,
exchange_credentials: dict[Location, list[ExchangeApiCredentials]],
database: 'DBHandler',
) -> None:
log.debug('Initializing exchanges')
self.database = database
# initialize | return maybe_exchange # already initialized | conditional_block |
manager.py | (
ApiKey,
ApiSecret,
ExchangeApiCredentials,
ExchangeAuthCredentials,
Location,
LocationEventMappingType,
)
from rotkehlchen.user_messages import MessagesAggregator
from .constants import SUPPORTED_EXCHANGES
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.exchanges.kraken import KrakenAccountType
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class ExchangeManager:
def __init__(self, msg_aggregator: MessagesAggregator) -> None:
self.connected_exchanges: dict[Location, list[ExchangeInterface]] = defaultdict(list)
self.msg_aggregator = msg_aggregator
@staticmethod
def _get_exchange_module_name(location: Location) -> str:
if location == Location.BINANCEUS:
return str(Location.BINANCE)
return str(location)
def connected_and_syncing_exchanges_num(self) -> int:
|
def get_exchange(self, name: str, location: Location) -> Optional[ExchangeInterface]:
"""Get the exchange object for an exchange with a given name and location
Returns None if it can not be found
"""
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return None
for exchange in exchanges_list:
if exchange.name == name:
return exchange
return None
def iterate_exchanges(self) -> Iterator[ExchangeInterface]:
"""Iterate all connected and syncing exchanges"""
with self.database.conn.read_ctx() as cursor:
excluded = self.database.get_settings(cursor).non_syncing_exchanges
for exchanges in self.connected_exchanges.values():
for exchange in exchanges:
# We are not yielding excluded exchanges
if exchange.location_id() not in excluded:
yield exchange
def edit_exchange(
self,
name: str,
location: Location,
new_name: Optional[str],
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
kraken_account_type: Optional['KrakenAccountType'],
binance_selected_trade_pairs: Optional[list[str]],
) -> tuple[bool, str]:
"""Edits both the exchange object and the database entry
Returns True if an entry was found and edited and False otherwise
"""
exchangeobj = self.get_exchange(name=name, location=location)
if not exchangeobj:
return False, f'Could not find {location!s} exchange {name} for editing'
# First validate exchange credentials
edited = exchangeobj.edit_exchange_credentials(ExchangeAuthCredentials(
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
))
if edited is True:
try:
credentials_are_valid, msg = exchangeobj.validate_api_key()
except Exception as e: # pylint: disable=broad-except
msg = str(e)
credentials_are_valid = False
if credentials_are_valid is False:
exchangeobj.reset_to_db_credentials()
return False, f'New credentials are invalid. {msg}'
# Then edit extra properties if needed
if isinstance(exchangeobj, ExchangeWithExtras):
success, msg = exchangeobj.edit_exchange_extras({
KRAKEN_ACCOUNT_TYPE_KEY: kraken_account_type,
BINANCE_MARKETS_KEY: binance_selected_trade_pairs,
})
if success is False:
exchangeobj.reset_to_db_credentials()
return False, f'Failed to edit exchange extras. {msg}'
try:
with self.database.user_write() as cursor:
self.database.edit_exchange(
cursor,
name=name,
location=location,
new_name=new_name,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
kraken_account_type=kraken_account_type,
binance_selected_trade_pairs=binance_selected_trade_pairs,
)
except InputError as e:
exchangeobj.reset_to_db_credentials() # DB is already rolled back at this point
if isinstance(exchangeobj, ExchangeWithExtras):
exchangeobj.reset_to_db_extras()
return False, f"Couldn't update exchange properties in the DB. {e!s}"
# Finally edit the name of the exchange object
if new_name is not None:
exchangeobj.name = new_name
return True, ''
def delete_exchange(self, name: str, location: Location) -> tuple[bool, str]:
"""
Deletes an exchange with the specified name + location from both connected_exchanges
and the DB.
"""
if self.get_exchange(name=name, location=location) is None:
return False, f'{location!s} exchange {name} is not registered'
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return False, f'{location!s} exchange {name} is not registered'
if len(exchanges_list) == 1: # if it is the last exchange of this location
self.connected_exchanges.pop(location)
else:
self.connected_exchanges[location] = [x for x in exchanges_list if x.name != name]
with self.database.user_write() as write_cursor: # Also remove it from the db
self.database.remove_exchange(write_cursor=write_cursor, name=name, location=location) # noqa: E501
self.database.delete_used_query_range_for_exchange(
write_cursor=write_cursor,
location=location,
exchange_name=name,
)
return True, ''
def delete_all_exchanges(self) -> None:
"""Deletes all exchanges from the manager. Not from the DB"""
self.connected_exchanges.clear()
def get_connected_exchanges_info(self) -> list[dict[str, Any]]:
exchange_info = []
for location, exchanges in self.connected_exchanges.items():
for exchangeobj in exchanges:
data = {'location': str(location), 'name': exchangeobj.name}
if location == Location.KRAKEN: # ignore type since we know this is kraken here
data[KRAKEN_ACCOUNT_TYPE_KEY] = str(exchangeobj.account_type) # type: ignore
exchange_info.append(data)
return exchange_info
def _get_exchange_module(self, location: Location) -> ModuleType:
module_name = self._get_exchange_module_name(location)
try:
module = import_module(f'rotkehlchen.exchanges.{module_name}')
except ModuleNotFoundError:
# This should never happen
raise AssertionError(
f'Tried to initialize unknown exchange {location!s}. Should not happen',
) from None
return module
def setup_exchange(
self,
name: str,
location: Location,
api_key: ApiKey,
api_secret: ApiSecret,
database: 'DBHandler',
passphrase: Optional[str] = None,
**kwargs: Any,
) -> tuple[bool, str]:
"""
Set up a new exchange with an API key and an API secret.
For some exchanges there are more attributes to add
"""
if location not in SUPPORTED_EXCHANGES: # also checked via marshmallow
return False, f'Attempted to register unsupported exchange {name}'
if self.get_exchange(name=name, location=location) is not None:
return False, f'{location!s} exchange {name} is already registered'
api_credentials = ExchangeApiCredentials(
name=name,
location=location,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
)
exchange = self.initialize_exchange(
module=self._get_exchange_module(location),
credentials=api_credentials,
database=database,
**kwargs,
)
try:
result, message = exchange.validate_api_key()
except Exception as e: # pylint: disable=broad-except
result = False
message = str(e)
if not result:
log.error(
f'Failed to validate API key for {location!s} exchange {name}'
f' due to {message}',
)
return False, message
self.connected_exchanges[location].append(exchange)
return True, ''
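# Hedged usage sketch (hypothetical values; not part of the module): the
# validation flow above means callers only get a registered exchange when the
# supplied credentials pass validate_api_key().
#
#   ok, msg = exchange_manager.setup_exchange(
#       name='my kraken',
#       location=Location.KRAKEN,
#       api_key=ApiKey('key'),
#       api_secret=ApiSecret(b'secret'),
#       database=db,
#   )
#   if not ok:
#       log.error(f'failed to register exchange: {msg}')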
def initialize_exchange(
self,
module: ModuleType,
credentials: ExchangeApiCredentials,
database: 'DBHandler',
**kwargs: Any,
) -> ExchangeInterface:
maybe_exchange = self.get_exchange(name=credentials.name, location=credentials.location)
if maybe_exchange:
return maybe_exchange # already initialized
module_name = module.__name__.split('.')[-1]
exchange_ctor = getattr(module, module_name.capitalize())
if credentials.passphrase is not None:
kwargs['passphrase'] = credentials.passphrase
elif credentials.location == Location.BINANCE:
kwargs['uri'] = BINANCE_BASE_URL
elif credentials.location == Location.BINANCEUS:
kwargs['uri'] = BINANCEUS_BASE_URL
exchange_obj = exchange_ctor(
name=credentials.name,
api_key=credentials.api_key,
secret=credentials.api_secret,
database=database,
msg_aggregator=self.msg_aggregator,
# remove all empty kwargs
**{k: v for k, v in kwargs.items() if v is not None},
)
return exchange_obj
def initialize_exchanges(
self,
exchange_credentials: dict[Location, list[ExchangeApiCredentials]],
database: 'DBHandler',
) -> None:
log.debug('Initializing exchanges')
self.database = database
# | return len(list(self.iterate_exchanges())) | identifier_body |
manager.py | (
ApiKey,
ApiSecret,
ExchangeApiCredentials,
ExchangeAuthCredentials,
Location,
LocationEventMappingType,
)
from rotkehlchen.user_messages import MessagesAggregator
from .constants import SUPPORTED_EXCHANGES
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.exchanges.kraken import KrakenAccountType
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class ExchangeManager:
def __init__(self, msg_aggregator: MessagesAggregator) -> None:
self.connected_exchanges: dict[Location, list[ExchangeInterface]] = defaultdict(list)
self.msg_aggregator = msg_aggregator
@staticmethod
def _get_exchange_module_name(location: Location) -> str:
if location == Location.BINANCEUS:
return str(Location.BINANCE)
return str(location)
def | (self) -> int:
return len(list(self.iterate_exchanges()))
def get_exchange(self, name: str, location: Location) -> Optional[ExchangeInterface]:
"""Get the exchange object for an exchange with a given name and location
Returns None if it can not be found
"""
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return None
for exchange in exchanges_list:
if exchange.name == name:
return exchange
return None
def iterate_exchanges(self) -> Iterator[ExchangeInterface]:
"""Iterate all connected and syncing exchanges"""
with self.database.conn.read_ctx() as cursor:
excluded = self.database.get_settings(cursor).non_syncing_exchanges
for exchanges in self.connected_exchanges.values():
for exchange in exchanges:
# We are not yielding excluded exchanges
if exchange.location_id() not in excluded:
yield exchange
def edit_exchange(
self,
name: str,
location: Location,
new_name: Optional[str],
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
kraken_account_type: Optional['KrakenAccountType'],
binance_selected_trade_pairs: Optional[list[str]],
) -> tuple[bool, str]:
"""Edits both the exchange object and the database entry
Returns True if an entry was found and edited and False otherwise
"""
exchangeobj = self.get_exchange(name=name, location=location)
if not exchangeobj:
return False, f'Could not find {location!s} exchange {name} for editing'
# First validate exchange credentials
edited = exchangeobj.edit_exchange_credentials(ExchangeAuthCredentials(
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
))
if edited is True:
try:
credentials_are_valid, msg = exchangeobj.validate_api_key()
except Exception as e: # pylint: disable=broad-except
msg = str(e)
credentials_are_valid = False
if credentials_are_valid is False:
exchangeobj.reset_to_db_credentials()
return False, f'New credentials are invalid. {msg}'
# Then edit extra properties if needed
if isinstance(exchangeobj, ExchangeWithExtras):
success, msg = exchangeobj.edit_exchange_extras({
KRAKEN_ACCOUNT_TYPE_KEY: kraken_account_type,
BINANCE_MARKETS_KEY: binance_selected_trade_pairs,
})
if success is False:
exchangeobj.reset_to_db_credentials()
return False, f'Failed to edit exchange extras. {msg}'
try:
with self.database.user_write() as cursor:
self.database.edit_exchange(
cursor,
name=name,
location=location,
new_name=new_name,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
kraken_account_type=kraken_account_type,
binance_selected_trade_pairs=binance_selected_trade_pairs,
)
except InputError as e:
exchangeobj.reset_to_db_credentials() # DB is already rolled back at this point
if isinstance(exchangeobj, ExchangeWithExtras):
exchangeobj.reset_to_db_extras()
return False, f"Couldn't update exchange properties in the DB. {e!s}"
# Finally edit the name of the exchange object
if new_name is not None:
exchangeobj.name = new_name
return True, ''
def delete_exchange(self, name: str, location: Location) -> tuple[bool, str]:
"""
Deletes an exchange with the specified name + location from both connected_exchanges
and the DB.
"""
if self.get_exchange(name=name, location=location) is None:
return False, f'{location!s} exchange {name} is not registered'
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return False, f'{location!s} exchange {name} is not registered'
if len(exchanges_list) == 1: # if it is the last exchange of this location
self.connected_exchanges.pop(location)
else:
self.connected_exchanges[location] = [x for x in exchanges_list if x.name != name]
with self.database.user_write() as write_cursor: # Also remove it from the db
self.database.remove_exchange(write_cursor=write_cursor, name=name, location=location) # noqa: E501
self.database.delete_used_query_range_for_exchange(
write_cursor=write_cursor,
location=location,
exchange_name=name,
)
return True, ''
def delete_all_exchanges(self) -> None:
"""Deletes all exchanges from the manager. Not from the DB"""
self.connected_exchanges.clear()
def get_connected_exchanges_info(self) -> list[dict[str, Any]]:
exchange_info = []
for location, exchanges in self.connected_exchanges.items():
for exchangeobj in exchanges:
data = {'location': str(location), 'name': exchangeobj.name}
if location == Location.KRAKEN: # ignore type since we know this is kraken here
data[KRAKEN_ACCOUNT_TYPE_KEY] = str(exchangeobj.account_type) # type: ignore
exchange_info.append(data)
return exchange_info
def _get_exchange_module(self, location: Location) -> ModuleType:
module_name = self._get_exchange_module_name(location)
try:
module = import_module(f'rotkehlchen.exchanges.{module_name}')
except ModuleNotFoundError:
# This should never happen
raise AssertionError(
f'Tried to initialize unknown exchange {location!s}. Should not happen',
) from None
return module
def setup_exchange(
self,
name: str,
location: Location,
api_key: ApiKey,
api_secret: ApiSecret,
database: 'DBHandler',
passphrase: Optional[str] = None,
**kwargs: Any,
) -> tuple[bool, str]:
"""
Set up a new exchange with an API key and an API secret.
For some exchanges there are more attributes to add
"""
if location not in SUPPORTED_EXCHANGES: # also checked via marshmallow
return False, f'Attempted to register unsupported exchange {name}'
if self.get_exchange(name=name, location=location) is not None:
return False, f'{location!s} exchange {name} is already registered'
api_credentials = ExchangeApiCredentials(
name=name,
location=location,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
)
exchange = self.initialize_exchange(
module=self._get_exchange_module(location),
credentials=api_credentials,
database=database,
**kwargs,
)
try:
result, message = exchange.validate_api_key()
except Exception as e: # pylint: disable=broad-except
result = False
message = str(e)
if not result:
log.error(
f'Failed to validate API key for {location!s} exchange {name}'
f' due to {message}',
)
return False, message
self.connected_exchanges[location].append(exchange)
return True, ''
def initialize_exchange(
self,
module: ModuleType,
credentials: ExchangeApiCredentials,
database: 'DBHandler',
**kwargs: Any,
) -> ExchangeInterface:
maybe_exchange = self.get_exchange(name=credentials.name, location=credentials.location)
if maybe_exchange:
return maybe_exchange # already initialized
module_name = module.__name__.split('.')[-1]
exchange_ctor = getattr(module, module_name.capitalize())
if credentials.passphrase is not None:
kwargs['passphrase'] = credentials.passphrase
elif credentials.location == Location.BINANCE:
kwargs['uri'] = BINANCE_BASE_URL
elif credentials.location == Location.BINANCEUS:
kwargs['uri'] = BINANCEUS_BASE_URL
exchange_obj = exchange_ctor(
name=credentials.name,
api_key=credentials.api_key,
secret=credentials.api_secret,
database=database,
msg_aggregator=self.msg_aggregator,
# remove all empty kwargs
**{k: v for k, v in kwargs.items() if v is not None},
)
return exchange_obj
def initialize_exchanges(
self,
exchange_credentials: dict[Location, list[ExchangeApiCredentials]],
database: 'DBHandler',
) -> None:
log.debug('Initializing exchanges')
self.database = database
# | connected_and_syncing_exchanges_num | identifier_name |
manager.py | (
ApiKey,
ApiSecret,
ExchangeApiCredentials,
ExchangeAuthCredentials,
Location,
LocationEventMappingType,
)
from rotkehlchen.user_messages import MessagesAggregator
from .constants import SUPPORTED_EXCHANGES
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.exchanges.kraken import KrakenAccountType
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class ExchangeManager:
def __init__(self, msg_aggregator: MessagesAggregator) -> None:
self.connected_exchanges: dict[Location, list[ExchangeInterface]] = defaultdict(list)
self.msg_aggregator = msg_aggregator
@staticmethod
def _get_exchange_module_name(location: Location) -> str:
if location == Location.BINANCEUS:
return str(Location.BINANCE)
return str(location)
def connected_and_syncing_exchanges_num(self) -> int:
return len(list(self.iterate_exchanges()))
def get_exchange(self, name: str, location: Location) -> Optional[ExchangeInterface]:
"""Get the exchange object for an exchange with a given name and location
Returns None if it can not be found
"""
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return None
for exchange in exchanges_list:
if exchange.name == name:
return exchange
return None
def iterate_exchanges(self) -> Iterator[ExchangeInterface]:
"""Iterate all connected and syncing exchanges"""
with self.database.conn.read_ctx() as cursor:
excluded = self.database.get_settings(cursor).non_syncing_exchanges
for exchanges in self.connected_exchanges.values():
for exchange in exchanges:
# We are not yielding excluded exchanges
if exchange.location_id() not in excluded:
yield exchange
def edit_exchange(
self,
name: str,
location: Location,
new_name: Optional[str],
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
kraken_account_type: Optional['KrakenAccountType'],
binance_selected_trade_pairs: Optional[list[str]],
) -> tuple[bool, str]:
"""Edits both the exchange object and the database entry
Returns True if an entry was found and edited and False otherwise
"""
exchangeobj = self.get_exchange(name=name, location=location)
if not exchangeobj:
return False, f'Could not find {location!s} exchange {name} for editing'
# First validate exchange credentials
edited = exchangeobj.edit_exchange_credentials(ExchangeAuthCredentials(
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
))
if edited is True:
try:
credentials_are_valid, msg = exchangeobj.validate_api_key()
except Exception as e: # pylint: disable=broad-except
msg = str(e)
credentials_are_valid = False
if credentials_are_valid is False:
exchangeobj.reset_to_db_credentials()
return False, f'New credentials are invalid. {msg}'
# Then edit extra properties if needed
if isinstance(exchangeobj, ExchangeWithExtras):
success, msg = exchangeobj.edit_exchange_extras({
KRAKEN_ACCOUNT_TYPE_KEY: kraken_account_type,
BINANCE_MARKETS_KEY: binance_selected_trade_pairs,
})
if success is False:
exchangeobj.reset_to_db_credentials()
return False, f'Failed to edit exchange extras. {msg}'
try:
with self.database.user_write() as cursor:
self.database.edit_exchange(
cursor,
name=name,
location=location,
new_name=new_name,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
kraken_account_type=kraken_account_type,
binance_selected_trade_pairs=binance_selected_trade_pairs,
)
except InputError as e:
exchangeobj.reset_to_db_credentials() # DB is already rolled back at this point
if isinstance(exchangeobj, ExchangeWithExtras):
exchangeobj.reset_to_db_extras()
return False, f"Couldn't update exchange properties in the DB. {e!s}"
# Finally edit the name of the exchange object
if new_name is not None:
exchangeobj.name = new_name
return True, ''
def delete_exchange(self, name: str, location: Location) -> tuple[bool, str]:
"""
Deletes an exchange with the specified name + location from both connected_exchanges
and the DB.
"""
if self.get_exchange(name=name, location=location) is None:
return False, f'{location!s} exchange {name} is not registered'
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return False, f'{location!s} exchange {name} is not registered'
if len(exchanges_list) == 1: # if it is the last exchange of this location
self.connected_exchanges.pop(location)
else:
self.connected_exchanges[location] = [x for x in exchanges_list if x.name != name]
with self.database.user_write() as write_cursor: # Also remove it from the db
self.database.remove_exchange(write_cursor=write_cursor, name=name, location=location) # noqa: E501
self.database.delete_used_query_range_for_exchange(
write_cursor=write_cursor,
location=location,
exchange_name=name,
)
return True, ''
def delete_all_exchanges(self) -> None:
"""Deletes all exchanges from the manager. Not from the DB"""
self.connected_exchanges.clear()
def get_connected_exchanges_info(self) -> list[dict[str, Any]]:
exchange_info = []
for location, exchanges in self.connected_exchanges.items():
for exchangeobj in exchanges:
data = {'location': str(location), 'name': exchangeobj.name}
if location == Location.KRAKEN: # ignore type since we know this is kraken here
data[KRAKEN_ACCOUNT_TYPE_KEY] = str(exchangeobj.account_type) # type: ignore
exchange_info.append(data)
return exchange_info
def _get_exchange_module(self, location: Location) -> ModuleType:
module_name = self._get_exchange_module_name(location)
try:
module = import_module(f'rotkehlchen.exchanges.{module_name}')
except ModuleNotFoundError:
# This should never happen
raise AssertionError(
f'Tried to initialize unknown exchange {location!s}. Should not happen',
) from None
return module
def setup_exchange(
self,
name: str,
location: Location,
api_key: ApiKey,
api_secret: ApiSecret,
database: 'DBHandler',
passphrase: Optional[str] = None,
**kwargs: Any,
) -> tuple[bool, str]:
"""
Set up a new exchange with an API key and an API secret.
For some exchanges there are more attributes to add
"""
if location not in SUPPORTED_EXCHANGES: # also checked via marshmallow
return False, f'Attempted to register unsupported exchange {name}'
if self.get_exchange(name=name, location=location) is not None:
return False, f'{location!s} exchange {name} is already registered'
api_credentials = ExchangeApiCredentials(
name=name,
location=location,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
)
exchange = self.initialize_exchange(
module=self._get_exchange_module(location),
credentials=api_credentials,
database=database,
**kwargs,
)
try:
result, message = exchange.validate_api_key()
except Exception as e: # pylint: disable=broad-except
result = False
message = str(e)
if not result:
log.error(
f'Failed to validate API key for {location!s} exchange {name}'
f' due to {message}',
)
return False, message
self.connected_exchanges[location].append(exchange)
return True, ''
| module: ModuleType,
credentials: ExchangeApiCredentials,
database: 'DBHandler',
**kwargs: Any,
) -> ExchangeInterface:
maybe_exchange = self.get_exchange(name=credentials.name, location=credentials.location)
if maybe_exchange:
return maybe_exchange # already initialized
module_name = module.__name__.split('.')[-1]
exchange_ctor = getattr(module, module_name.capitalize())
if credentials.passphrase is not None:
kwargs['passphrase'] = credentials.passphrase
elif credentials.location == Location.BINANCE:
kwargs['uri'] = BINANCE_BASE_URL
elif credentials.location == Location.BINANCEUS:
kwargs['uri'] = BINANCEUS_BASE_URL
exchange_obj = exchange_ctor(
name=credentials.name,
api_key=credentials.api_key,
secret=credentials.api_secret,
database=database,
msg_aggregator=self.msg_aggregator,
# remove all empty kwargs
**{k: v for k, v in kwargs.items() if v is not None},
)
return exchange_obj
def initialize_exchanges(
self,
exchange_credentials: dict[Location, list[ExchangeApiCredentials]],
database: 'DBHandler',
) -> None:
log.debug('Initializing exchanges')
self.database = database
# initialize | def initialize_exchange(
self, | random_line_split |
interrupt.rs | match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PS2_KEYBOARD as u8)
}
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn test_isr<H: Handlers<Registers>>(mut registers: Registers) {
H::test_interrupt(Context {
registers: &mut registers,
code: (),
});
}
extern "x86-interrupt" fn invalid_tss_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "invalid task-state segment!");
let msg = selector.named("task-state segment (TSS)");
let code = CodeFault {
error_code: Some(&msg),
kind: "Invalid TSS (0xA)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn segment_not_present_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a segment was not present!");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Segment Not Present (0xB)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn stack_segment_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a stack-segment fault is happening");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Stack-Segment Fault (0xC)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn gpf_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let segment = if code > 0 {
Some(SelectorErrorCode(code as u16))
} else {
None
};
tracing::error!(?segment, "lmao, a general protection fault is happening");
let error_code = segment.map(|seg| seg.named("selector"));
let code = CodeFault {
error_code: error_code.as_ref().map(|code| code as &dyn fmt::Display),
kind: "General Protection Fault (0xD)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn spurious_isr() {
tracing::trace!("spurious");
}
// === exceptions ===
// these exceptions are mapped to the HAL `Handlers` trait's "code
// fault" handler, and indicate that the code that was executing did a
// Bad Thing
gen_code_faults! {
self, H,
Self::DIVIDE_BY_ZERO => fn div_0_isr("Divide-By-Zero (0x0)"),
Self::OVERFLOW => fn overflow_isr("Overflow (0x4)"),
Self::BOUND_RANGE_EXCEEDED => fn br_isr("Bound Range Exceeded (0x5)"),
Self::INVALID_OPCODE => fn ud_isr("Invalid Opcode (0x6)"),
Self::DEVICE_NOT_AVAILABLE => fn no_fpu_isr("Device (FPU) Not Available (0x7)"),
Self::ALIGNMENT_CHECK => fn alignment_check_isr("Alignment Check (0x11)", code),
Self::SIMD_FLOATING_POINT => fn simd_fp_exn_isr("SIMD Floating-Point Exception (0x13)"),
Self::X87_FPU_EXCEPTION => fn x87_exn_isr("x87 Floating-Point Exception (0x10)"),
}
// other exceptions, not mapped to the "code fault" handler
self.set_isr(Self::PAGE_FAULT, page_fault_isr::<H> as *const ());
self.set_isr(Self::INVALID_TSS, invalid_tss_isr::<H> as *const ());
self.set_isr(
Self::SEGMENT_NOT_PRESENT,
segment_not_present_isr::<H> as *const (),
);
self.set_isr(
Self::STACK_SEGMENT_FAULT,
stack_segment_isr::<H> as *const (),
);
self.set_isr(Self::GENERAL_PROTECTION_FAULT, gpf_isr::<H> as *const ());
self.set_isr(Self::DOUBLE_FAULT, double_fault_isr::<H> as *const ());
// === hardware interrupts ===
// ISA standard hardware interrupts mapped on both the PICs and IO APIC
// interrupt models.
self.set_isr(Self::PIC_PIT_TIMER, pit_timer_isr::<H> as *const ());
self.set_isr(Self::IOAPIC_PIT_TIMER, pit_timer_isr::<H> as *const ());
self.set_isr(Self::PIC_PS2_KEYBOARD, keyboard_isr::<H> as *const ());
self.set_isr(Self::IOAPIC_PS2_KEYBOARD, keyboard_isr::<H> as *const ());
// local APIC specific hardware interrupts
self.set_isr(Self::LOCAL_APIC_SPURIOUS, spurious_isr as *const ());
self.set_isr(Self::LOCAL_APIC_TIMER, apic_timer_isr::<H> as *const ());
// vector 69 (nice) is reserved by the HAL for testing the IDT.
self.set_isr(69, test_isr::<H> as *const ());
Ok(())
}
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self {
instruction_ptr,
code_segment,
stack_ptr,
stack_segment,
_pad: _,
cpu_flags,
_pad2: _,
} = self;
f.debug_struct("Registers")
.field("instruction_ptr", instruction_ptr)
.field("code_segment", code_segment)
.field("cpu_flags", &format_args!("{cpu_flags:#b}"))
.field("stack_ptr", stack_ptr)
.field("stack_segment", stack_segment)
.finish()
}
}
impl fmt::Display for Registers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, " rip: {:?}", self.instruction_ptr)?;
writeln!(f, " cs: {:?}", self.code_segment)?;
writeln!(f, " flags: {:#b}", self.cpu_flags)?;
writeln!(f, " rsp: {:?}", self.stack_ptr)?;
writeln!(f, " ss: {:?}", self.stack_segment)?;
Ok(())
}
}
pub fn fire_test_interrupt() {
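    // fires vector 69, which `register_handlers` wires to `test_isr`, to exercise the IDT.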
unsafe { asm!("int {0}", const 69) }
}
// === impl SelectorErrorCode ===
impl SelectorErrorCode {
#[inline]
fn named(self, segment_kind: &'static str) -> NamedSelectorErrorCode {
NamedSelectorErrorCode {
segment_kind,
code: self,
}
}
fn display(&self) -> impl fmt::Display {
struct PrettyErrorCode(SelectorErrorCode);
impl fmt::Display for PrettyErrorCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let table = self.0.get(SelectorErrorCode::TABLE);
let index = self.0.get(SelectorErrorCode::INDEX);
write!(f, "{table} index {index}")?;
if self.0.get(SelectorErrorCode::EXTERNAL) {
f.write_str(" (from an external source)")?;
}
write!(f, " (error code {:#b})", self.0.bits())?;
Ok(())
}
}
PrettyErrorCode(*self)
}
}
struct NamedSelectorErrorCode {
segment_kind: &'static str,
code: SelectorErrorCode,
}
impl fmt::Display for NamedSelectorErrorCode {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{} at {}", self.segment_kind, self.code.display())
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::mem::size_of;
#[test]
fn | registers_is_correct_size | identifier_name |
|
interrupt.rs | t::Idt> = spin::Mutex::new(idt::Idt::new());
static INTERRUPT_CONTROLLER: InitOnce<Controller> = InitOnce::uninitialized();
impl Controller {
// const DEFAULT_IOAPIC_BASE_PADDR: u64 = 0xFEC00000;
pub fn idt() -> spin::MutexGuard<'static, idt::Idt> {
IDT.lock()
}
#[tracing::instrument(level = "info", name = "interrupt::Controller::init")]
pub fn init<H: Handlers<Registers>>() {
tracing::info!("intializing IDT...");
let mut idt = IDT.lock();
idt.register_handlers::<H>().unwrap();
unsafe {
idt.load_raw();
}
}
pub fn enable_hardware_interrupts(
acpi: Option<&acpi::InterruptModel>,
frame_alloc: &impl hal_core::mem::page::Alloc<mm::size::Size4Kb>,
) -> &'static Self {
let mut pics = pic::CascadedPic::new();
// regardless of whether APIC or PIC interrupt handling will be used,
// the PIC interrupt vectors must be remapped so that they do not
// conflict with CPU exceptions.
unsafe {
tracing::debug!(
big = Idt::PIC_BIG_START,
little = Idt::PIC_LITTLE_START,
"remapping PIC interrupt vectors"
);
pics.set_irq_address(Idt::PIC_BIG_START as u8, Idt::PIC_LITTLE_START as u8);
}
let model = match acpi {
Some(acpi::InterruptModel::Apic(apic_info)) => {
tracing::info!("detected APIC interrupt model");
let mut pagectrl = mm::PageCtrl::current();
// disable the 8259 PICs so that we can use APIC interrupts instead
unsafe {
pics.disable();
}
tracing::info!("disabled 8259 PICs");
// configure the I/O APIC
let mut io = {
// TODO(eliza): consider actually using other I/O APICs? do
// we need them for anything??
tracing::trace!(?apic_info.io_apics, "found {} IO APICs", apic_info.io_apics.len());
let io_apic = &apic_info.io_apics[0];
let addr = PAddr::from_u64(io_apic.address as u64);
tracing::debug!(ioapic.paddr = ?addr, "IOAPIC");
IoApic::new(addr, &mut pagectrl, frame_alloc)
};
// map the standard ISA hardware interrupts to I/O APIC
// redirection entries.
io.map_isa_irqs(Idt::IOAPIC_START as u8);
// unmask the PIT timer vector --- we'll need this for calibrating
// the local APIC timer...
io.set_masked(IoApic::PIT_TIMER_IRQ, false);
// unmask the PS/2 keyboard interrupt as well.
io.set_masked(IoApic::PS2_KEYBOARD_IRQ, false);
// enable the local APIC
let local = LocalApic::new(&mut pagectrl, frame_alloc);
local.enable(Idt::LOCAL_APIC_SPURIOUS as u8);
InterruptModel::Apic {
local,
io: spin::Mutex::new(io),
}
}
model => {
if model.is_none() {
tracing::warn!("platform does not support ACPI; falling back to 8259 PIC");
} else {
tracing::warn!(
"ACPI does not indicate APIC interrupt model; falling back to 8259 PIC"
)
}
tracing::info!("configuring 8259 PIC interrupts...");
unsafe {
                    // functionally a no-op, since interrupts from the PC/AT PIC are enabled at boot; we're just making it
                    // clear for you, the reader, that at this point they are definitely intentionally enabled.
pics.enable();
}
InterruptModel::Pic(spin::Mutex::new(pics))
}
};
tracing::trace!(interrupt_model = ?model);
let controller = INTERRUPT_CONTROLLER.init(Self { model });
// `sti` may not be called until the interrupt controller static is
// fully initialized, as an interrupt that occurs before it is
// initialized may attempt to access the static to finish the interrupt!
unsafe {
crate::cpu::intrinsics::sti();
}
controller
}
/// Starts a periodic timer which fires the `timer_tick` interrupt of the
/// provided [`Handlers`] every time `interval` elapses.
pub fn start_periodic_timer(
&self,
interval: Duration,
) -> Result<(), crate::time::InvalidDuration> {
match self.model {
InterruptModel::Pic(_) => crate::time::PIT.lock().start_periodic_timer(interval),
InterruptModel::Apic { ref local, .. } => {
local.start_periodic_timer(interval, Idt::LOCAL_APIC_TIMER as u8)
}
}
}
}
impl<'a, T> hal_core::interrupt::Context for Context<'a, T> {
type Registers = Registers;
fn registers(&self) -> &Registers {
self.registers
}
/// # Safety
///
/// Mutating the value of saved interrupt registers can cause
/// undefined behavior.
unsafe fn registers_mut(&mut self) -> &mut Registers {
self.registers
}
}
impl<'a> ctx::PageFault for Context<'a, PageFaultCode> {
fn fault_vaddr(&self) -> crate::VAddr |
fn debug_error_code(&self) -> &dyn fmt::Debug {
&self.code
}
fn display_error_code(&self) -> &dyn fmt::Display {
&self.code
}
}
impl<'a> ctx::CodeFault for Context<'a, CodeFault<'a>> {
fn is_user_mode(&self) -> bool {
false // TODO(eliza)
}
fn instruction_ptr(&self) -> crate::VAddr {
self.registers.instruction_ptr
}
fn fault_kind(&self) -> &'static str {
self.code.kind
}
fn details(&self) -> Option<&dyn fmt::Display> {
self.code.error_code
}
}
impl<'a> Context<'a, ErrorCode> {
pub fn error_code(&self) -> ErrorCode {
self.code
}
}
impl<'a> Context<'a, PageFaultCode> {
pub fn page_fault_code(&self) -> PageFaultCode {
self.code
}
}
impl hal_core::interrupt::Control for Idt {
// type Vector = u8;
type Registers = Registers;
#[inline]
unsafe fn disable(&mut self) {
crate::cpu::intrinsics::cli();
}
#[inline]
unsafe fn enable(&mut self) {
crate::cpu::intrinsics::sti();
tracing::trace!("interrupts enabled");
}
fn is_enabled(&self) -> bool {
unimplemented!("eliza do this one!!!")
}
fn register_handlers<H>(&mut self) -> Result<(), hal_core::interrupt::RegistrationError>
where
H: Handlers<Registers>,
{
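        // helper macro: for each listed exception vector, generate an `extern "x86-interrupt"` ISR
        // (with or without a CPU-pushed error code) that forwards a `CodeFault` to `H::code_fault`,
        // and register it via `set_isr`.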
macro_rules! gen_code_faults {
($self:ident, $h:ty, $($vector:path => fn $name:ident($($rest:tt)+),)+) => {
$(
gen_code_faults! {@ $name($($rest)+); }
$self.set_isr($vector, $name::<$h> as *const ());
)+
};
(@ $name:ident($kind:literal);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(mut registers: Registers) {
let code = CodeFault {
error_code: None,
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
(@ $name:ident($kind:literal, code);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
let code = CodeFault {
error_code: Some(&code),
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
}
let span = tracing::debug_span!("Idt::register_handlers");
let _enter = span.enter();
extern "x86-interrupt" fn page_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: PageFaultCode,
) {
H::page_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn double_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
H::double_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn pit_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
use core::sync::atomic::Ordering;
// if we weren't trying to do a PIT sleep, handle the timer tick
| {
crate::control_regs::Cr2::read()
} | identifier_body |
interrupt.rs | interrupt!
unsafe {
crate::cpu::intrinsics::sti();
}
controller
}
/// Starts a periodic timer which fires the `timer_tick` interrupt of the
/// provided [`Handlers`] every time `interval` elapses.
pub fn start_periodic_timer(
&self,
interval: Duration,
) -> Result<(), crate::time::InvalidDuration> {
match self.model {
InterruptModel::Pic(_) => crate::time::PIT.lock().start_periodic_timer(interval),
InterruptModel::Apic { ref local, .. } => {
local.start_periodic_timer(interval, Idt::LOCAL_APIC_TIMER as u8)
}
}
}
}
impl<'a, T> hal_core::interrupt::Context for Context<'a, T> {
type Registers = Registers;
fn registers(&self) -> &Registers {
self.registers
}
/// # Safety
///
/// Mutating the value of saved interrupt registers can cause
/// undefined behavior.
unsafe fn registers_mut(&mut self) -> &mut Registers {
self.registers
}
}
impl<'a> ctx::PageFault for Context<'a, PageFaultCode> {
fn fault_vaddr(&self) -> crate::VAddr {
crate::control_regs::Cr2::read()
}
fn debug_error_code(&self) -> &dyn fmt::Debug {
&self.code
}
fn display_error_code(&self) -> &dyn fmt::Display {
&self.code
}
}
impl<'a> ctx::CodeFault for Context<'a, CodeFault<'a>> {
fn is_user_mode(&self) -> bool {
false // TODO(eliza)
}
fn instruction_ptr(&self) -> crate::VAddr {
self.registers.instruction_ptr
}
fn fault_kind(&self) -> &'static str {
self.code.kind
}
fn details(&self) -> Option<&dyn fmt::Display> {
self.code.error_code
}
}
impl<'a> Context<'a, ErrorCode> {
pub fn error_code(&self) -> ErrorCode {
self.code
}
}
impl<'a> Context<'a, PageFaultCode> {
pub fn page_fault_code(&self) -> PageFaultCode {
self.code
}
}
impl hal_core::interrupt::Control for Idt {
// type Vector = u8;
type Registers = Registers;
#[inline]
unsafe fn disable(&mut self) {
crate::cpu::intrinsics::cli();
}
#[inline]
unsafe fn enable(&mut self) {
crate::cpu::intrinsics::sti();
tracing::trace!("interrupts enabled");
}
fn is_enabled(&self) -> bool {
unimplemented!("eliza do this one!!!")
}
fn register_handlers<H>(&mut self) -> Result<(), hal_core::interrupt::RegistrationError>
where
H: Handlers<Registers>,
{
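        // helper macro: for each listed exception vector, generate an `extern "x86-interrupt"` ISR
        // (with or without a CPU-pushed error code) that forwards a `CodeFault` to `H::code_fault`,
        // and register it via `set_isr`.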
macro_rules! gen_code_faults {
($self:ident, $h:ty, $($vector:path => fn $name:ident($($rest:tt)+),)+) => {
$(
gen_code_faults! {@ $name($($rest)+); }
$self.set_isr($vector, $name::<$h> as *const ());
)+
};
(@ $name:ident($kind:literal);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(mut registers: Registers) {
let code = CodeFault {
error_code: None,
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
(@ $name:ident($kind:literal, code);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
let code = CodeFault {
error_code: Some(&code),
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
}
let span = tracing::debug_span!("Idt::register_handlers");
let _enter = span.enter();
extern "x86-interrupt" fn page_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: PageFaultCode,
) {
H::page_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn double_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
H::double_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn pit_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
use core::sync::atomic::Ordering;
// if we weren't trying to do a PIT sleep, handle the timer tick
// instead.
let was_sleeping = crate::time::pit::SLEEPING
.compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
.is_ok();
if !was_sleeping {
H::timer_tick();
} else {
tracing::trace!("PIT sleep completed");
}
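            // signal end-of-interrupt to whichever interrupt controller is in use, or no further
            // timer interrupts will be delivered.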
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PIT_TIMER as u8)
}
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn apic_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
H::timer_tick();
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
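                    // this vector is only used under the APIC interrupt model, so the PIC arm is unreachable.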
InterruptModel::Pic(_) => unreachable!(),
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn keyboard_isr<H: Handlers<Registers>>(_regs: Registers) {
// 0x60 is a magic PC/AT number.
static PORT: cpu::Port = cpu::Port::at(0x60);
// load-bearing read - if we don't read from the keyboard controller it won't
// send another interrupt on later keystrokes.
let scancode = unsafe { PORT.readb() };
H::ps2_keyboard(scancode);
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PS2_KEYBOARD as u8)
}
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn test_isr<H: Handlers<Registers>>(mut registers: Registers) {
H::test_interrupt(Context {
registers: &mut registers,
code: (),
});
}
extern "x86-interrupt" fn invalid_tss_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "invalid task-state segment!");
let msg = selector.named("task-state segment (TSS)");
let code = CodeFault {
error_code: Some(&msg),
kind: "Invalid TSS (0xA)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn segment_not_present_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a segment was not present!");
            let msg = selector.named("segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Segment Not Present (0xB)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn stack_segment_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a stack-segment fault is happening");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Stack-Segment Fault (0xC)",
};
H::code_fault(Context {
registers: &mut registers, | code,
});
}
extern "x86-interrupt" fn gpf_isr<H: Handlers<Registers>>( | random_line_split |
|
node.rs |
use std::sync::mpsc;
use std::sync;
use std::fmt;
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
use checktable;
use flow::data::DataType;
use ops::{Record, Datas};
use flow::domain;
use flow::{Ingredient, NodeAddress, Edge};
use flow::payload::Packet;
use flow::migrate::materialization::Tag;
use backlog;
/// A StreamUpdate reflects the addition or deletion of a row from a reader node.
#[derive(Clone, Debug, PartialEq)]
pub enum StreamUpdate {
/// Indicates the addition of a new row
AddRow(sync::Arc<Vec<DataType>>),
/// Indicates the removal of an existing row
DeleteRow(sync::Arc<Vec<DataType>>),
}
impl From<Record> for StreamUpdate {
fn from(other: Record) -> Self {
match other {
Record::Positive(u) => StreamUpdate::AddRow(u),
Record::Negative(u) => StreamUpdate::DeleteRow(u),
Record::DeleteRequest(..) => unreachable!(),
}
}
}
impl From<Vec<DataType>> for StreamUpdate {
fn from(other: Vec<DataType>) -> Self {
StreamUpdate::AddRow(sync::Arc::new(other))
}
}
#[derive(Clone)]
pub struct Reader {
pub streamers: sync::Arc<sync::Mutex<Vec<mpsc::Sender<Vec<StreamUpdate>>>>>,
pub state: Option<backlog::ReadHandle>,
pub token_generator: Option<checktable::TokenGenerator>,
}
impl Reader {
pub fn get_reader
(&self)
-> Option<Box<Fn(&DataType) -> Result<Vec<Vec<DataType>>, ()> + Send + Sync>> {
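        // returns a lookup closure over the reader's materialized state, if any state is attached.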
self.state.clone().map(|arc| {
Box::new(move |q: &DataType| -> Result<Datas, ()> {
arc.find_and(q,
|rs| rs.into_iter().map(|v| (&**v).clone()).collect::<Vec<_>>())
.map(|r| r.0)
}) as Box<_>
})
}
pub fn key(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.key()),
}
}
pub fn len(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.len()),
}
}
}
impl Default for Reader {
fn default() -> Self {
Reader {
streamers: sync::Arc::default(),
state: None,
token_generator: None,
}
}
}
enum NodeHandle {
Owned(Type),
Taken(Type),
}
impl NodeHandle {
pub fn mark_taken(&mut self) {
use std::mem;
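        // swap in a placeholder (`Type::Source`) so the inner value can be moved out and re-wrapped
        // as `Taken`; panics if the node was already taken.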
match mem::replace(self, NodeHandle::Owned(Type::Source)) {
NodeHandle::Owned(t) => {
mem::replace(self, NodeHandle::Taken(t));
}
NodeHandle::Taken(_) => {
unreachable!("tried to take already taken value");
}
}
}
}
impl Deref for NodeHandle {
type Target = Type;
fn deref(&self) -> &Self::Target {
match *self {
NodeHandle::Owned(ref t) |
NodeHandle::Taken(ref t) => t,
}
}
}
impl DerefMut for NodeHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
NodeHandle::Owned(ref mut t) => t,
NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
// Returns a map from base node to the column in that base node whose value must match the value
// of this node's column to cause a conflict. Returns None for a given base node if any write to
// that base node might cause a conflict.
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
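        // helper: walk ancestors until base (table) nodes are reached; a `None` column means any
        // write to that base may conflict.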
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress { .. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress { .. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient + 'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
{
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
}
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn take(&mut self) -> Node {
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
| random_line_split |
||
node.rs | {
pub streamers: sync::Arc<sync::Mutex<Vec<mpsc::Sender<Vec<StreamUpdate>>>>>,
pub state: Option<backlog::ReadHandle>,
pub token_generator: Option<checktable::TokenGenerator>,
}
impl Reader {
pub fn get_reader
(&self)
-> Option<Box<Fn(&DataType) -> Result<Vec<Vec<DataType>>, ()> + Send + Sync>> {
self.state.clone().map(|arc| {
Box::new(move |q: &DataType| -> Result<Datas, ()> {
arc.find_and(q,
|rs| rs.into_iter().map(|v| (&**v).clone()).collect::<Vec<_>>())
.map(|r| r.0)
}) as Box<_>
})
}
pub fn key(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.key()),
}
}
pub fn len(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.len()),
}
}
}
impl Default for Reader {
fn default() -> Self {
Reader {
streamers: sync::Arc::default(),
state: None,
token_generator: None,
}
}
}
enum NodeHandle {
Owned(Type),
Taken(Type),
}
impl NodeHandle {
pub fn mark_taken(&mut self) {
use std::mem;
match mem::replace(self, NodeHandle::Owned(Type::Source)) {
NodeHandle::Owned(t) => {
mem::replace(self, NodeHandle::Taken(t));
}
NodeHandle::Taken(_) => {
unreachable!("tried to take already taken value");
}
}
}
}
impl Deref for NodeHandle {
type Target = Type;
fn deref(&self) -> &Self::Target {
match *self {
NodeHandle::Owned(ref t) |
NodeHandle::Taken(ref t) => t,
}
}
}
impl DerefMut for NodeHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
NodeHandle::Owned(ref mut t) => t,
NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
// Returns a map from base node to the column in that base node whose value must match the value
// of this node's column to cause a conflict. Returns None for a given base node if any write to
// that base node might cause a conflict.
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress { .. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress { .. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient + 'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
{
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
}
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn | (&mut self) -> Node {
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
// reader nodes can still be modified externally if txs are added
Type::Reader(w.take(), r.clone())
}
Type::Ingress => Type::Ingress,
Type::Internal(ref mut i) if self.domain.is_some() => Type::Internal(i.take()),
Type::Internal(_) |
Type::Source => unreachable!(),
};
self.inner.mark_taken();
let mut n = self.mirror(inner);
n.addr = self.addr;
n
}
pub fn add_to(&mut self, domain: domain::Index) {
self.domain = Some(domain);
}
pub fn set_addr(&mut self, addr: NodeAddress) {
self.addr = Some(addr);
}
pub fn on_commit(&mut self, remap: &HashMap<NodeAddress, NodeAddress>) {
assert!(self.addr.is_some());
self.inner.on_commit(self.addr.unwrap(), remap)
}
pub fn describe(&self, f: &mut fmt::Write, idx: NodeIndex) -> fmt::Result {
use regex::Regex;
let escape = |s: &str| Regex::new("([\"|{}])").unwrap().replace_all(s, "\\$1");
write!(f,
" [style=filled, fillcolor={}, label=\"",
self.domain
| take | identifier_name |
node.rs | NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
// Returns a map from base node to the column in that base node whose value must match the value
// of this node's column to cause a conflict. Returns None for a given base node if any write to
// that base node might cause a conflict.
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress { .. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress { .. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient + 'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
{
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
}
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn take(&mut self) -> Node {
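        // move the inner node out (e.g. when handing it to its domain), leaving a `Taken` marker behind.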
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
// reader nodes can still be modified externally if txs are added
Type::Reader(w.take(), r.clone())
}
Type::Ingress => Type::Ingress,
Type::Internal(ref mut i) if self.domain.is_some() => Type::Internal(i.take()),
Type::Internal(_) |
Type::Source => unreachable!(),
};
self.inner.mark_taken();
let mut n = self.mirror(inner);
n.addr = self.addr;
n
}
pub fn add_to(&mut self, domain: domain::Index) {
self.domain = Some(domain);
}
pub fn set_addr(&mut self, addr: NodeAddress) {
self.addr = Some(addr);
}
pub fn on_commit(&mut self, remap: &HashMap<NodeAddress, NodeAddress>) {
assert!(self.addr.is_some());
self.inner.on_commit(self.addr.unwrap(), remap)
}
pub fn describe(&self, f: &mut fmt::Write, idx: NodeIndex) -> fmt::Result {
use regex::Regex;
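        // GraphViz record labels treat `"`, `|`, `{`, and `}` specially, so backslash-escape them.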
let escape = |s: &str| Regex::new("([\"|{}])").unwrap().replace_all(s, "\\$1");
write!(f,
" [style=filled, fillcolor={}, label=\"",
self.domain
.map(|d| -> usize { d.into() })
.map(|d| format!("\"/set312/{}\"", (d % 12) + 1))
.unwrap_or("white".into()))?;
match *self.inner {
Type::Source => write!(f, "(source)"),
Type::Ingress => write!(f, "{{ {} | (ingress) }}", idx.index()),
Type::Egress { .. } => write!(f, "{{ {} | (egress) }}", idx.index()),
Type::Reader(_, ref r) => {
let key = match r.key() {
Err(_) => String::from("none"),
Ok(k) => format!("{}", k),
};
let size = match r.len() {
Err(_) => String::from("empty"),
Ok(s) => format!("{} distinct keys", s),
};
write!(f,
"{{ {} | (reader / key: {}) | {} }}",
idx.index(),
key,
size)
}
Type::Internal(ref i) => {
write!(f, "{{")?;
// Output node name and description. First row.
write!(f,
"{{ {} / {} | {} }}",
idx.index(),
escape(self.name()),
escape(&i.description()))?;
// Output node outputs. Second row.
write!(f, " | {}", self.fields().join(", \\n"))?;
// Maybe output node's HAVING conditions. Optional third row.
// TODO
// if let Some(conds) = n.node().unwrap().having_conditions() {
// let conds = conds.iter()
// .map(|c| format!("{}", c))
// .collect::<Vec<_>>()
// .join(" ∧ ");
// write!(f, " | σ({})", escape(&conds))?;
// }
write!(f, " }}")
}
}?;
writeln!(f, "\"]")
}
pub fn is_egress(&self) -> bool {
if let Type::Egress { .. } = *self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let Type::Ingress = *self.inner {
| true
} el | conditional_block |
|
node.rs | {
pub streamers: sync::Arc<sync::Mutex<Vec<mpsc::Sender<Vec<StreamUpdate>>>>>,
pub state: Option<backlog::ReadHandle>,
pub token_generator: Option<checktable::TokenGenerator>,
}
impl Reader {
pub fn get_reader
(&self)
-> Option<Box<Fn(&DataType) -> Result<Vec<Vec<DataType>>, ()> + Send + Sync>> {
self.state.clone().map(|arc| {
Box::new(move |q: &DataType| -> Result<Datas, ()> {
arc.find_and(q,
|rs| rs.into_iter().map(|v| (&**v).clone()).collect::<Vec<_>>())
.map(|r| r.0)
}) as Box<_>
})
}
pub fn key(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.key()),
}
}
pub fn len(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.len()),
}
}
}
impl Default for Reader {
fn default() -> Self {
Reader {
streamers: sync::Arc::default(),
state: None,
token_generator: None,
}
}
}
enum NodeHandle {
Owned(Type),
Taken(Type),
}
impl NodeHandle {
pub fn mark_taken(&mut self) {
use std::mem;
match mem::replace(self, NodeHandle::Owned(Type::Source)) {
NodeHandle::Owned(t) => {
mem::replace(self, NodeHandle::Taken(t));
}
NodeHandle::Taken(_) => {
unreachable!("tried to take already taken value");
}
}
}
}
impl Deref for NodeHandle {
type Target = Type;
fn deref(&self) -> &Self::Target {
match *self {
NodeHandle::Owned(ref t) |
NodeHandle::Taken(ref t) => t,
}
}
}
impl DerefMut for NodeHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
NodeHandle::Owned(ref mut t) => t,
NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
// Returns a map from base node to the column in that base node whose value must match the value
// of this node's column to cause a conflict. Returns None for a given base node if any write to
// that base node might cause a conflict.
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress { .. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress { .. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient + 'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
|
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn take(&mut self) -> Node {
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
// reader nodes can still be modified externally if txs are added
Type::Reader(w.take(), r.clone())
}
Type::Ingress => Type::Ingress,
Type::Internal(ref mut i) if self.domain.is_some() => Type::Internal(i.take()),
Type::Internal(_) |
Type::Source => unreachable!(),
};
self.inner.mark_taken();
let mut n = self.mirror(inner);
n.addr = self.addr;
n
}
pub fn add_to(&mut self, domain: domain::Index) {
self.domain = Some(domain);
}
pub fn set_addr(&mut self, addr: NodeAddress) {
self.addr = Some(addr);
}
pub fn on_commit(&mut self, remap: &HashMap<NodeAddress, NodeAddress>) {
assert!(self.addr.is_some());
self.inner.on_commit(self.addr.unwrap(), remap)
}
pub fn describe(&self, f: &mut fmt::Write, idx: NodeIndex) -> fmt::Result {
use regex::Regex;
let escape = |s: &str| Regex::new("([\"|{}])").unwrap().replace_all(s, "\\$1");
write!(f,
" [style=filled, fillcolor={}, label=\"",
self.domain
| {
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
} | identifier_body |
create-contact.component.ts | + JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const phoneAndTypeEntry of c.value) {
if (!phoneAndTypeEntry.phone_number || phoneAndTypeEntry.phone_number.trim() === '') {
continue;
} // don't bother if no phone
if (entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type]) {
return {'duplicate_phone_number_types': true};
}
entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type] = true;
}
  return null; // no duplicate phone number types found
}
function validateAddresses(c: AbstractControl): { [key: string]: boolean } | null {
// console.log('validating...' + JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const addressAndTypeEntry of c.value) {
if (!addressAndTypeEntry.line1 || addressAndTypeEntry.line1.trim() === '') {
continue;
} // don't worry about it
if (entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type]) {
return {'duplicate_address_types': true};
}
entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type] = true;
}
  return null; // no duplicate address types found
}
@Component({
templateUrl: './create-contact.component.html',
styleUrls: ['./create-contact.component.scss']
})
export class CreateContactComponent implements OnInit {
constructor(private _contactsService: ContactsService, private _certificationsService: CertificationsService, private _organizationService: OrganizationsService,
private fb: FormBuilder, private _googlePlacesService: GooglePlacesService, private _algoliaKeyManagementService: AlgoliaKeyManagementService,
private router: Router, private toastr: ToastrService, private securityContext: SecurityContextService) {
}
MAX_ADDRESS_TYPES = 3;
MAX_PHONE_NUMBER_TYPES = 4;
contact: Contact = {};
contactForm: FormGroup;
isSaving: boolean; // supports the Ladda button
// designations: Array<Select2OptionData> = [];
// the name and email panels
showAdditionalNameOptions = false;
showAdditionalEmails = false;
showFullAddress = [false];
ngOnInit() {
this.initFormGroup();
this.initControls();
| this._algoliaKeyManagementService.GetSecureApiKey(this.securityContext.GetCurrentTenant(), 'rolodex-organizations').subscribe(
key => {// populate the index
this.alogliaClient = algoliasearch(key.application_id, key.api_key);
this.algoliaOrganizationIndex = this.alogliaClient.initIndex(key.index_name);
// enable the search
this.contactForm.get('organization_id').enable();
});
}
initFormGroup(): void {
this.contactForm = this.fb.group({
salutation: '',
name: ['', Validators.required],
first_name: '',
middle_name: '',
last_name: '',
suffix: '',
nickname: '',
organization_id: {value: '', disabled: true}, // disabled until initialized
job_title: '',
title: '',
contact_role_ids: '',
phone_numbers: this.fb.array([this.buildPhoneNumberGroup()], validatePhoneNumbers),
addresses: this.fb.array([this.buildAddressGroup()], validateAddresses),
certifications: '',
preferred_phone_number: '',
preferred_address: '',
gender: '',
date_of_birth: '',
email_address: ['', Validators.email],
email_address2: ['', Validators.email],
email_address3: ['', Validators.email],
notes: ''
});
// hook up the name parser
this.contactForm.get('name').valueChanges.subscribe(name => {
// we don't want to run this if someone has changed one of the
// fields
if (this.contactForm.get('first_name').dirty ||
this.contactForm.get('middle_name').dirty ||
this.contactForm.get('last_name').dirty ||
this.contactForm.get('salutation').dirty ||
this.contactForm.get('nickname').dirty ||
this.contactForm.get('suffix').dirty) {
console.log('form name elements changed, disabling name parsing');
return;
}
const nameparts = nameParser.parseFullName(name);
// console.log(JSON.stringify(nameparts));
this.contactForm.patchValue({
first_name: nameparts.first,
middle_name: nameparts.middle,
last_name: nameparts.last,
salutation: nameparts.title,
nickname: nameparts.nick,
suffix: nameparts.suffix
});
if (nameparts.error && nameparts.error.length > 0) {
console.error('could not parse name: ' + JSON.stringify(nameparts.error));
}
});
}
get phone_numbers(): FormArray {
return <FormArray> this.contactForm.get('phone_numbers');
}
buildPhoneNumberGroup() {
return this.fb.group({
phone_number: '',
phone_number_type: 'mobile'
});
}
addPhoneNumber() {
if (this.phone_numbers.length >= this.MAX_PHONE_NUMBER_TYPES) {
alert('No more phone number types are available.');
return;
}
this.phone_numbers.push(this.buildPhoneNumberGroup());
}
formatPhoneNumber(index: number) {
const phone = this.phone_numbers.controls[index].get('phone_number').value;
if (!phone || phone.trim() === '')
return;
// console.log(index);
// console.log(phone);
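    // parse with a US default region; use national formatting for US numbers and international otherwise.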
const PNF = googlePhoneNumberLib.PhoneNumberFormat;
const phoneUtil = googlePhoneNumberLib.PhoneNumberUtil.getInstance();
const number = phoneUtil.parseAndKeepRawInput(phone, 'US');
const typeOfFormat = phoneUtil.getRegionCodeForNumber(number) === 'US' ? PNF.NATIONAL : PNF.INTERNATIONAL;
const formattedPhone = phoneUtil.format(number, typeOfFormat);
this.phone_numbers.controls[index].patchValue({phone_number: formattedPhone});
}
initControls(): void {
//
// this._certificationsService.GetAllCertifications().subscribe((certifications) => this.designations = certifications.map(cert => ({
// id: cert.post_nominal,
// text: cert.post_nominal
// })));
}
shouldShowFormErrorFor(field: string) {
return this.contactForm.get(field).errors && this.contactForm.get(field).touched;
}
shouldShowFormErrorForControl(field: AbstractControl) {
return field.errors; // && field.touched;
}
extractContactFromForm(): Contact {
const contactToSave = this.contactForm.value;
// now, let's get the phone numbers
this.extractPhoneNumbersFromForm(contactToSave);
// get rid of the phone numbers property
delete contactToSave.phone_numbers;
// now, let's get the phone numbers
this.extractAddressesFromForm(contactToSave);
// get rid of the phone numbers property
delete contactToSave.addresses;
console.log(JSON.stringify(contactToSave));
return clean(contactToSave);
}
private extractPhoneNumbersFromForm(contactToSave) {
// let's iterate and get the phone numbers
const phoneNumbers = this.phone_numbers.controls;
if (phoneNumbers && phoneNumbers.length > 0) {
const processedPhoneNumbers = {};
for (const phoneNumber of phoneNumbers) {
// is this a duplicate?
const type = phoneNumber.value.phone_number_type;
if (processedPhoneNumbers[type]) {
// validation error
alert('Duplicate phone number types specified!');
return;
}
processedPhoneNumbers[type] = true;
switch (type) {
case 'home':
contactToSave.home_phone_number = phoneNumber.value.phone_number;
break;
case 'work':
contactToSave.work_phone_number = phoneNumber.value.phone_number;
break;
case 'mobile':
contactToSave.mobile_phone_number = phoneNumber.value.phone_number;
break;
case 'alternate':
contactToSave.alt_phone_number = phoneNumber.value.phone_number;
break;
default:
throw new Error('unknown phone number type ' + phoneNumber.value.phone_number_type);
}
}
}
}
// *********** Address Management ***************************
//
get addresses(): FormArray {
return <FormArray> this.contactForm.get('addresses');
}
buildAddressGroup() {
return this.fb.group({
address_lookup: '',
address_type: 'home',
line1: '',
line2: '',
city: '',
state: '',
postal_code: '',
country: ''
});
}
addAddress() {
if (this.addresses.length >= this.MAX_ADDRESS_TYPES) {
alert('No more address types are available.');
return;
}
this.showFullAddress.push(false);
this.hasAddressBeenPlaced.push(false);
this.addresses.push(this.buildAddressGroup());
}
hasAddressBeenPlaced: boolean[] = [false];
handleAddressChange(address: Address, index: number) {
console.log(JSON.stringify(address));
const addr = this._googlePlacesService.parseGooglePlacesAddress(address);
console.log('Placed Addr: ' + JSON.stringify(addr));
this.addresses.controls[index].patchValue({
line1: addr.line1,
line2: addr.line2,
city: addr.city,
state: addr.state,
postal_code: addr.postal_code,
country: addr.country,
address_lookup: 'Please edit the address below'
});
this.showFullAddress[index] = true;
this.hasAddressBeenPlaced[index] = true;
this.addresses.controls[index].get('address_lookup').disable();
}
private extractAddressesFromForm(contactToSave) {
// let | this.initAlgolia();
}
initAlgolia(): void {
| random_line_split |
create-contact.component.ts | + JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const phoneAndTypeEntry of c.value) {
if (!phoneAndTypeEntry.phone_number || phoneAndTypeEntry.phone_number.trim() === '') {
continue;
} // don't bother if no phone
if (entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type]) {
return {'duplicate_phone_number_types': true};
}
entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type] = true;
}
  return null; // no duplicate phone number types found
}
function validateAddresses(c: AbstractControl): { [key: string]: boolean } | null {
// console.log('validating...' + JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const addressAndTypeEntry of c.value) {
if (!addressAndTypeEntry.line1 || addressAndTypeEntry.line1.trim() === '') {
continue;
} // don't worry about it
if (entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type]) {
return {'duplicate_address_types': true};
}
entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type] = true;
}
  return null; // no duplicate address types found
}
@Component({
templateUrl: './create-contact.component.html',
styleUrls: ['./create-contact.component.scss']
})
export class CreateContactComponent implements OnInit {
constructor(private _contactsService: ContactsService, private _certificationsService: CertificationsService, private _organizationService: OrganizationsService,
private fb: FormBuilder, private _googlePlacesService: GooglePlacesService, private _algoliaKeyManagementService: AlgoliaKeyManagementService,
private router: Router, private toastr: ToastrService, private securityContext: SecurityContextService) {
}
MAX_ADDRESS_TYPES = 3;
MAX_PHONE_NUMBER_TYPES = 4;
contact: Contact = {};
contactForm: FormGroup;
isSaving: boolean; // supports the Ladda button
// designations: Array<Select2OptionData> = [];
// the name and email panels
showAdditionalNameOptions = false;
showAdditionalEmails = false;
showFullAddress = [false];
ngOnInit() {
this.initFormGroup();
this.initControls();
this.initAlgolia();
}
initAlgolia(): void {
this._algoliaKeyManagementService.GetSecureApiKey(this.securityContext.GetCurrentTenant(), 'rolodex-organizations').subscribe(
key => {// populate the index
this.alogliaClient = algoliasearch(key.application_id, key.api_key);
this.algoliaOrganizationIndex = this.alogliaClient.initIndex(key.index_name);
// enable the search
this.contactForm.get('organization_id').enable();
});
}
initFormGroup(): void {
this.contactForm = this.fb.group({
salutation: '',
name: ['', Validators.required],
first_name: '',
middle_name: '',
last_name: '',
suffix: '',
nickname: '',
organization_id: {value: '', disabled: true}, // disabled until initialized
job_title: '',
title: '',
contact_role_ids: '',
phone_numbers: this.fb.array([this.buildPhoneNumberGroup()], validatePhoneNumbers),
addresses: this.fb.array([this.buildAddressGroup()], validateAddresses),
certifications: '',
preferred_phone_number: '',
preferred_address: '',
gender: '',
date_of_birth: '',
email_address: ['', Validators.email],
email_address2: ['', Validators.email],
email_address3: ['', Validators.email],
notes: ''
});
// hook up the name parser
this.contactForm.get('name').valueChanges.subscribe(name => {
// we don't want to run this if someone has changed one of the
// fields
if (this.contactForm.get('first_name').dirty ||
this.contactForm.get('middle_name').dirty ||
this.contactForm.get('last_name').dirty ||
this.contactForm.get('salutation').dirty ||
this.contactForm.get('nickname').dirty ||
this.contactForm.get('suffix').dirty) {
console.log('form name elements changed, disabling name parsing');
return;
}
const nameparts = nameParser.parseFullName(name);
// console.log(JSON.stringify(nameparts));
this.contactForm.patchValue({
first_name: nameparts.first,
middle_name: nameparts.middle,
last_name: nameparts.last,
salutation: nameparts.title,
nickname: nameparts.nick,
suffix: nameparts.suffix
});
if (nameparts.error && nameparts.error.length > 0) {
console.error('could not parse name: ' + JSON.stringify(nameparts.error));
}
});
}
get phone_numbers(): FormArray {
return <FormArray> this.contactForm.get('phone_numbers');
}
buildPhoneNumberGroup() {
return this.fb.group({
phone_number: '',
phone_number_type: 'mobile'
});
}
| () {
if (this.phone_numbers.length >= this.MAX_PHONE_NUMBER_TYPES) {
alert('No more phone number types are available.');
return;
}
this.phone_numbers.push(this.buildPhoneNumberGroup());
}
formatPhoneNumber(index: number) {
const phone = this.phone_numbers.controls[index].get('phone_number').value;
if (!phone || phone.trim() === '')
return;
// console.log(index);
// console.log(phone);
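    // parse with a US default region; use national formatting for US numbers and international otherwise.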
const PNF = googlePhoneNumberLib.PhoneNumberFormat;
const phoneUtil = googlePhoneNumberLib.PhoneNumberUtil.getInstance();
const number = phoneUtil.parseAndKeepRawInput(phone, 'US');
const typeOfFormat = phoneUtil.getRegionCodeForNumber(number) === 'US' ? PNF.NATIONAL : PNF.INTERNATIONAL;
const formattedPhone = phoneUtil.format(number, typeOfFormat);
this.phone_numbers.controls[index].patchValue({phone_number: formattedPhone});
}
initControls(): void {
//
// this._certificationsService.GetAllCertifications().subscribe((certifications) => this.designations = certifications.map(cert => ({
// id: cert.post_nominal,
// text: cert.post_nominal
// })));
}
shouldShowFormErrorFor(field: string) {
return this.contactForm.get(field).errors && this.contactForm.get(field).touched;
}
shouldShowFormErrorForControl(field: AbstractControl) {
return field.errors; // && field.touched;
}
extractContactFromForm(): Contact {
const contactToSave = this.contactForm.value;
// now, let's get the phone numbers
this.extractPhoneNumbersFromForm(contactToSave);
// get rid of the phone numbers property
delete contactToSave.phone_numbers;
// now, let's get the phone numbers
this.extractAddressesFromForm(contactToSave);
// get rid of the phone numbers property
delete contactToSave.addresses;
console.log(JSON.stringify(contactToSave));
return clean(contactToSave);
}
private extractPhoneNumbersFromForm(contactToSave) {
// let's iterate and get the phone numbers
const phoneNumbers = this.phone_numbers.controls;
if (phoneNumbers && phoneNumbers.length > 0) {
const processedPhoneNumbers = {};
for (const phoneNumber of phoneNumbers) {
// is this a duplicate?
const type = phoneNumber.value.phone_number_type;
if (processedPhoneNumbers[type]) {
// validation error
alert('Duplicate phone number types specified!');
return;
}
processedPhoneNumbers[type] = true;
switch (type) {
case 'home':
contactToSave.home_phone_number = phoneNumber.value.phone_number;
break;
case 'work':
contactToSave.work_phone_number = phoneNumber.value.phone_number;
break;
case 'mobile':
contactToSave.mobile_phone_number = phoneNumber.value.phone_number;
break;
case 'alternate':
contactToSave.alt_phone_number = phoneNumber.value.phone_number;
break;
default:
throw new Error('unknown phone number type ' + phoneNumber.value.phone_number_type);
}
}
}
}
// *********** Address Management ***************************
//
get addresses(): FormArray {
return <FormArray> this.contactForm.get('addresses');
}
buildAddressGroup() {
return this.fb.group({
address_lookup: '',
address_type: 'home',
line1: '',
line2: '',
city: '',
state: '',
postal_code: '',
country: ''
});
}
addAddress() {
if (this.addresses.length >= this.MAX_ADDRESS_TYPES) {
alert('No more address types are available.');
return;
}
this.showFullAddress.push(false);
this.hasAddressBeenPlaced.push(false);
this.addresses.push(this.buildAddressGroup());
}
hasAddressBeenPlaced: boolean[] = [false];
handleAddressChange(address: Address, index: number) {
console.log(JSON.stringify(address));
const addr = this._googlePlacesService.parseGooglePlacesAddress(address);
console.log('Placed Addr: ' + JSON.stringify(addr));
this.addresses.controls[index].patchValue({
line1: addr.line1,
line2: addr.line2,
city: addr.city,
state: addr.state,
postal_code: addr.postal_code,
country: addr.country,
address_lookup: 'Please edit the address below'
});
this.showFullAddress[index] = true;
this.hasAddressBeenPlaced[index] = true;
this.addresses.controls[index].get('address_lookup').disable();
}
private extractAddressesFromForm(contactToSave) {
// let | addPhoneNumber | identifier_name |
create-contact.component.ts | JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const phoneAndTypeEntry of c.value) {
if (!phoneAndTypeEntry.phone_number || phoneAndTypeEntry.phone_number.trim() === '') {
continue;
} // don't bother if no phone
if (entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type]) {
return {'duplicate_phone_number_types': true};
}
entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type] = true;
}
}
function validateAddresses(c: AbstractControl): { [key: string]: boolean } | null {
// console.log('validating...' + JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const addressAndTypeEntry of c.value) {
if (!addressAndTypeEntry.line1 || addressAndTypeEntry.line1.trim() === '') {
continue;
} // don't worry about it
if (entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type]) {
return {'duplicate_address_types': true};
}
entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type] = true;
}
}
@Component({
templateUrl: './create-contact.component.html',
styleUrls: ['./create-contact.component.scss']
})
export class CreateContactComponent implements OnInit {
constructor(private _contactsService: ContactsService, private _certificationsService: CertificationsService, private _organizationService: OrganizationsService,
private fb: FormBuilder, private _googlePlacesService: GooglePlacesService, private _algoliaKeyManagementService: AlgoliaKeyManagementService,
private router: Router, private toastr: ToastrService, private securityContext: SecurityContextService) {
}
MAX_ADDRESS_TYPES = 3;
MAX_PHONE_NUMBER_TYPES = 4;
contact: Contact = {};
contactForm: FormGroup;
isSaving: boolean; // supports the Ladda button
// designations: Array<Select2OptionData> = [];
// the name and email panels
showAdditionalNameOptions = false;
showAdditionalEmails = false;
showFullAddress = [false];
ngOnInit() {
this.initFormGroup();
this.initControls();
this.initAlgolia();
}
initAlgolia(): void {
this._algoliaKeyManagementService.GetSecureApiKey(this.securityContext.GetCurrentTenant(), 'rolodex-organizations').subscribe(
key => {// populate the index
this.alogliaClient = algoliasearch(key.application_id, key.api_key);
this.algoliaOrganizationIndex = this.alogliaClient.initIndex(key.index_name);
// enable the search
this.contactForm.get('organization_id').enable();
});
}
initFormGroup(): void {
this.contactForm = this.fb.group({
salutation: '',
name: ['', Validators.required],
first_name: '',
middle_name: '',
last_name: '',
suffix: '',
nickname: '',
organization_id: {value: '', disabled: true}, // disabled until initialized
job_title: '',
title: '',
contact_role_ids: '',
phone_numbers: this.fb.array([this.buildPhoneNumberGroup()], validatePhoneNumbers),
addresses: this.fb.array([this.buildAddressGroup()], validateAddresses),
certifications: '',
preferred_phone_number: '',
preferred_address: '',
gender: '',
date_of_birth: '',
email_address: ['', Validators.email],
email_address2: ['', Validators.email],
email_address3: ['', Validators.email],
notes: ''
});
// hook up the name parser
this.contactForm.get('name').valueChanges.subscribe(name => {
// we don't want to run this if someone has changed one of the
// fields
if (this.contactForm.get('first_name').dirty ||
this.contactForm.get('middle_name').dirty ||
this.contactForm.get('last_name').dirty ||
this.contactForm.get('salutation').dirty ||
this.contactForm.get('nickname').dirty ||
this.contactForm.get('suffix').dirty) {
console.log('form name elements changed, disabling name parsing');
return;
}
const nameparts = nameParser.parseFullName(name);
// console.log(JSON.stringify(nameparts));
this.contactForm.patchValue({
first_name: nameparts.first,
middle_name: nameparts.middle,
last_name: nameparts.last,
salutation: nameparts.title,
nickname: nameparts.nick,
suffix: nameparts.suffix
});
if (nameparts.error && nameparts.error.length > 0) {
console.error('could not parse name: ' + JSON.stringify(nameparts.error));
}
});
}
get phone_numbers(): FormArray {
return <FormArray> this.contactForm.get('phone_numbers');
}
buildPhoneNumberGroup() |
addPhoneNumber() {
if (this.phone_numbers.length >= this.MAX_PHONE_NUMBER_TYPES) {
alert('No more phone number types are available.');
return;
}
this.phone_numbers.push(this.buildPhoneNumberGroup());
}
formatPhoneNumber(index: number) {
const phone = this.phone_numbers.controls[index].get('phone_number').value;
if (!phone || phone.trim() === '')
return;
// console.log(index);
// console.log(phone);
const PNF = googlePhoneNumberLib.PhoneNumberFormat;
const phoneUtil = googlePhoneNumberLib.PhoneNumberUtil.getInstance();
const number = phoneUtil.parseAndKeepRawInput(phone, 'US');
const typeOfFormat = phoneUtil.getRegionCodeForNumber(number) === 'US' ? PNF.NATIONAL : PNF.INTERNATIONAL;
const formattedPhone = phoneUtil.format(number, typeOfFormat);
this.phone_numbers.controls[index].patchValue({phone_number: formattedPhone});
}
initControls(): void {
//
// this._certificationsService.GetAllCertifications().subscribe((certifications) => this.designations = certifications.map(cert => ({
// id: cert.post_nominal,
// text: cert.post_nominal
// })));
}
shouldShowFormErrorFor(field: string) {
return this.contactForm.get(field).errors && this.contactForm.get(field).touched;
}
shouldShowFormErrorForControl(field: AbstractControl) {
return field.errors; // && field.touched;
}
extractContactFromForm(): Contact {
const contactToSave = this.contactForm.value;
// now, let's get the phone numbers
this.extractPhoneNumbersFromForm(contactToSave);
// get rid of the phone numbers property
delete contactToSave.phone_numbers;
// now, let's get the addresses
this.extractAddressesFromForm(contactToSave);
// get rid of the addresses property
delete contactToSave.addresses;
console.log(JSON.stringify(contactToSave));
return clean(contactToSave);
}
private extractPhoneNumbersFromForm(contactToSave) {
// let's iterate and get the phone numbers
const phoneNumbers = this.phone_numbers.controls;
if (phoneNumbers && phoneNumbers.length > 0) {
const processedPhoneNumbers = {};
for (const phoneNumber of phoneNumbers) {
// is this a duplicate?
const type = phoneNumber.value.phone_number_type;
if (processedPhoneNumbers[type]) {
// validation error
alert('Duplicate phone number types specified!');
return;
}
processedPhoneNumbers[type] = true;
switch (type) {
case 'home':
contactToSave.home_phone_number = phoneNumber.value.phone_number;
break;
case 'work':
contactToSave.work_phone_number = phoneNumber.value.phone_number;
break;
case 'mobile':
contactToSave.mobile_phone_number = phoneNumber.value.phone_number;
break;
case 'alternate':
contactToSave.alt_phone_number = phoneNumber.value.phone_number;
break;
default:
throw new Error('unknown phone number type ' + phoneNumber.value.phone_number_type);
}
}
}
}
// *********** Address Management ***************************
//
get addresses(): FormArray {
return <FormArray> this.contactForm.get('addresses');
}
buildAddressGroup() {
return this.fb.group({
address_lookup: '',
address_type: 'home',
line1: '',
line2: '',
city: '',
state: '',
postal_code: '',
country: ''
});
}
addAddress() {
if (this.addresses.length >= this.MAX_ADDRESS_TYPES) {
alert('No more address types are available.');
return;
}
this.showFullAddress.push(false);
this.hasAddressBeenPlaced.push(false);
this.addresses.push(this.buildAddressGroup());
}
hasAddressBeenPlaced: boolean[] = [false];
handleAddressChange(address: Address, index: number) {
console.log(JSON.stringify(address));
const addr = this._googlePlacesService.parseGooglePlacesAddress(address);
console.log('Placed Addr: ' + JSON.stringify(addr));
this.addresses.controls[index].patchValue({
line1: addr.line1,
line2: addr.line2,
city: addr.city,
state: addr.state,
postal_code: addr.postal_code,
country: addr.country,
address_lookup: 'Please edit the address below'
});
this.showFullAddress[index] = true;
this.hasAddressBeenPlaced[index] = true;
this.addresses.controls[index].get('address_lookup').disable();
}
private extractAddressesFromForm(contactToSave) {
// | {
return this.fb.group({
phone_number: '',
phone_number_type: 'mobile'
});
} | identifier_body |
deployment.go |
/BASEDIR/APPNAME/release/VERSION1
/BASEDIR/APPNAME/release/VERSION2
/BASEDIR/APPNAME/release/VERSION3
Releasing a version points the "current" symlink to the specified release directory.
*/
package deployment
import (
"crypto/hmac"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path"
"strconv"
"strings"
"github.com/mredivo/pulldeploy/pdconfig"
)
const kARTIFACTDIR = "artifact"
const kRELEASEDIR = "release"
const kCURRENTDIR = "current"
const kHMACSUFFIX = "hmac"
// Deployment provides methods for manipulating local deployment files.
type Deployment struct {
appName string // The name of the application
cfg pdconfig.AppConfig // The deployment configuration
acfg pdconfig.ArtifactConfig // The Artifact Type configuration
uid int // The numeric UID to own all files for this deployment
gid int // The numeric GID to own all files for this deployment
baseDir string // The derived top-level directory for this app's files
artifactDir string // The derived subdirectory for fetched build artifacts
releaseDir string // The derived subdirectory for extracted build artifacts
}
// New returns a new Deployment.
func New(appName string, pdcfg pdconfig.PDConfig, cfg *pdconfig.AppConfig) (*Deployment, error) {
d := new(Deployment)
d.cfg = *cfg
// Capture the supplied arguments.
d.appName = appName
// All string arguments are mandatory.
if appName == "" {
return nil, errors.New("Deployment initialization error: Appname is mandatory")
}
if d.cfg.BaseDir == "" {
return nil, errors.New("Deployment initialization error: BaseDir is mandatory")
}
// Validate the artifact type.
if ac, err := pdcfg.GetArtifactConfig(d.cfg.ArtifactType); err == nil {
d.acfg = *ac
} else {
return nil, fmt.Errorf("Deployment initialization error: invalid ArtifactType %q", d.cfg.ArtifactType)
}
// Derive the UID/GID from the username/groupname.
// NOTE: Go doesn't yet support looking up a GID from a name, so
// we use the gid from the user.
if d.cfg.User != "" {
if user, err := user.Lookup(d.cfg.User); err == nil {
if i, err := strconv.ParseInt(user.Uid, 10, 64); err == nil {
d.uid = int(i)
}
if i, err := strconv.ParseInt(user.Gid, 10, 64); err == nil {
d.gid = int(i)
}
}
}
// The parent directory must not be "/".
parentDir := absPath(d.cfg.BaseDir)
if parentDir == "/" {
return nil, errors.New("Deployment initialization error: \"/\" not permitted as BaseDir")
}
// The parent directory must exist.
if _, err := os.Stat(parentDir); err != nil {
return nil, fmt.Errorf("Deployment initialization error: unable to stat BaseDir: %s", err.Error())
}
// If the base dir doesn't exist, create it.
d.baseDir = path.Join(parentDir, appName)
if _, err := os.Stat(d.baseDir); err != nil {
if err := makeDir(d.baseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the artifact dir doesn't exist, create it.
d.artifactDir = path.Join(d.baseDir, kARTIFACTDIR)
if _, err := os.Stat(d.artifactDir); err != nil {
if err := makeDir(d.artifactDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the release dir doesn't exist, create it.
d.releaseDir = path.Join(d.baseDir, kRELEASEDIR)
if _, err := os.Stat(d.releaseDir); err != nil {
if err := makeDir(d.releaseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
return d, nil
}
// ArtifactPresent indicates whether the artifact has already been written.
func (d *Deployment) ArtifactPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteArtifact creates a file in the artifact area from the given stream.
func (d *Deployment) WriteArtifact(version string, rc io.ReadCloser) error {
// Housekeeping: ensure the source is closed when done.
defer rc.Close()
// Generate the filename, and check whether file already exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if exists {
return fmt.Errorf("Artifact already exists: %s", artifactPath)
}
// Open the file, and write the data into it.
if fp, err := os.OpenFile(artifactPath, os.O_WRONLY|os.O_CREATE, 0664); err == nil {
defer fp.Close()
if _, err := io.Copy(fp, rc); err != nil {
return fmt.Errorf("Error while creating %q: %s", artifactPath, err.Error())
}
if err := setOwner(artifactPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", artifactPath, err.Error())
}
}
return nil
}
// HMACPresent indicates whether the HMAC has already been written.
func (d *Deployment) HMACPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteHMAC writes an HMAC into the artifact area.
func (d *Deployment) WriteHMAC(version string, hmac []byte) error {
// Generate the filename, write to file, set ownership.
hmacPath, _ := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if err := ioutil.WriteFile(hmacPath, hmac, 0664); err != nil {
return fmt.Errorf("Error while writing %q: %s", hmacPath, err.Error())
}
if err := setOwner(hmacPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", hmacPath, err.Error())
}
return nil
}
// CheckHMAC confirms that the artifact has not been corrupted or tampered with by
// calculating its HMAC and comparing it with the retrieved HMAC.
func (d *Deployment) CheckHMAC(version string) error {
// Build the filenames.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("HMAC does not exist: %s", artifactPath)
}
// Read in the HMAC.
if expectedMAC, err := ioutil.ReadFile(hmacPath); err == nil {
// Open the artifact, and calculate its HMAC.
if fp, err := os.Open(artifactPath); err == nil {
messageMAC := CalculateHMAC(fp, NewHMACCalculator(d.cfg.Secret))
if !hmac.Equal(messageMAC, expectedMAC) {
return fmt.Errorf(
"Artifact is corrupt: Expected HMAC: %q: Calculated HMAC: %q",
string(expectedMAC),
string(messageMAC),
)
}
} else {
return fmt.Errorf("Error while reading %q: %s", artifactPath, err.Error())
}
} else {
return fmt.Errorf("Error while reading %q: %s", hmacPath, err.Error())
}
return nil
}
// Extract transfers an artifact to the version release directory.
func (d *Deployment) Extract(version string) error {
// Ensure that the artifact to be extracted exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
// If running as root, ensure the extract command wasn't loaded from an insecure file.
if os.Geteuid() == 0 && d.acfg.Insecure {
return fmt.Errorf(
"Refusing to execute extract command from insecure \"pulldeploy.yaml\" as root")
}
// Create the version directory if it doesn't exist.
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
if err := makeDir(versionDir, d.uid, d.gid, 0755); err != nil {
| "release" directory. | random_line_split |
|
deployment.go | this deployment
gid int // The numeric GID to own all files for this deployment
baseDir string // The derived top-level directory for this app's files
artifactDir string // The derived subdirectory for fetched build artifacts
releaseDir string // The derived subdirectory for extracted build artifacts
}
// New returns a new Deployment.
func New(appName string, pdcfg pdconfig.PDConfig, cfg *pdconfig.AppConfig) (*Deployment, error) {
d := new(Deployment)
d.cfg = *cfg
// Capture the supplied arguments.
d.appName = appName
// All string arguments are mandatory.
if appName == "" {
return nil, errors.New("Deployment initialization error: Appname is mandatory")
}
if d.cfg.BaseDir == "" {
return nil, errors.New("Deployment initialization error: BaseDir is mandatory")
}
// Validate the artifact type.
if ac, err := pdcfg.GetArtifactConfig(d.cfg.ArtifactType); err == nil | else {
return nil, fmt.Errorf("Deployment initialization error: invalid ArtifactType %q", d.cfg.ArtifactType)
}
// Derive the UID/GID from the username/groupname.
// NOTE: Go doesn't yet support looking up a GID from a name, so
// we use the gid from the user.
if d.cfg.User != "" {
if user, err := user.Lookup(d.cfg.User); err == nil {
if i, err := strconv.ParseInt(user.Uid, 10, 64); err == nil {
d.uid = int(i)
}
if i, err := strconv.ParseInt(user.Gid, 10, 64); err == nil {
d.gid = int(i)
}
}
}
// The parent directory must not be "/".
parentDir := absPath(d.cfg.BaseDir)
if parentDir == "/" {
return nil, errors.New("Deployment initialization error: \"/\" not permitted as BaseDir")
}
// The parent directory must exist.
if _, err := os.Stat(parentDir); err != nil {
return nil, fmt.Errorf("Deployment initialization error: unable to stat BaseDir: %s", err.Error())
}
// If the base dir doesn't exist, create it.
d.baseDir = path.Join(parentDir, appName)
if _, err := os.Stat(d.baseDir); err != nil {
if err := makeDir(d.baseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the artifact dir doesn't exist, create it.
d.artifactDir = path.Join(d.baseDir, kARTIFACTDIR)
if _, err := os.Stat(d.artifactDir); err != nil {
if err := makeDir(d.artifactDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the release dir doesn't exist, create it.
d.releaseDir = path.Join(d.baseDir, kRELEASEDIR)
if _, err := os.Stat(d.releaseDir); err != nil {
if err := makeDir(d.releaseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
return d, nil
}
// ArtifactPresent indicates whether the artifact has already been written.
func (d *Deployment) ArtifactPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteArtifact creates a file in the artifact area from the given stream.
func (d *Deployment) WriteArtifact(version string, rc io.ReadCloser) error {
// Housekeeping: ensure the source is closed when done.
defer rc.Close()
// Generate the filename, and check whether file already exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if exists {
return fmt.Errorf("Artifact already exists: %s", artifactPath)
}
// Open the file, and write the data into it.
if fp, err := os.OpenFile(artifactPath, os.O_WRONLY|os.O_CREATE, 0664); err == nil {
defer fp.Close()
if _, err := io.Copy(fp, rc); err != nil {
return fmt.Errorf("Error while creating %q: %s", artifactPath, err.Error())
}
if err := setOwner(artifactPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", artifactPath, err.Error())
}
}
return nil
}
// HMACPresent indicates whether the HMAC has already been written.
func (d *Deployment) HMACPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteHMAC writes an HMAC into the artifact area.
func (d *Deployment) WriteHMAC(version string, hmac []byte) error {
// Generate the filename, write to file, set ownership.
hmacPath, _ := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if err := ioutil.WriteFile(hmacPath, hmac, 0664); err != nil {
return fmt.Errorf("Error while writing %q: %s", hmacPath, err.Error())
}
if err := setOwner(hmacPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", hmacPath, err.Error())
}
return nil
}
// CheckHMAC confirms that the artifact has not been corrupted or tampered with by
// calculating its HMAC and comparing it with the retrieved HMAC.
func (d *Deployment) CheckHMAC(version string) error {
// Build the filenames.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("HMAC does not exist: %s", artifactPath)
}
// Read in the HMAC.
if expectedMAC, err := ioutil.ReadFile(hmacPath); err == nil {
// Open the artifact, and calculate its HMAC.
if fp, err := os.Open(artifactPath); err == nil {
messageMAC := CalculateHMAC(fp, NewHMACCalculator(d.cfg.Secret))
if !hmac.Equal(messageMAC, expectedMAC) {
return fmt.Errorf(
"Artifact is corrupt: Expected HMAC: %q: Calculated HMAC: %q",
string(expectedMAC),
string(messageMAC),
)
}
} else {
return fmt.Errorf("Error while reading %q: %s", artifactPath, err.Error())
}
} else {
return fmt.Errorf("Error while reading %q: %s", hmacPath, err.Error())
}
return nil
}
// Extract transfers an artifact to the version release directory.
func (d *Deployment) Extract(version string) error {
// Ensure that the artifact to be extracted exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
// If running as root, ensure the extract command wasn't loaded from an insecure file.
if os.Geteuid() == 0 && d.acfg.Insecure {
return fmt.Errorf(
"Refusing to execute extract command from insecure \"pulldeploy.yaml\" as root")
}
// Create the version directory if it doesn't exist.
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
if err := makeDir(versionDir, d.uid, d.gid, 0755); err != nil {
return fmt.Errorf("Cannot create release directory %q: %s", version, err.Error())
}
}
// Extract the archive into the version directory.
cmdlineArgs := substituteVars(d.acfg.Extract.Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
_, err := sysCommand("", d.acfg.Extract.Cmd, cmdlineArgs)
if err != nil {
return fmt.Errorf("Cannot extract archive %q into %q: %s", artifactPath, versionDir, err.Error())
}
// Set the ownership of all the extracted files.
if err := setOwnerAll(versionDir, d.uid, d.gid); err != nil {
return err
}
return nil
}
// Link sets the "current" symlink to point at the indicated version.
func (d *Deployment) Link(version string) error {
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
return fmt.Errorf("Release directory does not exist: %q", versionDir | {
d.acfg = *ac
} | conditional_block |
deployment.go |
artifactDir string // The derived subdirectory for fetched build artifacts
releaseDir string // The derived subdirectory for extracted build artifacts
}
// New returns a new Deployment.
func New(appName string, pdcfg pdconfig.PDConfig, cfg *pdconfig.AppConfig) (*Deployment, error) {
d := new(Deployment)
d.cfg = *cfg
// Capture the supplied arguments.
d.appName = appName
// All string arguments are mandatory.
if appName == "" {
return nil, errors.New("Deployment initialization error: Appname is mandatory")
}
if d.cfg.BaseDir == "" {
return nil, errors.New("Deployment initialization error: BaseDir is mandatory")
}
// Validate the artifact type.
if ac, err := pdcfg.GetArtifactConfig(d.cfg.ArtifactType); err == nil {
d.acfg = *ac
} else {
return nil, fmt.Errorf("Deployment initialization error: invalid ArtifactType %q", d.cfg.ArtifactType)
}
// Derive the UID/GID from the username/groupname.
// NOTE: Go doesn't yet support looking up a GID from a name, so
// we use the gid from the user.
if d.cfg.User != "" {
if user, err := user.Lookup(d.cfg.User); err == nil {
if i, err := strconv.ParseInt(user.Uid, 10, 64); err == nil {
d.uid = int(i)
}
if i, err := strconv.ParseInt(user.Gid, 10, 64); err == nil {
d.gid = int(i)
}
}
}
// The parent directory must not be "/".
parentDir := absPath(d.cfg.BaseDir)
if parentDir == "/" {
return nil, errors.New("Deployment initialization error: \"/\" not permitted as BaseDir")
}
// The parent directory must exist.
if _, err := os.Stat(parentDir); err != nil {
return nil, fmt.Errorf("Deployment initialization error: unable to stat BaseDir: %s", err.Error())
}
// If the base dir doesn't exist, create it.
d.baseDir = path.Join(parentDir, appName)
if _, err := os.Stat(d.baseDir); err != nil {
if err := makeDir(d.baseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the artifact dir doesn't exist, create it.
d.artifactDir = path.Join(d.baseDir, kARTIFACTDIR)
if _, err := os.Stat(d.artifactDir); err != nil {
if err := makeDir(d.artifactDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the release dir doesn't exist, create it.
d.releaseDir = path.Join(d.baseDir, kRELEASEDIR)
if _, err := os.Stat(d.releaseDir); err != nil {
if err := makeDir(d.releaseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
return d, nil
}
// ArtifactPresent indicates whether the artifact has already been written.
func (d *Deployment) ArtifactPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteArtifact creates a file in the artifact area from the given stream.
func (d *Deployment) WriteArtifact(version string, rc io.ReadCloser) error {
// Housekeeping: ensure the source is closed when done.
defer rc.Close()
// Generate the filename, and check whether file already exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if exists {
return fmt.Errorf("Artifact already exists: %s", artifactPath)
}
// Open the file, and write the data into it.
if fp, err := os.OpenFile(artifactPath, os.O_WRONLY|os.O_CREATE, 0664); err == nil {
defer fp.Close()
if _, err := io.Copy(fp, rc); err != nil {
return fmt.Errorf("Error while creating %q: %s", artifactPath, err.Error())
}
if err := setOwner(artifactPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", artifactPath, err.Error())
}
}
return nil
}
// HMACPresent indicates whether the HMAC has already been written.
func (d *Deployment) HMACPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteHMAC writes an HMAC into the artifact area.
func (d *Deployment) WriteHMAC(version string, hmac []byte) error {
// Generate the filename, write to file, set ownership.
hmacPath, _ := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if err := ioutil.WriteFile(hmacPath, hmac, 0664); err != nil {
return fmt.Errorf("Error while writing %q: %s", hmacPath, err.Error())
}
if err := setOwner(hmacPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", hmacPath, err.Error())
}
return nil
}
// CheckHMAC confirms that the artifact has not been corrupted or tampered with by
// calculating its HMAC and comparing it with the retrieved HMAC.
func (d *Deployment) CheckHMAC(version string) error {
// Build the filenames.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("HMAC does not exist: %s", artifactPath)
}
// Read in the HMAC.
if expectedMAC, err := ioutil.ReadFile(hmacPath); err == nil {
// Open the artifact, and calculate its HMAC.
if fp, err := os.Open(artifactPath); err == nil {
messageMAC := CalculateHMAC(fp, NewHMACCalculator(d.cfg.Secret))
if !hmac.Equal(messageMAC, expectedMAC) {
return fmt.Errorf(
"Artifact is corrupt: Expected HMAC: %q: Calculated HMAC: %q",
string(expectedMAC),
string(messageMAC),
)
}
} else {
return fmt.Errorf("Error while reading %q: %s", artifactPath, err.Error())
}
} else {
return fmt.Errorf("Error while reading %q: %s", hmacPath, err.Error())
}
return nil
}
// Extract transfers an artifact to the version release directory.
func (d *Deployment) Extract(version string) error {
// Ensure that the artifact to be extracted exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
// If running as root, ensure the extract command wasn't loaded from an insecure file.
if os.Geteuid() == 0 && d.acfg.Insecure {
return fmt.Errorf(
"Refusing to execute extract command from insecure \"pulldeploy.yaml\" as root")
}
// Create the version directory if it doesn't exist.
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
if err := makeDir(versionDir, d.uid, d.gid, 0755); err != nil {
return fmt.Errorf("Cannot create release directory %q: %s", version, err.Error())
}
}
// Extract the archive into the version directory.
cmdlineArgs := substituteVars(d.acfg.Extract.Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
_, err := sysCommand("", d.acfg.Extract.Cmd, cmdlineArgs)
if err != nil {
return fmt.Errorf("Cannot extract archive %q into %q: %s", artifactPath, versionDir, err.Error())
}
// Set the ownership of all the extracted files.
if err := setOwnerAll(versionDir, d.uid, d.gid); err != nil {
return err
}
return nil
}
// Link sets the "current" symlink to point at the indicated version.
func (d *Deployment) Link(version string) error | {
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
return fmt.Errorf("Release directory does not exist: %q", versionDir)
}
symlinkPath := path.Join(d.baseDir, kCURRENTDIR)
os.Remove(symlinkPath)
return os.Symlink(versionDir, symlinkPath)
} | identifier_body |
|
deployment.go | this deployment
gid int // The numeric GID to own all files for this deployment
baseDir string // The derived top-level directory for this app's files
artifactDir string // The derived subdirectory for fetched build artifacts
releaseDir string // The derived subdirectory for extracted build artifacts
}
// New returns a new Deployment.
func New(appName string, pdcfg pdconfig.PDConfig, cfg *pdconfig.AppConfig) (*Deployment, error) {
d := new(Deployment)
d.cfg = *cfg
// Capture the supplied arguments.
d.appName = appName
// All string arguments are mandatory.
if appName == "" {
return nil, errors.New("Deployment initialization error: Appname is mandatory")
}
if d.cfg.BaseDir == "" {
return nil, errors.New("Deployment initialization error: BaseDir is mandatory")
}
// Validate the artifact type.
if ac, err := pdcfg.GetArtifactConfig(d.cfg.ArtifactType); err == nil {
d.acfg = *ac
} else {
return nil, fmt.Errorf("Deployment initialization error: invalid ArtifactType %q", d.cfg.ArtifactType)
}
// Derive the UID/GID from the username/groupname.
// NOTE: Go doesn't yet support looking up a GID from a name, so
// we use the gid from the user.
if d.cfg.User != "" {
if user, err := user.Lookup(d.cfg.User); err == nil {
if i, err := strconv.ParseInt(user.Uid, 10, 64); err == nil {
d.uid = int(i)
}
if i, err := strconv.ParseInt(user.Gid, 10, 64); err == nil {
d.gid = int(i)
}
}
}
// The parent directory must not be "/".
parentDir := absPath(d.cfg.BaseDir)
if parentDir == "/" {
return nil, errors.New("Deployment initialization error: \"/\" not permitted as BaseDir")
}
// The parent directory must exist.
if _, err := os.Stat(parentDir); err != nil {
return nil, fmt.Errorf("Deployment initialization error: unable to stat BaseDir: %s", err.Error())
}
// If the base dir doesn't exist, create it.
d.baseDir = path.Join(parentDir, appName)
if _, err := os.Stat(d.baseDir); err != nil {
if err := makeDir(d.baseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the artifact dir doesn't exist, create it.
d.artifactDir = path.Join(d.baseDir, kARTIFACTDIR)
if _, err := os.Stat(d.artifactDir); err != nil {
if err := makeDir(d.artifactDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the release dir doesn't exist, create it.
d.releaseDir = path.Join(d.baseDir, kRELEASEDIR)
if _, err := os.Stat(d.releaseDir); err != nil {
if err := makeDir(d.releaseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
return d, nil
}
// ArtifactPresent indicates whether the artifact has already been written.
func (d *Deployment) | (version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteArtifact creates a file in the artifact area from the given stream.
func (d *Deployment) WriteArtifact(version string, rc io.ReadCloser) error {
// Housekeeping: ensure the source is closed when done.
defer rc.Close()
// Generate the filename, and check whether file already exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if exists {
return fmt.Errorf("Artifact already exists: %s", artifactPath)
}
// Open the file, and write the data into it.
if fp, err := os.OpenFile(artifactPath, os.O_WRONLY|os.O_CREATE, 0664); err == nil {
defer fp.Close()
if _, err := io.Copy(fp, rc); err != nil {
return fmt.Errorf("Error while creating %q: %s", artifactPath, err.Error())
}
if err := setOwner(artifactPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", artifactPath, err.Error())
}
}
return nil
}
// HMACPresent indicates whether the HMAC has already been written.
func (d *Deployment) HMACPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteHMAC writes an HMAC into the artifact area.
func (d *Deployment) WriteHMAC(version string, hmac []byte) error {
// Generate the filename, write to file, set ownership.
hmacPath, _ := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if err := ioutil.WriteFile(hmacPath, hmac, 0664); err != nil {
return fmt.Errorf("Error while writing %q: %s", hmacPath, err.Error())
}
if err := setOwner(hmacPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", hmacPath, err.Error())
}
return nil
}
// CheckHMAC confirms that the artifact has not been corrupted or tampered with by
// calculating its HMAC and comparing it with the retrieved HMAC.
func (d *Deployment) CheckHMAC(version string) error {
// Build the filenames.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("HMAC does not exist: %s", artifactPath)
}
// Read in the HMAC.
if expectedMAC, err := ioutil.ReadFile(hmacPath); err == nil {
// Open the artifact, and calculate its HMAC.
if fp, err := os.Open(artifactPath); err == nil {
messageMAC := CalculateHMAC(fp, NewHMACCalculator(d.cfg.Secret))
if !hmac.Equal(messageMAC, expectedMAC) {
return fmt.Errorf(
"Artifact is corrupt: Expected HMAC: %q: Calculated HMAC: %q",
string(expectedMAC),
string(messageMAC),
)
}
} else {
return fmt.Errorf("Error while reading %q: %s", artifactPath, err.Error())
}
} else {
return fmt.Errorf("Error while reading %q: %s", hmacPath, err.Error())
}
return nil
}
// Extract transfers an artifact to the version release directory.
func (d *Deployment) Extract(version string) error {
// Ensure that the artifact to be extracted exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
// If running as root, ensure the extract command wasn't loaded from an insecure file.
if os.Geteuid() == 0 && d.acfg.Insecure {
return fmt.Errorf(
"Refusing to execute extract command from insecure \"pulldeploy.yaml\" as root")
}
// Create the version directory if it doesn't exist.
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
if err := makeDir(versionDir, d.uid, d.gid, 0755); err != nil {
return fmt.Errorf("Cannot create release directory %q: %s", version, err.Error())
}
}
// Extract the archive into the version directory.
cmdlineArgs := substituteVars(d.acfg.Extract.Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
_, err := sysCommand("", d.acfg.Extract.Cmd, cmdlineArgs)
if err != nil {
return fmt.Errorf("Cannot extract archive %q into %q: %s", artifactPath, versionDir, err.Error())
}
// Set the ownership of all the extracted files.
if err := setOwnerAll(versionDir, d.uid, d.gid); err != nil {
return err
}
return nil
}
// Link sets the "current" symlink to point at the indicated version.
func (d *Deployment) Link(version string) error {
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
return fmt.Errorf("Release directory does not exist: %q", versionDir | ArtifactPresent | identifier_name |
mod.rs | An enum representing the action that a Listener instructs the Dispatcher to take after a callback has finished.
pub enum DispatcherAction {
/// Indicates that processing should simply continue; no action is taken.
Continue,
/// Requests that the registration be switched to the specified Interest flags.
ChangeFlag(Interest),
/// Requests disposal of the Source (such as a socket) that produced the event.
Dispose,
}
// ##############################################################################################
// Definitions of the Task that is sent and received over a channel so that externally supplied work can run
// inside the event-loop thread, and of the Future that returns its result.
type Executable<R> = dyn (FnOnce(&mut PollingLoop) -> R) + Send + 'static;
struct TaskState<R> {
result: Option<R>,
waker: Option<Waker>,
}
struct Task<R> {
executable: Box<Executable<R>>,
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Task<R> {
fn new<E>(executable: Box<E>) -> Self
where
E: (FnOnce(&mut PollingLoop) -> R) + Send + 'static,
{
Self { executable, state: Arc::new(Mutex::new(TaskState { result: None, waker: None })) }
}
}
pub struct TaskFuture<R> {
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Future for TaskFuture<R> {
type Output = R;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
use std::task::Poll;
let mut state = self.state.lock().unwrap();
if let Some(result) = state.result.take() {
Poll::Ready(result)
} else {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
// ##############################################################################################
pub type SocketId = usize;
pub struct Dispatcher {
sender: Sender<Task<Result<SocketId>>>,
waker: mio::Waker,
}
impl Dispatcher {
/// Starts a new dispatcher.
/// A poll instance is created and the event loop begins running.
///
/// # Arguments
/// * `event_buffer_size` - Maximum number of events read in a single poll.
///
pub fn new(event_buffer_size: usize) -> Result<Dispatcher> {
let (sender, receiver) = channel();
let poll = Poll::new()?;
let waker = mio::Waker::new(poll.registry(), Token(0))?;
let mut polling_loop = PollingLoop::new(poll, event_buffer_size);
spawn(move || polling_loop.start(receiver));
Ok(Dispatcher { sender, waker })
}
/// Disposes of the socket with the specified ID.
pub fn dispose(&self, id: SocketId) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.close(id);
Ok(id)
}))
}
fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send + 'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true;
Ok(0usize)
}));
}
}
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// Starts the event loop around poll(). To run arbitrary work inside the event-loop thread, submit a task
/// to the sender paired with this receiver and call wake() on the Waker registered with self.poll to leave the blocking poll.
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while !self.stopped {
self.poll.poll(&mut events, None)?;
// Collect the sockets that have pending events
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// Handle each socket that raised an event
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// Runs every task currently queued on the given receiver.
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
for Task { executable, state } in receiver.iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// Discards the socket with the specified ID. This closes the corresponding socket.
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// Discards all registered sockets. This closes the sockets.
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), inter | spatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
| est).unwrap();
}
Di | conditional_block |
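The mod.rs sample above shows the registration half of the API: Dispatcher::new, the DispatcherRegister impls, and the PollingLoop that owns the sockets. The sketch below is illustrative only; the TcpListenerListener trait is not part of this excerpt, so its method signature is inferred from PollingLoop::on_tcp_listener, and the crate-local Result type and the visibility of register() are assumptions.

```rust
// Illustrative sketch only; trait and Result details are inferred from the sample above.
use mio::net::{TcpListener, TcpStream};
use std::net::SocketAddr;

struct Acceptor;

impl TcpListenerListener for Acceptor {
    // Invoked by PollingLoop::on_tcp_listener() for each accepted connection.
    fn on_accept(&mut self, _stream: TcpStream, address: SocketAddr) -> DispatcherAction {
        log::info!("accepted connection from {}", address);
        // A real handler would hand _stream back to the dispatcher for registration;
        // returning Continue keeps the listening socket registered as READABLE.
        DispatcherAction::Continue
    }
}

fn serve() -> Result<Dispatcher> {
    let dispatcher = Dispatcher::new(1024)?;
    let listener = TcpListener::bind("127.0.0.1:8080".parse().unwrap()).unwrap();
    // register() comes from the DispatcherRegister trait above; the returned future
    // resolves to the SocketId chosen inside the event-loop thread.
    let _pending = dispatcher.register(listener, Box::new(Acceptor) as Box<dyn TcpListenerListener>);
    // Keep the Dispatcher alive: dropping it stops the event loop (see the Drop impl above).
    Ok(dispatcher)
}
```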
mod.rs | fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send + 'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true;
Ok(0usize)
}));
}
}
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// Starts the event loop around poll(). To run arbitrary work inside the event-loop thread, submit a task
/// to the sender paired with this receiver and call wake() on the Waker registered with self.poll to leave the blocking poll.
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while !self.stopped {
self.poll.poll(&mut events, None)?;
// Collect the sockets that have pending events
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// Handle each socket that raised an event
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// Runs every task currently queued on the given receiver.
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
for Task { executable, state } in receiver.iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// Discards the socket with the specified ID. This closes the corresponding socket.
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// Discards all registered sockets. This closes the sockets.
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), interest).unwrap();
}
DispatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
// Readable event
if event.is_readable() {
let behaviour = listener.on_ready_to_read(stream);
self.action(event.token().0, stream, behaviour);
}
// Writable event
if event.is_writable() {
let behaviour = listener.on_ready_to_write(stream);
self.action(event.token().0, stream, behaviour);
}
if event.is_error() {
let behaviour = match stream.take_error() {
Ok(Some(err)) => listener.on_error(err),
Ok(None) => DispatcherAction::Continue,
Err(err) => listener.on_error(err),
};
self.action(event.token().0, stream, behaviour);
}
}
fn on_tcp_listener(
&mut self,
event: &Event,
listener: &mut TcpListener,
event_listener: &mut Box<dyn TcpListenerListener>,
) {
// Socket connection (accept) event
if event.is_readable() {
let (stream, address) = listener.accept().unwrap();
let behaviour = event_listener.on_accept(stream, address);
self.action(event.token().0, listener, behaviour);
}
}
}
/// An enum holding the sockets registered with Poll.
enum Socket {
Waker,
Stream(TcpStream, Box<dyn TcpStreamListener>),
Listener(TcpListener, Box<dyn TcpListenerListener>),
}
/// A map that assigns IDs to objects and looks them up by ID.
/// It is used to resolve a socket from the token reported by Poll.
/// Note that this [SocketMap] is not thread-safe.
struct SocketMap {
next: usize,
sockets: HashMap<usize, Arc<Mutex<Socket>>>,
}
impl SocketMap {
/// Creates a new map.
pub fn new() -> SocketMap {
let sockets = HashMap::new();
SocketMap { next: 0, sockets }
}
/// Looks up the object with the specified ID.
pub fn get(&self, id: usize) -> Option<Arc<Mutex<Socket>>> {
if id == 0 {
Some(Arc::new(Mutex::new(Socket::Waker)))
} else {
self.sockets.get(&id).map(|a| a.clone())
}
}
/// Returns all IDs managed by this map.
pub fn ids(&self) -> Vec<SocketId> {
self.sockets.keys().map(|id| *id).collect::<Vec<usize>>()
}
/// Searches for an available ID.
pub fn available_id(&mut self) -> Result<SocketId> {
// NOTE: Token(0) is reserved for the Waker and Token(usize::MAX) is used internally by Poll, so both are excluded here
let max = std::usize::MAX - 2;
if self.sockets.len() == max {
return Err(Error::TooManySockets { maximum: std::usize::MAX });
}
for i in 0..=max {
let id = (self.next as u64 + i as u64) as usize + 1;
| i | identifier_name |
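The sample above drives connection callbacks through on_tcp_stream(): on_ready_to_read, on_ready_to_write, and on_error, each returning a DispatcherAction. The sketch below shows what an implementation of that listener could look like; the TcpStreamListener trait itself is not shown in the excerpt, so the method names and signatures are inferred from those call sites and should be treated as assumptions.

```rust
// Illustrative sketch; signatures are inferred from on_tcp_stream() above.
use mio::Interest;
use mio::net::TcpStream;
use std::io::{ErrorKind, Read};

struct Connection;

impl TcpStreamListener for Connection {
    fn on_ready_to_read(&mut self, stream: &mut TcpStream) -> DispatcherAction {
        let mut buf = [0u8; 4096];
        match stream.read(&mut buf) {
            Ok(0) => DispatcherAction::Dispose, // peer closed the connection
            Ok(n) => {
                log::info!("read {} bytes", n);
                DispatcherAction::Continue
            }
            Err(e) if e.kind() == ErrorKind::WouldBlock => DispatcherAction::Continue,
            Err(_) => DispatcherAction::Dispose,
        }
    }

    fn on_ready_to_write(&mut self, _stream: &mut TcpStream) -> DispatcherAction {
        // Once nothing is pending to send, stop watching for WRITABLE events.
        DispatcherAction::ChangeFlag(Interest::READABLE)
    }

    fn on_error(&mut self, err: std::io::Error) -> DispatcherAction {
        log::warn!("socket error: {}", err);
        DispatcherAction::Dispose
    }
}
```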
|
mod.rs | An enum representing the action that a Listener instructs the Dispatcher to take after a callback has finished.
pub enum DispatcherAction {
/// Indicates that processing should simply continue; no action is taken.
Continue,
/// Requests that the registration be switched to the specified Interest flags.
ChangeFlag(Interest),
/// Requests disposal of the Source (such as a socket) that produced the event.
Dispose,
}
// ##############################################################################################
// Definitions of the Task that is sent and received over a channel so that externally supplied work can run
// inside the event-loop thread, and of the Future that returns its result.
type Executable<R> = dyn (FnOnce(&mut PollingLoop) -> R) + Send + 'static;
struct TaskState<R> {
result: Option<R>,
waker: Option<Waker>,
}
struct Task<R> {
executable: Box<Executable<R>>,
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Task<R> {
fn new<E>(executable: Box<E>) -> Self
where
E: (FnOnce(&mut PollingLoop) -> R) + Send + 'static,
{
Self { executable, state: Arc::new(Mutex::new(TaskState { result: None, waker: None })) }
}
}
pub struct TaskFuture<R> {
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Future for TaskFuture<R> {
type Output = R;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
use std::task::Poll;
let mut state = self.state.lock().unwrap();
if let Some(result) = state.result.take() {
Poll::Ready(result)
} else {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
// ##############################################################################################
pub type SocketId = usize;
pub struct Dispatcher {
sender: Sender<Task<Result<SocketId>>>,
waker: mio::Waker,
}
impl Dispatcher {
/// Starts a new dispatcher.
/// A poll instance is created and the event loop begins running.
///
/// # Arguments
/// * `event_buffer_size` - Maximum number of events read in a single poll.
///
pub fn new(event_buffer_size: usize) -> Result<Dispatcher> {
let (sender, receiver) = channel();
let poll = Poll::new()?;
let waker = mio::Waker::new(poll.registry(), Token(0))?;
let mut polling_loop = PollingLoop::new(poll, event_buffer_size);
spawn(move || polling_loop.start(receiver));
Ok(Dispatcher { sender, waker })
}
/// Disposes of the socket with the specified ID.
pub fn dispose(&self, id: SocketId) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.close(id);
Ok(id)
}))
}
fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send + 'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true; | Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// Starts the event loop around poll(). To run arbitrary work inside the event-loop thread, submit a task
/// to the sender paired with this receiver and call wake() on the Waker registered with self.poll to leave the blocking poll.
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while !self.stopped {
self.poll.poll(&mut events, None)?;
// Collect the sockets that have pending events
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// Handle each socket that raised an event
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// Runs every task currently queued on the given receiver.
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
for Task { executable, state } in receiver.iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// Discards the socket with the specified ID. This closes the corresponding socket.
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// Discards all registered sockets. This closes the sockets.
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), interest).unwrap();
}
DispatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
|
Ok(0usize)
}));
}
}
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn | identifier_body |
mod.rs | An enum describing the action that the Listener instructs the Dispatcher to take after a callback completes.
pub enum DispatcherAction {
/// Indicates that processing should simply continue; nothing special is done.
Continue,
/// Requests changing the registration to the given Interest flags.
ChangeFlag(Interest),
/// Requests disposal of the Source (such as the socket) that produced the event.
Dispose,
}
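// Editor's sketch, not part of the original file: one way a listener callback
// might pick a DispatcherAction. The function name and parameters are
// hypothetical; only the enum variants above come from the original code.
fn choose_action_sketch(bytes_read: usize, wants_write: bool) -> DispatcherAction {
    if bytes_read == 0 {
        // Peer closed the connection: ask the dispatcher to drop this socket.
        DispatcherAction::Dispose
    } else if wants_write {
        // Also wait for writability from now on.
        DispatcherAction::ChangeFlag(Interest::READABLE | Interest::WRITABLE)
    } else {
        DispatcherAction::Continue
    }
}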
// ##############################################################################################
// Definitions of the Task sent and received over a channel so that externally supplied
// work can run inside the event-loop thread, and of the Future that returns its result.
type Executable<R> = dyn (FnOnce(&mut PollingLoop) -> R) + Send + 'static;
struct TaskState<R> {
result: Option<R>,
waker: Option<Waker>,
}
struct Task<R> {
executable: Box<Executable<R>>,
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Task<R> {
fn new<E>(executable: Box<E>) -> Self
where
E: (FnOnce(&mut PollingLoop) -> R) + Send + 'static,
{
Self { executable, state: Arc::new(Mutex::new(TaskState { result: None, waker: None })) }
}
}
pub struct TaskFuture<R> {
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Future for TaskFuture<R> { |
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
use std::task::Poll;
let mut state = self.state.lock().unwrap();
if let Some(result) = state.result.take() {
Poll::Ready(result)
} else {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
// ##############################################################################################
pub type SocketId = usize;
pub struct Dispatcher {
sender: Sender<Task<Result<SocketId>>>,
waker: mio::Waker,
}
impl Dispatcher {
/// Starts a new dispatcher.
/// A Poll instance is created and the event loop begins.
///
/// # Arguments
/// * `event_buffer_size` - the maximum number of events read by a single poll.
///
pub fn new(event_buffer_size: usize) -> Result<Dispatcher> {
let (sender, receiver) = channel();
let poll = Poll::new()?;
let waker = mio::Waker::new(poll.registry(), Token(0))?;
let mut polling_loop = PollingLoop::new(poll, event_buffer_size);
spawn(move || polling_loop.start(receiver));
Ok(Dispatcher { sender, waker })
}
/// Disposes of the socket with the given ID.
pub fn dispose(&self, id: SocketId) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.close(id);
Ok(id)
}))
}
fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send + 'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true;
Ok(0usize)
}));
}
}
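// Editor's sketch, not part of the original file: because Drop queues the stop
// task above, letting the Dispatcher fall out of scope is enough to end the
// event loop. The function name is hypothetical.
fn shutdown_sketch() -> Result<()> {
    let dispatcher = Dispatcher::new(256)?;
    // ... sockets would be registered here ...
    drop(dispatcher); // queues `stopped = true` and wakes the polling thread
    Ok(())
}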
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// Starts the event loop for poll(). To run arbitrary work on the event-loop thread,
/// push a task to the sender paired with this receiver and call wake() on the Waker
/// registered with self.poll to break out of the blocking poll.
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while !self.stopped {
self.poll.poll(&mut events, None)?;
// Collect the sockets for which events fired
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// Run the handlers for the sockets that have pending events
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// Executes every task currently queued in the given receiver.
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
for Task { executable, state } in receiver.iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// Discards the socket with the given ID. This operation closes the corresponding socket.
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// Discards all registered sockets. This operation closes them.
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), interest).unwrap();
}
DispatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
| type Output = R; | random_line_split |
autoencoder.py | self._latent_dim = 64
self._epochs = 500
self.encoder = tf.keras.Sequential([
layers.Flatten(),
layers.Dense(self._latent_dim, activation='relu'),
])
self.decoder = tf.keras.Sequential([
layers.Dense(10800, activation='sigmoid'),
layers.Reshape((60, 60, 3))
])
def call(self, x):
''
Call the encoder and decoder.
''
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
def set_latent_dim(self, latent_dim):
''
Change the number of latent dimensions.
''
self._latent_dim = latent_dim
def set_epochs(self, epochs):
''
Change number of epochs.
''
self._epochs = epochs
'''
class Controller:
"""
Generic controller for the autoencoder.
This class should not be used directly, as its get_input method does
nothing. Instead, this class should be inherited from and the subclass used
to actually send input to the autoencoder.
"""
def get_input(self):
"""
Translate input from the user into the file locations of the images to
transform.
"""
pass
class TakeInput(Controller):
'''
Get input from the user.
'''
def get_input(self):
'''
Take input from the user and determine if the input is valid and what
should be done with it.
Print prompts for the user and, based on the given input, either call _invalid_input() or
return file locations and file names to be used in the FormatData class.
'''
# Get the path for the folder of training images.
train_folder_input = input(
"Enter the path for the folder of training images: ")
train_folder = train_folder_input.strip()
# Get the type of fruit they want to train on.
train_image_input = input(
"Enter the name of the images you want the autoencoder to train on: ")
train_image = train_image_input.strip()
# Get the number of images to train on.
train_num_input = input("Enter the number of training images: ")
train_num = train_num_input.strip()
if self._check_train_input(train_folder, train_image, train_num):
self._invalid_input()
else:
train_num = int(train_num)
# Get the path for the folder of the user image(s).
test_folder_input = input(
"Enter the folder path for the image(s) you want to transform: ")
test_folder = test_folder_input.strip()
# Get the name of the image(s) to transform.
test_image_input = input(
"Enter the name of the image(s) you want to transform: ")
test_image = test_image_input.strip()
# Get the number of images the user wants to transform.
test_num_input = input(
"Enter the number of image(s) you want to transform (1-10): ")
test_num = test_num_input.strip()
if self._check_test_input(test_folder, test_image, test_num):
self._invalid_input()
else:
test_num = int(test_num)
return (train_folder, train_image, train_num, test_folder, test_image,
test_num)
def _check_train_input(self, train_folder, train_image, train_num):
'''
Check that the training input leads to a valid file location and the number
of images desired is an integer.
Arguments:
train_folder: A string of user input for the path to the folder with
training images.
train_image: A string of user input for the name of training images.
train_num: A string of user input for the number of images to train on.
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the train inputs are valid and there is at least one training
# image in the location provided.
file_name = (f'{train_image}_{1}.jpg')
file_location = (f'{train_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
# Make sure the number given is an integer.
try:
int(train_num)
except ValueError:
return True
def _check_test_input(self, test_folder, test_image, test_num):
'''
Check that the test input has files at all of the locations specified and
the number of images desired is an integer.
Arguments:
test_folder: User input for the path to the folder with testing images.
test_image: User input for the name of testing images.
test_num: User input for the number of images to transform (integer from
1-10).
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the image number input is an integer between 1 and 10.
try:
int(test_num)
except ValueError:
return True
test_num = int(test_num)
if 0 > test_num or test_num > 10:
return True
# Make sure the testing inputs are valid and there are files in the
# location provided.
for i in range(1, test_num + 1):
file_name = (f'{test_image}_{i}.jpg')
file_location = (f'{test_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
def _invalid_input(self):
'''
Print an error message if user input is invalid and run the get_input
function again.
'''
print("Invalid input. Please try again.")
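# Editor's sketch, not part of the original file: one way the tuple returned by
# TakeInput.get_input() could be fed into FormatData (defined further below).
# The function name is hypothetical and valid input is assumed.
def collect_training_data_sketch():
    controller = TakeInput()
    (train_folder, train_image, train_num,
     test_folder, test_image, test_num) = controller.get_input()
    data = FormatData()
    data.set_file_location(train_folder)
    data.set_image_name(train_image)
    return data, train_num, (test_folder, test_image, test_num)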
class View:
"""
Generic view for the autoencoder.
This class should not be used directly, as its methods do nothing. Instead,
this class should be inherited from and the subclass used to actually
display the images.
"""
def display_input(self, images, num_images):
|
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
pass
class ViewImages(View):
'''
Display the input images and their output after going through the
autoencoder.
'''
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1)
plt.imshow(images[i])
plt.title("original")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1 + num_images)
plt.imshow(decoded[i])
plt.title("reconstructed")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
class FormatData:
'''
Format image data for use in the autoencoder.
Attributes:
_file_location: Folder path for images.
_image_name: Name of all images.
_image_size: Pixel dimension for square images.
'''
def __init__(self):
self._file_location = '/content/drive/MyDrive/Colab Notebooks/train'
self._image_name = 'orange'
self._image_size = 60
def set_file_location(self, file_location):
'''
Define the location of the image files.
'''
self._file_location = file_location
def set_image_name(self, image_name):
'''
Define the name for all the images. This assumes the images used all have the
same name with numbers after (ex. 'image_1.jpg', 'image_2.jpg', etc.)
'''
self._image_name = image_name
def set_image_size(self, image_size):
'''
Define the dimension of input and output images.
'''
self._image_size = image_size
def _get_image(self, image_num):
'''
Get individual image file.
Arguments:
image_num: An integer representing the number of the image desired.
Returns:
A Pillow image.
'''
file_name = (f'{self._image_name}_{image_num}.jpg')
file_location = (f'{self._file_location}/{file_name}')
if not os.path.isfile(file_location):
return False
else:
im = Image.open(file_location)
im = im.convert('RGB')
return im
def _square_image(self, image):
'''
Crop the image into a square with the dimensions defined by _image_size.
Arguments:
image: A Pillow image.
Returns:
A square Pillow image with the specified pixel dimensions.
'''
width, height = image.size
# Determine if width or height is smaller.
if width >= height:
min_dimention = height
else:
min_dimention = width
# Crop the image evenly on all sides.
width_1 = int((width - min_dimention) / 2)
height_1 = int((height - min_dimention) / 2)
box = (width_1, height_1, width - width_1, height - height_1)
image_c | '''
Display original input image(s).
'''
pass | identifier_body |
autoencoder.py | ._latent_dim = 64
self._epochs = 500
self.encoder = tf.keras.Sequential([
layers.Flatten(),
layers.Dense(self._latent_dim, activation='relu'),
])
self.decoder = tf.keras.Sequential([
layers.Dense(10800, activation='sigmoid'),
layers.Reshape((60, 60, 3))
])
def call(self, x):
''
Call the encoder and decoder.
''
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
def set_latent_dim(self, latent_dim):
''
Change the number of latent dimensions.
''
self._latent_dim = latent_dim
def set_epochs(self, epochs):
''
Change number of epochs.
''
self._epochs = epochs
'''
class Controller:
"""
Generic controller for the autoencoder.
This class should not be used directly, as its get_input method does
nothing. Instead, this class should be inherited from and the subclass used
to actually send input to the autoencoder.
"""
def get_input(self):
"""
Translate input from the user into the file locations of the images to
transform.
"""
pass
class TakeInput(Controller):
'''
Get input from the user.
'''
def get_input(self):
'''
Take input from the user and determine if the input is valid and what
should be done with it.
Print prompts for the user and, based on the given input, either call _invalid_input() or
return file locations and file names to be used in the FormatData class.
'''
# Get the path for the folder of training images.
train_folder_input = input(
"Enter the path for the folder of training images: ")
train_folder = train_folder_input.strip()
# Get the type of fruit they want to train on.
train_image_input = input(
"Enter the name of the images you want the autoencoder to train on: ")
train_image = train_image_input.strip()
# Get the number of images to train on.
train_num_input = input("Enter the number of training images: ")
train_num = train_num_input.strip()
if self._check_train_input(train_folder, train_image, train_num):
|
else:
train_num = int(train_num)
# Get the path for the folder of the user image(s).
test_folder_input = input(
"Enter the folder path for the image(s) you want to transform: ")
test_folder = test_folder_input.strip()
# Get the name of the image(s) to transform.
test_image_input = input(
"Enter the name of the image(s) you want to transform: ")
test_image = test_image_input.strip()
# Get the number of images the user wants to transform.
test_num_input = input(
"Enter the number of image(s) you want to transform (1-10): ")
test_num = test_num_input.strip()
if self._check_test_input(test_folder, test_image, test_num):
self._invalid_input()
else:
test_num = int(test_num)
return (train_folder, train_image, train_num, test_folder, test_image,
test_num)
def _check_train_input(self, train_folder, train_image, train_num):
'''
Check that the training input leads to a valid file location and the number
of images desired is an integer.
Arguments:
train_folder: A string of user input for the path to the folder with
training images.
train_image: A string of user input for the name of training images.
train_num: A string of user input for the number of images to train on.
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the train inputs are valid and there is at least one training
# image in the location provided.
file_name = (f'{train_image}_{1}.jpg')
file_location = (f'{train_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
# Make sure the number given is an integer.
try:
int(train_num)
except ValueError:
return True
def _check_test_input(self, test_folder, test_image, test_num):
'''
Check that the test input has files at all of the locations specified and
the number of images desired is an integer.
Arguments:
test_folder: User input for the path to the folder with testing images.
test_image: User input for the name of testing images.
test_num: User input for the number of images to transform (integer from
1-10).
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the image number input is an integer between 1 and 10.
try:
int(test_num)
except ValueError:
return True
test_num = int(test_num)
if 0 > test_num or test_num > 10:
return True
# Make sure the testing inputs are valid and there are files in the
# location provided.
for i in range(1, test_num + 1):
file_name = (f'{test_image}_{i}.jpg')
file_location = (f'{test_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
def _invalid_input(self):
'''
Print an error message if user input is invalid and run the get_input
function again.
'''
print("Invalid input. Please try again.")
class View:
"""
Generic view for the autoencoder.
This class should not be used directly, as its methods do nothing. Instead,
this class should be inherited from and the subclass used to actually
display the images.
"""
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
pass
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
pass
class ViewImages(View):
'''
Display the input images and their output after going through the
autoencoder.
'''
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1)
plt.imshow(images[i])
plt.title("original")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1 + num_images)
plt.imshow(decoded[i])
plt.title("reconstructed")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
class FormatData:
'''
Format image data for use in the autoencoder.
Attributes:
_file_location: Folder path for images.
_image_name: Name of all images.
_image_size: Pixel dimension for square images.
'''
def __init__(self):
self._file_location = '/content/drive/MyDrive/Colab Notebooks/train'
self._image_name = 'orange'
self._image_size = 60
def set_file_location(self, file_location):
'''
Define the location of the image files.
'''
self._file_location = file_location
def set_image_name(self, image_name):
'''
Define the name for all the images. This assumes the images used all have the
same name with numbers after (ex. 'image_1.jpg', 'image_2.jpg', etc.)
'''
self._image_name = image_name
def set_image_size(self, image_size):
'''
Define the dimension of input and output images.
'''
self._image_size = image_size
def _get_image(self, image_num):
'''
Get individual image file.
Arguments:
image_num: An integer representing the number of the image desired.
Returns:
A Pillow image.
'''
file_name = (f'{self._image_name}_{image_num}.jpg')
file_location = (f'{self._file_location}/{file_name}')
if not os.path.isfile(file_location):
return False
else:
im = Image.open(file_location)
im = im.convert('RGB')
return im
def _square_image(self, image):
'''
Crop the image into a square with the dimensions defined by _image_size.
Arguments:
image: A Pillow image.
Returns:
A square Pillow image with the specified pixel dimensions.
'''
width, height = image.size
# Determine if width or height is smaller.
if width >= height:
min_dimention = height
else:
min_dimention = width
# Crop the image evenly on all sides.
width_1 = int((width - min_dimention) / 2)
height_1 = int((height - min_dimention) / 2)
box = (width_1, height_1, width - width_1, height - height_1)
image_c | self._invalid_input() | conditional_block |
autoencoder.py | """
import PIL
from PIL import Image
import numpy as np
import sys
from matplotlib import image
from matplotlib import pyplot as plt
#from google.colab import files
import os.path
from os import path
import pandas as pd
'''
import tensorflow as tf
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model
class Autoencoder(Model):
''
Define the autoencoder with help from the tensorflow library.
Attributes:
_latent_dim: The number of latent dimensions the images get mapped onto.
_epochs: The number of epochs the autoencoder goes through to train.
encoder: The encoder layers of the autoencoder.
decoder: The decoder layers of the autoencoder.
''
def __init__(self):
super(Autoencoder, self).__init__()
self._latent_dim = 64
self._epochs = 500
self.encoder = tf.keras.Sequential([
layers.Flatten(),
layers.Dense(self._latent_dim, activation='relu'),
])
self.decoder = tf.keras.Sequential([
layers.Dense(10800, activation='sigmoid'),
layers.Reshape((60, 60, 3))
])
def call(self, x):
''
Call the encoder and decoder.
''
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
def set_latent_dim(self, latent_dim):
''
Change the number of latent dimensions.
''
self._latent_dim = latent_dim
def set_epochs(self, epochs):
''
Change number of epochs.
''
self._epochs = epochs
'''
class Controller:
"""
Generic controller for the autoencoder.
This class should not be used directly, as its get_input method does
nothing. Instead, this class should be inherited from and the subclass used
to actually send input to the autoencoder.
"""
def get_input(self):
"""
Translate input from the user into the file locations of the images to
transform.
"""
pass
class TakeInput(Controller):
'''
Get input from the user.
'''
def get_input(self):
'''
Take input from the user and determine if the input is valid and what
should be done with it.
Print prompts for the user and, based on the given input, either call _invalid_input() or
return file locations and file names to be used in the FormatData class.
'''
# Get the path for the folder of training images.
train_folder_input = input(
"Enter the path for the folder of training images: ")
train_folder = train_folder_input.strip()
# Get the type of fruit they want to train on.
train_image_input = input(
"Enter the name of the images you want the autoencoder to train on: ")
train_image = train_image_input.strip()
# Get the number of images to train on.
train_num_input = input("Enter the number of training images: ")
train_num = train_num_input.strip()
if self._check_train_input(train_folder, train_image, train_num):
self._invalid_input()
else:
train_num = int(train_num)
# Get the path for the folder of the user image(s).
test_folder_input = input(
"Enter the folder path for the image(s) you want to transform: ")
test_folder = test_folder_input.strip()
# Get the name of the image(s) to transform.
test_image_input = input(
"Enter the name of the image(s) you want to transform: ")
test_image = test_image_input.strip()
# Get the number of images the user wants to transform.
test_num_input = input(
"Enter the number of image(s) you want to transform (1-10): ")
test_num = test_num_input.strip()
if self._check_test_input(test_folder, test_image, test_num):
self._invalid_input()
else:
test_num = int(test_num)
return (train_folder, train_image, train_num, test_folder, test_image,
test_num)
def _check_train_input(self, train_folder, train_image, train_num):
'''
Check that the training input leads to a valid file location and the number
of images desired is an integer.
Arguments:
train_folder: A string of user input for the path to the folder with
training images.
train_image: A string of user input for the name of training images.
train_num: A string of user input for the number of images to train on.
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the train inputs are valid and there is at least one training
# image in the location provided.
file_name = (f'{train_image}_{1}.jpg')
file_location = (f'{train_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
# Make sure the number given is an integer.
try:
int(train_num)
except ValueError:
return True
def _check_test_input(self, test_folder, test_image, test_num):
'''
Check that the test input has files at all of the locations specified and
the number of images desired is an integer.
Arguments:
test_folder: User input for the path to the folder with testing images.
test_image: User input for the name of testing images.
test_num: User input for the number of images to transform (integer from
1-10).
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the image number input is an integer between 1 and 10.
try:
int(test_num)
except ValueError:
return True
test_num = int(test_num)
if 0 > test_num or test_num > 10:
return True
# Make sure the testing inputs are valid and there are files in the
# location provided.
for i in range(1, test_num + 1):
file_name = (f'{test_image}_{i}.jpg')
file_location = (f'{test_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
def _invalid_input(self):
'''
Print an error message if user input is invalid and run the get_input
function again.
'''
print("Invalid input. Please try again.")
class View:
"""
Generic view for the autoencoder.
This class should not be used directly, as its methods do nothing. Instead,
this class should be inherited from and the subclass used to actually
display the images.
"""
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
pass
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
pass
class ViewImages(View):
'''
Display the input images and their output after going through the
autoencoder.
'''
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1)
plt.imshow(images[i])
plt.title("original")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1 + num_images)
plt.imshow(decoded[i])
plt.title("reconstructed")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
class FormatData:
'''
Format image data for use in the autoencoder.
Attributes:
_file_location: Folder path for images.
_image_name: Name of all images.
_image_size: Pixel dimension for square images.
'''
def __init__(self):
self._file_location = '/content/drive/MyDrive/Colab Notebooks/train'
self._image_name = 'orange'
self._image_size = 60
def set_file_location(self, file_location):
'''
Define the location of the image files.
'''
self._file_location = file_location
def set_image_name(self, image_name):
'''
Define the name for all the images. This assumes the images used all have the
same name with numbers after (ex. 'image_1.jpg', 'image_2.jpg', etc.)
'''
self._image_name = image_name
def set_image_size(self, image_size):
'''
Define the dimension of input and output images.
'''
self._image_size = image_size
def _get_image(self, image_num):
'''
Get individual image file.
Arguments:
image_num: An integer representing the number of the image desired.
Returns:
A Pillow image.
'''
file_name = ( | Original file is located at
https://colab.research.google.com/drive/1SFG3__AoM7dvKI06qiu76tW-mtbZDsxn | random_line_split |
|
autoencoder.py | self._latent_dim = 64
self._epochs = 500
self.encoder = tf.keras.Sequential([
layers.Flatten(),
layers.Dense(self._latent_dim, activation='relu'),
])
self.decoder = tf.keras.Sequential([
layers.Dense(10800, activation='sigmoid'),
layers.Reshape((60, 60, 3))
])
def call(self, x):
''
Call the encoder and decoder.
''
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
def set_latent_dim(self, latent_dim):
''
Change the number of latent dimensions.
''
self._latent_dim = latent_dim
def set_epochs(self, epochs):
''
Change number of epochs.
''
self._epochs = epochs
'''
class Controller:
"""
Generic controller for the autoencoder.
This class should not be used directly, as its get_input method does
nothing. Instead, this class should be inherited from and the subclass used
to actually send input to the autoencoder.
"""
def get_input(self):
"""
Translate input from the user into the file locations of the images to
transform.
"""
pass
class TakeInput(Controller):
'''
Get input from the user.
'''
def get_input(self):
'''
Take input from the user and determine if the input is valid and what
should be done with it.
Print prompts for the user and, based on the given input, either call _invalid_input() or
return file locations and file names to be used in the FormatData class.
'''
# Get the path for the folder of training images.
train_folder_input = input(
"Enter the path for the folder of training images: ")
train_folder = train_folder_input.strip()
# Get the type of fruit they want to train on.
train_image_input = input(
"Enter the name of the images you want the autoencoder to train on: ")
train_image = train_image_input.strip()
# Get the number of images to train on.
train_num_input = input("Enter the number of training images: ")
train_num = train_num_input.strip()
if self._check_train_input(train_folder, train_image, train_num):
self._invalid_input()
else:
train_num = int(train_num)
# Get the path for the folder of the user image(s).
test_folder_input = input(
"Enter the folder path for the image(s) you want to transform: ")
test_folder = test_folder_input.strip()
# Get the name of the image(s) to transform.
test_image_input = input(
"Enter the name of the image(s) you want to transform: ")
test_image = test_image_input.strip()
# Get the number of images the user wants to transform.
test_num_input = input(
"Enter the number of image(s) you want to transform (1-10): ")
test_num = test_num_input.strip()
if self._check_test_input(test_folder, test_image, test_num):
self._invalid_input()
else:
test_num = int(test_num)
return (train_folder, train_image, train_num, test_folder, test_image,
test_num)
def _check_train_input(self, train_folder, train_image, train_num):
'''
Check that the training input leads to a valid file location and the number
of images desired is an integer.
Arguments:
train_folder: A string of user input for the path to the folder with
training images.
train_image: A string of user input for the name of training images.
train_num: A string of user input for the number of images to train on.
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the train inputs are valid and there is at least one training
# image in the location provided.
file_name = (f'{train_image}_{1}.jpg')
file_location = (f'{train_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
# Make sure the number given is an integer.
try:
int(train_num)
except ValueError:
return True
def _check_test_input(self, test_folder, test_image, test_num):
'''
Check that the test input has files at all of the locations specified and
the number of images desired is an integer.
Arguments:
test_folder: User input for the path to the folder with testing images.
test_image: User input for the name of testing images.
test_num: User input for the number of images to transform (integer from
1-10).
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the image number input is an integer between 1 and 10.
try:
int(test_num)
except ValueError:
return True
test_num = int(test_num)
if 0 > test_num or test_num > 10:
return True
# Make sure the testing inputs are valid and there are files in the
# location provided.
for i in range(1, test_num + 1):
file_name = (f'{test_image}_{i}.jpg')
file_location = (f'{test_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
def _invalid_input(self):
'''
Print an error message if user input is invalid and run the get_input
function again.
'''
print("Invalid input. Please try again.")
class View:
"""
Generic view for the autoencoder.
This class should not be used directly, as its methods do nothing. Instead,
this class should be inherited from and the subclass used to actually
display the images.
"""
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
pass
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
pass
class ViewImages(View):
'''
Display the input images and their output after going through the
autoencoder.
'''
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1)
plt.imshow(images[i])
plt.title("original")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1 + num_images)
plt.imshow(decoded[i])
plt.title("reconstructed")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
class FormatData:
'''
Format image data for use in the autoencoder.
Attributes:
_file_location: Folder path for images.
_image_name: Name of all images.
_image_size: Pixel dimension for square images.
'''
def __init__(self):
self._file_location = '/content/drive/MyDrive/Colab Notebooks/train'
self._image_name = 'orange'
self._image_size = 60
def set_file_location(self, file_location):
'''
Define the location of the image files.
'''
self._file_location = file_location
def set_image_name(self, image_name):
'''
Define the name for all the images. This assumes the images used all have the
same name with numbers after (ex. 'image_1.jpg', 'image_2.jpg', etc.)
'''
self._image_name = image_name
def set_image_size(self, image_size):
'''
Define the dimension of input and output images.
'''
self._image_size = image_size
def _get_image(self, image_num):
'''
Get individual image file.
Arguments:
image_num: An integer representing the number of the image desired.
Returns:
A Pillow image.
'''
file_name = (f'{self._image_name}_{image_num}.jpg')
file_location = (f'{self._file_location}/{file_name}')
if not os.path.isfile(file_location):
return False
else:
im = Image.open(file_location)
im = im.convert('RGB')
return im
def | (self, image):
'''
Crop the image into a square with the dimensions defined by _image_size.
Arguments:
image: A Pillow image.
Returns:
A square Pillow image with the specified pixel dimensions.
'''
width, height = image.size
# Determine if width or height is smaller.
if width >= height:
min_dimention = height
else:
min_dimention = width
# Crop the image evenly on all sides.
width_1 = int((width - min_dimention) / 2)
height_1 = int((height - min_dimention) / 2)
box = (width_1, height_1, width - width_1, height - height_1)
image_c | _square_image | identifier_name |
client.go | 16
// TODO: Public keys of trusted monitors.
)
var (
// ErrRetry occurs when an update has been queued, but the
// results of the update differ from the one requested.
// This indicates that a separate update was in-flight while
// this update was being submitted. To continue, the client
// should make a fresh update and try again.
ErrRetry = errors.New("client: update race condition - try again")
// ErrWait occurs when an update has been queued, but no change has been
// observed in the user's account yet.
ErrWait = errors.New("client: update not present yet - wait some more")
// ErrIncomplete occurs when the server indicates that requested epochs
// are not available.
ErrIncomplete = errors.New("incomplete account history")
// Vlog is the verbose logger. By default it outputs to /dev/null.
Vlog = log.New(ioutil.Discard, "", 0)
)
// Client is a helper library for issuing updates to the key server.
// Client Responsibilities
// - Trust Model:
// - - Trusted Monitors
// - - Verify last X days
// - Gossip - What is the current value of the root?
// - - Gossip advancement: advance state between current and server.
// - Sender queries - Do queries match up against the gossip root?
// - - List trusted monitors.
// - Key Owner
// - - Periodically query own keys. Do they match the private keys I have?
// - - Sign key update requests.
type Client struct {
*Verifier
cli pb.KeyTransparencyClient
domainID string
mutator mutator.Func
RetryDelay time.Duration
trusted trillian.SignedLogRoot
}
// NewFromConfig creates a new client from a config
func NewFromConfig(ktClient pb.KeyTransparencyClient, config *pb.Domain) (*Client, error) {
ktVerifier, err := NewVerifierFromDomain(config)
if err != nil {
return nil, err
}
return New(ktClient, config.DomainId, ktVerifier), nil
}
// New creates a new client.
// TODO(gbelvin): set retry delay.
func New(ktClient pb.KeyTransparencyClient,
domainID string,
ktVerifier *Verifier) *Client {
return &Client{
Verifier: ktVerifier,
cli: ktClient,
domainID: domainID,
mutator: entry.New(),
RetryDelay: 3 * time.Second,
}
}
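// editorExampleNewClient is an editor-added sketch, not part of the original file.
// It assumes a dialled *grpc.ClientConn and a *pb.Domain config obtained elsewhere,
// and that pb.NewKeyTransparencyClient is the generated gRPC constructor.
func editorExampleNewClient(ctx context.Context, conn *grpc.ClientConn, cfg *pb.Domain) ([]byte, error) {
	client, err := NewFromConfig(pb.NewKeyTransparencyClient(conn), cfg)
	if err != nil {
		return nil, err
	}
	profile, _, err := client.GetEntry(ctx, "alice@example.com", "default")
	return profile, err
}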
func (c *Client) updateTrusted(newTrusted *trillian.SignedLogRoot) {
if newTrusted.TimestampNanos > c.trusted.TimestampNanos &&
newTrusted.TreeSize >= c.trusted.TreeSize {
c.trusted = *newTrusted
}
}
// GetEntry returns an entry if it exists, and nil if it does not.
func (c *Client) GetEntry(ctx context.Context, userID, appID string, opts ...grpc.CallOption) ([]byte, *trillian.SignedMapRoot, error) {
e, err := c.VerifiedGetEntry(ctx, appID, userID)
if err != nil {
return nil, nil, err
}
// Empty case.
if e.GetCommitted() == nil {
return nil, e.GetSmr(), nil
}
return e.GetCommitted().GetData(), e.GetSmr(), nil
}
func min(x, y int32) int32 {
if x < y {
return x
}
return y
}
// ListHistory returns a list of profiles starting and ending at given epochs.
// It also filters out all identical consecutive profiles.
func (c *Client) ListHistory(ctx context.Context, userID, appID string, start, end int64, opts ...grpc.CallOption) (map[*trillian.SignedMapRoot][]byte, error) {
if start < 0 {
return nil, fmt.Errorf("start=%v, want >= 0", start)
}
var currentProfile []byte
profiles := make(map[*trillian.SignedMapRoot][]byte)
epochsReceived := int64(0)
epochsWant := end - start + 1
for epochsReceived < epochsWant {
trustedSnapshot := c.trusted
resp, err := c.cli.ListEntryHistory(ctx, &pb.ListEntryHistoryRequest{
DomainId: c.domainID,
UserId: userID,
AppId: appID,
FirstTreeSize: trustedSnapshot.TreeSize,
Start: start,
PageSize: min(int32((end-start)+1), pageSize),
}, opts...)
if err != nil {
return nil, err
}
epochsReceived += int64(len(resp.GetValues()))
for i, v := range resp.GetValues() {
Vlog.Printf("Processing entry for %v, epoch %v", userID, start+int64(i))
err = c.VerifyGetEntryResponse(ctx, c.domainID, appID, userID, trustedSnapshot, v)
if err != nil {
return nil, err
}
c.updateTrusted(v.GetLogRoot())
// Compress profiles that are equal through time. All
// nil profiles before the first profile are ignored.
profile := v.GetCommitted().GetData()
if bytes.Equal(currentProfile, profile) {
continue
}
// Append the slice and update currentProfile.
profiles[v.GetSmr()] = profile
currentProfile = profile
}
if resp.NextStart == 0 {
break // No more data.
}
start = resp.NextStart // Fetch the next block of results.
}
if epochsReceived < epochsWant {
return nil, ErrIncomplete
}
return profiles, nil
}
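// editorExampleListHistory is an editor-added sketch, not part of the original file,
// showing one way to consume the map returned by ListHistory; the user, app, and
// epoch range below are placeholder values.
func editorExampleListHistory(ctx context.Context, c *Client) error {
	profiles, err := c.ListHistory(ctx, "alice@example.com", "default", 1, 10)
	if err != nil {
		return err
	}
	for _, profile := range profiles {
		log.Printf("profile of %d bytes", len(profile))
	}
	return nil
}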
// Update creates an UpdateEntryRequest for a user and attempts to submit it
// multiple times until ctx times out.
// Returns context.DeadlineExceeded if ctx times out.
func (c *Client) | (ctx context.Context, u *tpb.User, signers []signatures.Signer) (*entry.Mutation, error) {
if got, want := u.DomainId, c.domainID; got != want {
return nil, fmt.Errorf("u.DomainID: %v, want %v", got, want)
}
// 1. pb.User + ExistingEntry -> Mutation.
m, err := c.newMutation(ctx, u)
if err != nil {
return nil, err
}
// 2. Queue Mutation.
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
// 3. Wait for update.
m, err = c.waitOnceForUserUpdate(ctx, m)
for {
switch {
case err == ErrWait:
// Try again.
case err == ErrRetry:
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
m, err = c.waitOnceForUserUpdate(ctx, m)
}
}
// QueueMutation signs an entry.Mutation and sends it to the server.
func (c *Client) QueueMutation(ctx context.Context, m *entry.Mutation, signers []signatures.Signer) error {
req, err := m.SerializeAndSign(signers, c.trusted.GetTreeSize())
if err != nil {
return fmt.Errorf("SerializeAndSign(): %v", err)
}
Vlog.Printf("Sending Update request...")
// TODO(gdbelvin): Change name from UpdateEntry to QueueUpdate.
_, err = c.cli.UpdateEntry(ctx, req)
return err
}
// newMutation fetches the current index and value for a user and prepares a mutation.
func (c *Client) newMutation(ctx context.Context, u *tpb.User) (*entry.Mutation, error) {
e, err := c.VerifiedGetEntry(ctx, u.AppId, u.UserId)
if err != nil {
return nil, err
}
oldLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
Vlog.Printf("Got current entry...")
index, err := c.Index(e.GetVrfProof(), u.DomainId, u.AppId, u.UserId)
if err != nil {
return nil, err
}
mutation := entry.NewMutation(index, u.DomainId, u.AppId, u.UserId)
if err := mutation.SetPrevious(oldLeaf, true); err != nil {
return nil, err
}
if err := mutation.SetCommitment(u.PublicKeyData); err != nil {
return nil, err
}
if len(u.AuthorizedKeys) != 0 {
if err := mutation.ReplaceAuthorizedKeys(u.AuthorizedKeys); err != nil {
return nil, err
}
}
return mutation, nil
}
// WaitForUserUpdate waits for the mutation to be applied or the context to timeout or cancel.
func (c *Client) WaitForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
for {
m, err := c.waitOnceForUserUpdate(ctx, m)
switch {
case err == ErrWait:
// Try again.
case status | Update | identifier_name |
client.go | 16
// TODO: Public keys of trusted monitors.
)
var (
// ErrRetry occurs when an update has been queued, but the
// results of the update differ from the one requested.
// This indicates that a separate update was in-flight while
// this update was being submitted. To continue, the client
// should make a fresh update and try again.
ErrRetry = errors.New("client: update race condition - try again")
// ErrWait occurs when an update has been queued, but no change has been
// observed in the user's account yet.
ErrWait = errors.New("client: update not present yet - wait some more")
// ErrIncomplete occurs when the server indicates that requested epochs
// are not available.
ErrIncomplete = errors.New("incomplete account history")
// Vlog is the verbose logger. By default it outputs to /dev/null.
Vlog = log.New(ioutil.Discard, "", 0)
)
// Client is a helper library for issuing updates to the key server.
// Client Responsibilities
// - Trust Model:
// - - Trusted Monitors
// - - Verify last X days
// - Gossip - What is the current value of the root?
// - - Gossip advancement: advance state between current and server.
// - Sender queries - Do queries match up against the gossip root?
// - - List trusted monitors.
// - Key Owner
// - - Periodically query own keys. Do they match the private keys I have?
// - - Sign key update requests.
type Client struct {
*Verifier
cli pb.KeyTransparencyClient
domainID string
mutator mutator.Func
RetryDelay time.Duration
trusted trillian.SignedLogRoot
}
// NewFromConfig creates a new client from a config
func NewFromConfig(ktClient pb.KeyTransparencyClient, config *pb.Domain) (*Client, error) {
ktVerifier, err := NewVerifierFromDomain(config)
if err != nil {
return nil, err
}
return New(ktClient, config.DomainId, ktVerifier), nil
}
// New creates a new client.
// TODO(gbelvin): set retry delay.
func New(ktClient pb.KeyTransparencyClient,
domainID string,
ktVerifier *Verifier) *Client {
return &Client{
Verifier: ktVerifier,
cli: ktClient,
domainID: domainID,
mutator: entry.New(),
RetryDelay: 3 * time.Second,
}
}
func (c *Client) updateTrusted(newTrusted *trillian.SignedLogRoot) {
if newTrusted.TimestampNanos > c.trusted.TimestampNanos &&
newTrusted.TreeSize >= c.trusted.TreeSize {
c.trusted = *newTrusted
}
}
// GetEntry returns an entry if it exists, and nil if it does not.
func (c *Client) GetEntry(ctx context.Context, userID, appID string, opts ...grpc.CallOption) ([]byte, *trillian.SignedMapRoot, error) {
e, err := c.VerifiedGetEntry(ctx, appID, userID)
if err != nil {
return nil, nil, err
}
// Empty case.
if e.GetCommitted() == nil {
return nil, e.GetSmr(), nil
}
return e.GetCommitted().GetData(), e.GetSmr(), nil
}
func min(x, y int32) int32 {
if x < y {
return x
}
return y
}
// ListHistory returns a list of profiles starting and ending at given epochs.
// It also filters out all identical consecutive profiles.
func (c *Client) ListHistory(ctx context.Context, userID, appID string, start, end int64, opts ...grpc.CallOption) (map[*trillian.SignedMapRoot][]byte, error) {
if start < 0 {
return nil, fmt.Errorf("start=%v, want >= 0", start)
}
var currentProfile []byte
profiles := make(map[*trillian.SignedMapRoot][]byte)
epochsReceived := int64(0)
epochsWant := end - start + 1
for epochsReceived < epochsWant {
trustedSnapshot := c.trusted
resp, err := c.cli.ListEntryHistory(ctx, &pb.ListEntryHistoryRequest{
DomainId: c.domainID,
UserId: userID,
AppId: appID,
FirstTreeSize: trustedSnapshot.TreeSize,
Start: start,
PageSize: min(int32((end-start)+1), pageSize),
}, opts...)
if err != nil {
return nil, err
}
epochsReceived += int64(len(resp.GetValues()))
for i, v := range resp.GetValues() {
Vlog.Printf("Processing entry for %v, epoch %v", userID, start+int64(i))
err = c.VerifyGetEntryResponse(ctx, c.domainID, appID, userID, trustedSnapshot, v)
if err != nil {
return nil, err
}
c.updateTrusted(v.GetLogRoot())
// Compress profiles that are equal through time. All
// nil profiles before the first profile are ignored.
profile := v.GetCommitted().GetData()
if bytes.Equal(currentProfile, profile) {
continue
}
// Append the slice and update currentProfile.
profiles[v.GetSmr()] = profile
currentProfile = profile
}
if resp.NextStart == 0 {
break // No more data.
}
start = resp.NextStart // Fetch the next block of results.
}
if epochsReceived < epochsWant {
return nil, ErrIncomplete
}
return profiles, nil
}
// Update creates an UpdateEntryRequest for a user and attempts to submit it
// multiple times until ctx times out.
// Returns context.DeadlineExceeded if ctx times out.
func (c *Client) Update(ctx context.Context, u *tpb.User, signers []signatures.Signer) (*entry.Mutation, error) {
if got, want := u.DomainId, c.domainID; got != want {
return nil, fmt.Errorf("u.DomainID: %v, want %v", got, want)
}
// 1. pb.User + ExistingEntry -> Mutation.
m, err := c.newMutation(ctx, u)
if err != nil {
return nil, err
}
// 2. Queue Mutation.
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
// 3. Wait for update.
m, err = c.waitOnceForUserUpdate(ctx, m)
for {
switch {
case err == ErrWait:
// Try again.
case err == ErrRetry:
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
m, err = c.waitOnceForUserUpdate(ctx, m)
}
}
// QueueMutation signs an entry.Mutation and sends it to the server.
func (c *Client) QueueMutation(ctx context.Context, m *entry.Mutation, signers []signatures.Signer) error {
req, err := m.SerializeAndSign(signers, c.trusted.GetTreeSize())
if err != nil {
return fmt.Errorf("SerializeAndSign(): %v", err)
}
Vlog.Printf("Sending Update request...")
// TODO(gdbelvin): Change name from UpdateEntry to QueueUpdate.
_, err = c.cli.UpdateEntry(ctx, req)
return err
}
// newMutation fetches the current index and value for a user and prepares a mutation.
func (c *Client) newMutation(ctx context.Context, u *tpb.User) (*entry.Mutation, error) {
e, err := c.VerifiedGetEntry(ctx, u.AppId, u.UserId)
if err != nil {
return nil, err
}
oldLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
Vlog.Printf("Got current entry...")
index, err := c.Index(e.GetVrfProof(), u.DomainId, u.AppId, u.UserId)
if err != nil {
return nil, err
}
mutation := entry.NewMutation(index, u.DomainId, u.AppId, u.UserId)
if err := mutation.SetPrevious(oldLeaf, true); err != nil {
return nil, err
}
if err := mutation.SetCommitment(u.PublicKeyData); err != nil |
if len(u.AuthorizedKeys) != 0 {
if err := mutation.ReplaceAuthorizedKeys(u.AuthorizedKeys); err != nil {
return nil, err
}
}
return mutation, nil
}
// WaitForUserUpdate waits for the mutation to be applied or the context to timeout or cancel.
func (c *Client) WaitForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
for {
m, err := c.waitOnceForUserUpdate(ctx, m)
switch {
case err == ErrWait:
// Try again.
case | {
return nil, err
} | conditional_block |
client.go | 16
// TODO: Public keys of trusted monitors.
)
var (
// ErrRetry occurs when an update has been queued, but the
// results of the update differ from the one requested.
// This indicates that a separate update was in-flight while
// this update was being submitted. To continue, the client
// should make a fresh update and try again.
ErrRetry = errors.New("client: update race condition - try again")
// ErrWait occurs when an update has been queued, but no change has been
// observed in the user's account yet.
ErrWait = errors.New("client: update not present yet - wait some more")
// ErrIncomplete occurs when the server indicates that requested epochs
// are not available.
ErrIncomplete = errors.New("incomplete account history")
// Vlog is the verbose logger. By default it outputs to /dev/null.
Vlog = log.New(ioutil.Discard, "", 0)
)
// Client is a helper library for issuing updates to the key server.
// Client Responsibilities
// - Trust Model:
// - - Trusted Monitors
// - - Verify last X days
// - Gossip - What is the current value of the root?
// - - Gossip advancement: advance state between current and server.
// - Sender queries - Do queries match up against the gossip root?
// - - List trusted monitors.
// - Key Owner
// - - Periodically query own keys. Do they match the private keys I have?
// - - Sign key update requests.
type Client struct {
*Verifier
cli pb.KeyTransparencyClient
domainID string
mutator mutator.Func
RetryDelay time.Duration
trusted trillian.SignedLogRoot
}
// NewFromConfig creates a new client from a config
func NewFromConfig(ktClient pb.KeyTransparencyClient, config *pb.Domain) (*Client, error) {
ktVerifier, err := NewVerifierFromDomain(config)
if err != nil {
return nil, err
}
return New(ktClient, config.DomainId, ktVerifier), nil
}
// New creates a new client.
// TODO(gbelvin): set retry delay.
func New(ktClient pb.KeyTransparencyClient,
domainID string,
ktVerifier *Verifier) *Client |
func (c *Client) updateTrusted(newTrusted *trillian.SignedLogRoot) {
if newTrusted.TimestampNanos > c.trusted.TimestampNanos &&
newTrusted.TreeSize >= c.trusted.TreeSize {
c.trusted = *newTrusted
}
}
// GetEntry returns an entry if it exists, and nil if it does not.
func (c *Client) GetEntry(ctx context.Context, userID, appID string, opts ...grpc.CallOption) ([]byte, *trillian.SignedMapRoot, error) {
e, err := c.VerifiedGetEntry(ctx, appID, userID)
if err != nil {
return nil, nil, err
}
// Empty case.
if e.GetCommitted() == nil {
return nil, e.GetSmr(), nil
}
return e.GetCommitted().GetData(), e.GetSmr(), nil
}
func min(x, y int32) int32 {
if x < y {
return x
}
return y
}
// ListHistory returns a list of profiles starting and ending at given epochs.
// It also filters out all identical consecutive profiles.
func (c *Client) ListHistory(ctx context.Context, userID, appID string, start, end int64, opts ...grpc.CallOption) (map[*trillian.SignedMapRoot][]byte, error) {
if start < 0 {
return nil, fmt.Errorf("start=%v, want >= 0", start)
}
var currentProfile []byte
profiles := make(map[*trillian.SignedMapRoot][]byte)
epochsReceived := int64(0)
epochsWant := end - start + 1
for epochsReceived < epochsWant {
trustedSnapshot := c.trusted
resp, err := c.cli.ListEntryHistory(ctx, &pb.ListEntryHistoryRequest{
DomainId: c.domainID,
UserId: userID,
AppId: appID,
FirstTreeSize: trustedSnapshot.TreeSize,
Start: start,
PageSize: min(int32((end-start)+1), pageSize),
}, opts...)
if err != nil {
return nil, err
}
epochsReceived += int64(len(resp.GetValues()))
for i, v := range resp.GetValues() {
Vlog.Printf("Processing entry for %v, epoch %v", userID, start+int64(i))
err = c.VerifyGetEntryResponse(ctx, c.domainID, appID, userID, trustedSnapshot, v)
if err != nil {
return nil, err
}
c.updateTrusted(v.GetLogRoot())
// Compress profiles that are equal through time. All
// nil profiles before the first profile are ignored.
profile := v.GetCommitted().GetData()
if bytes.Equal(currentProfile, profile) {
continue
}
// Append the slice and update currentProfile.
profiles[v.GetSmr()] = profile
currentProfile = profile
}
if resp.NextStart == 0 {
break // No more data.
}
start = resp.NextStart // Fetch the next block of results.
}
if epochsReceived < epochsWant {
return nil, ErrIncomplete
}
return profiles, nil
}
// Update creates an UpdateEntryRequest for a user and attempts to submit it
// multiple times until ctx times out.
// Returns context.DeadlineExceeded if ctx times out.
func (c *Client) Update(ctx context.Context, u *tpb.User, signers []signatures.Signer) (*entry.Mutation, error) {
if got, want := u.DomainId, c.domainID; got != want {
return nil, fmt.Errorf("u.DomainID: %v, want %v", got, want)
}
// 1. pb.User + ExistingEntry -> Mutation.
m, err := c.newMutation(ctx, u)
if err != nil {
return nil, err
}
// 2. Queue Mutation.
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
// 3. Wait for update.
m, err = c.waitOnceForUserUpdate(ctx, m)
for {
switch {
case err == ErrWait:
// Try again.
case err == ErrRetry:
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
m, err = c.waitOnceForUserUpdate(ctx, m)
}
}
// QueueMutation signs an entry.Mutation and sends it to the server.
func (c *Client) QueueMutation(ctx context.Context, m *entry.Mutation, signers []signatures.Signer) error {
req, err := m.SerializeAndSign(signers, c.trusted.GetTreeSize())
if err != nil {
return fmt.Errorf("SerializeAndSign(): %v", err)
}
Vlog.Printf("Sending Update request...")
// TODO(gdbelvin): Change name from UpdateEntry to QueueUpdate.
_, err = c.cli.UpdateEntry(ctx, req)
return err
}
// newMutation fetches the current index and value for a user and prepares a mutation.
func (c *Client) newMutation(ctx context.Context, u *tpb.User) (*entry.Mutation, error) {
e, err := c.VerifiedGetEntry(ctx, u.AppId, u.UserId)
if err != nil {
return nil, err
}
oldLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
Vlog.Printf("Got current entry...")
index, err := c.Index(e.GetVrfProof(), u.DomainId, u.AppId, u.UserId)
if err != nil {
return nil, err
}
mutation := entry.NewMutation(index, u.DomainId, u.AppId, u.UserId)
if err := mutation.SetPrevious(oldLeaf, true); err != nil {
return nil, err
}
if err := mutation.SetCommitment(u.PublicKeyData); err != nil {
return nil, err
}
if len(u.AuthorizedKeys) != 0 {
if err := mutation.ReplaceAuthorizedKeys(u.AuthorizedKeys); err != nil {
return nil, err
}
}
return mutation, nil
}
// WaitForUserUpdate waits for the mutation to be applied or the context to timeout or cancel.
func (c *Client) WaitForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
for {
m, err := c.waitOnceForUserUpdate(ctx, m)
switch {
case err == ErrWait:
// Try again.
case | {
return &Client{
Verifier: ktVerifier,
cli: ktClient,
domainID: domainID,
mutator: entry.New(),
RetryDelay: 3 * time.Second,
}
} | identifier_body |
client.go | 16
// TODO: Public keys of trusted monitors.
)
var (
// ErrRetry occurs when an update has been queued, but the
// results of the update differ from the one requested.
// This indicates that a separate update was in-flight while
// this update was being submitted. To continue, the client
// should make a fresh update and try again.
ErrRetry = errors.New("client: update race condition - try again")
// ErrWait occurs when an update has been queued, but no change has been
// observed in the user's account yet.
ErrWait = errors.New("client: update not present yet - wait some more")
// ErrIncomplete occurs when the server indicates that requested epochs
// are not available.
ErrIncomplete = errors.New("incomplete account history")
// Vlog is the verbose logger. By default it outputs to /dev/null.
Vlog = log.New(ioutil.Discard, "", 0)
)
// Client is a helper library for issuing updates to the key server.
// Client Responsibilities
// - Trust Model:
// - - Trusted Monitors
// - - Verify last X days
// - Gossip - What is the current value of the root?
// - - Gossip advancement: advance state between current and server.
// - Sender queries - Do queries match up against the gossip root?
// - - List trusted monitors.
// - Key Owner
// - - Periodically query own keys. Do they match the private keys I have?
// - - Sign key update requests.
type Client struct {
*Verifier
cli pb.KeyTransparencyClient
domainID string
mutator mutator.Func
RetryDelay time.Duration
trusted trillian.SignedLogRoot
} | if err != nil {
return nil, err
}
return New(ktClient, config.DomainId, ktVerifier), nil
}
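
// exampleDial is an illustrative sketch, not part of the original file: wiring a
// Client from a gRPC connection. The server address is hypothetical and the
// generated pb.NewKeyTransparencyClient constructor is assumed.
func exampleDial(config *pb.Domain) (*Client, error) {
	cc, err := grpc.Dial("ktserver.example.com:443", grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	return NewFromConfig(pb.NewKeyTransparencyClient(cc), config)
}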
// New creates a new client.
// TODO(gbelvin): set retry delay.
func New(ktClient pb.KeyTransparencyClient,
domainID string,
ktVerifier *Verifier) *Client {
return &Client{
Verifier: ktVerifier,
cli: ktClient,
domainID: domainID,
mutator: entry.New(),
RetryDelay: 3 * time.Second,
}
}
func (c *Client) updateTrusted(newTrusted *trillian.SignedLogRoot) {
if newTrusted.TimestampNanos > c.trusted.TimestampNanos &&
newTrusted.TreeSize >= c.trusted.TreeSize {
c.trusted = *newTrusted
}
}
// GetEntry returns an entry if it exists, and nil if it does not.
func (c *Client) GetEntry(ctx context.Context, userID, appID string, opts ...grpc.CallOption) ([]byte, *trillian.SignedMapRoot, error) {
e, err := c.VerifiedGetEntry(ctx, appID, userID)
if err != nil {
return nil, nil, err
}
// Empty case.
if e.GetCommitted() == nil {
return nil, e.GetSmr(), nil
}
return e.GetCommitted().GetData(), e.GetSmr(), nil
}
func min(x, y int32) int32 {
if x < y {
return x
}
return y
}
// ListHistory returns a list of profiles starting and ending at given epochs.
// It also filters out all identical consecutive profiles.
func (c *Client) ListHistory(ctx context.Context, userID, appID string, start, end int64, opts ...grpc.CallOption) (map[*trillian.SignedMapRoot][]byte, error) {
if start < 0 {
return nil, fmt.Errorf("start=%v, want >= 0", start)
}
var currentProfile []byte
profiles := make(map[*trillian.SignedMapRoot][]byte)
epochsReceived := int64(0)
epochsWant := end - start + 1
for epochsReceived < epochsWant {
trustedSnapshot := c.trusted
resp, err := c.cli.ListEntryHistory(ctx, &pb.ListEntryHistoryRequest{
DomainId: c.domainID,
UserId: userID,
AppId: appID,
FirstTreeSize: trustedSnapshot.TreeSize,
Start: start,
PageSize: min(int32((end-start)+1), pageSize),
}, opts...)
if err != nil {
return nil, err
}
epochsReceived += int64(len(resp.GetValues()))
for i, v := range resp.GetValues() {
Vlog.Printf("Processing entry for %v, epoch %v", userID, start+int64(i))
err = c.VerifyGetEntryResponse(ctx, c.domainID, appID, userID, trustedSnapshot, v)
if err != nil {
return nil, err
}
c.updateTrusted(v.GetLogRoot())
// Compress profiles that are equal through time. All
// nil profiles before the first profile are ignored.
profile := v.GetCommitted().GetData()
if bytes.Equal(currentProfile, profile) {
continue
}
// Append the slice and update currentProfile.
profiles[v.GetSmr()] = profile
currentProfile = profile
}
if resp.NextStart == 0 {
break // No more data.
}
start = resp.NextStart // Fetch the next block of results.
}
if epochsReceived < epochsWant {
return nil, ErrIncomplete
}
return profiles, nil
}
// Update creates an UpdateEntryRequest for a user and attempts to submit it
// multiple times until ctx times out.
// Returns context.DeadlineExceeded if ctx times out.
func (c *Client) Update(ctx context.Context, u *tpb.User, signers []signatures.Signer) (*entry.Mutation, error) {
if got, want := u.DomainId, c.domainID; got != want {
return nil, fmt.Errorf("u.DomainID: %v, want %v", got, want)
}
// 1. pb.User + ExistingEntry -> Mutation.
m, err := c.newMutation(ctx, u)
if err != nil {
return nil, err
}
// 2. Queue Mutation.
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
// 3. Wait for update.
m, err = c.waitOnceForUserUpdate(ctx, m)
for {
switch {
case err == ErrWait:
// Try again.
case err == ErrRetry:
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
m, err = c.waitOnceForUserUpdate(ctx, m)
}
}
// QueueMutation signs an entry.Mutation and sends it to the server.
func (c *Client) QueueMutation(ctx context.Context, m *entry.Mutation, signers []signatures.Signer) error {
req, err := m.SerializeAndSign(signers, c.trusted.GetTreeSize())
if err != nil {
return fmt.Errorf("SerializeAndSign(): %v", err)
}
Vlog.Printf("Sending Update request...")
// TODO(gdbelvin): Change name from UpdateEntry to QueueUpdate.
_, err = c.cli.UpdateEntry(ctx, req)
return err
}
// newMutation fetches the current index and value for a user and prepares a mutation.
func (c *Client) newMutation(ctx context.Context, u *tpb.User) (*entry.Mutation, error) {
e, err := c.VerifiedGetEntry(ctx, u.AppId, u.UserId)
if err != nil {
return nil, err
}
oldLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
Vlog.Printf("Got current entry...")
index, err := c.Index(e.GetVrfProof(), u.DomainId, u.AppId, u.UserId)
if err != nil {
return nil, err
}
mutation := entry.NewMutation(index, u.DomainId, u.AppId, u.UserId)
if err := mutation.SetPrevious(oldLeaf, true); err != nil {
return nil, err
}
if err := mutation.SetCommitment(u.PublicKeyData); err != nil {
return nil, err
}
if len(u.AuthorizedKeys) != 0 {
if err := mutation.ReplaceAuthorizedKeys(u.AuthorizedKeys); err != nil {
return nil, err
}
}
return mutation, nil
}
// WaitForUserUpdate waits for the mutation to be applied or the context to timeout or cancel.
func (c *Client) WaitForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
for {
m, err := c.waitOnceForUserUpdate(ctx, m)
switch {
case err == ErrWait:
// Try again.
case status |
// NewFromConfig creates a new client from a config
func NewFromConfig(ktClient pb.KeyTransparencyClient, config *pb.Domain) (*Client, error) {
ktVerifier, err := NewVerifierFromDomain(config) | random_line_split |
LyftDataAnalysis.py | 4]:
'''
NaN value inspection
'''
print('driver ids info:------------------------------')
driver_df.info()
print('ride ids info:--------------------------------')
ride_df.info()
print('ride timestamps info:-------------------------')
ride_timestamps_df.info()
'''
ride_timestamps has one NaN value in the column timestamp
TODO: delete this ride or fill it with an artificial value?
'''
display(ride_timestamps_df[ride_timestamps_df.isnull().any(axis=1)])
# In[5]:
'''
Abnormal value inspection
'''
display(driver_df.describe())
display(ride_df.describe())
display(ride_timestamps_df.describe())
# In[6]:
'''
TODO: Need to think about how to deal with this case: why would ride_distance be <= 0?
TODO: the number of ride_ids in ride_df and in ride_timestamps doesn't match (193502 vs 194081)
'''
abnormal_ride_df = ride_df[ride_df.ride_distance <= 0]
print(abnormal_ride_df.shape)
display(abnormal_ride_df.head())
# In[7]:
'''
find overlap of driver_id between driver_df and ride_df
TODO: some drivers don't have ride information--->delete? (937 vs 854)
'''
print(len(set(driver_df.driver_id.unique()).intersection(set(ride_df.driver_id.unique()))))
# In[8]:
'''
find overlap of ride_id between ride_df and ride_timestamps_df
TODO: some rides don't have ride timestamps--->delete? (193502 vs 184819)
'''
print(len(set(ride_df.ride_id.unique()).intersection(set(ride_timestamps_df.ride_id.unique()))))
# # Merge all dfs to one df
# In[9]:
'''
merge driver_df and ride_df (Get intersection based on driver_id)
'''
big_df = ride_df.merge(driver_df,left_on='driver_id',right_on='driver_id')
print(big_df.shape)
display(big_df.head())
# In[10]:
# get overlapped ride_id between big_df and ride_timestamps_df
big_df = big_df[big_df['ride_id'].isin(ride_timestamps_df.ride_id.unique())]
big_df.reset_index(drop=True,inplace=True)
print(big_df.shape)
display(big_df.head())
# In[11]:
start = time.time()
# for each unique ride id in big_df
for idx in range(big_df.shape[0]):
rideid = big_df.iloc[idx]['ride_id']
# first find rideid timestamps info in ride_timestamps_df
target = ride_timestamps_df[ride_timestamps_df.ride_id == rideid]
# for each (event,timestamp) pair
for (e,t) in zip(list(target.event),list(target.timestamp)):
big_df.at[idx,e] = t
# double check index
if big_df[big_df.ride_id == rideid]['requested_at'].values[0] != ride_timestamps_df[ride_timestamps_df.ride_id == rideid].iloc[0,-1]:
print(idx)
print('duration:',(time.time()-start)/3600,'hrs')
# In[12]:
big_df.info()
# In[13]:
# saved for future use
big_df.to_csv('merged_big_driver_ride_df.csv',index=False)
# # Start to work on calculating extra variables
# If already have file 'merged_big_driver_ride_df.csv', directly start running code below
# In[26]:
def get_fare(driver_rides):
total_fare = 0
# if one single ride
if driver_rides.ndim == 1:
total_fare = (1 + driver_rides['ride_prime_time']/100)*(min(max(5,(2 + 1.15*driver_rides['ride_distance'] *0.00062 + 0.22 *driver_rides['ride_duration']/60 + 1.75)),400))
else:
for (distance,duration,prime) in zip(driver_rides['ride_distance'].values, driver_rides['ride_duration'].values, driver_rides['ride_prime_time'].values):
|
return total_fare
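
# Illustrative sketch (not part of the original notebook): a worked example of the
# fare formula above. Distances are in meters (0.00062 converts meters to miles),
# durations are in seconds; the fare is clamped to [$5, $400] before the prime-time
# multiplier is applied. The ride values below are made up.
import pandas as pd
example_ride = pd.Series({'ride_distance': 8046.0,   # ~5 miles
                          'ride_duration': 900.0,    # 15 minutes
                          'ride_prime_time': 25.0})  # 25% prime time
# (2 + 1.15*4.99 + 0.22*15 + 1.75) * 1.25 ≈ 15.98
print(round(get_fare(example_ride), 2))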
# In[27]:
merged_big_df = pd.read_csv('merged_big_driver_ride_df.csv')
print(merged_big_df.shape)
display(merged_big_df.head())
# In[19]:
# '''
# validate the correctness of combined df by randomly selecting ride ids to verify (random checking)
# '''
# ids = test1.ride_id
# i = np.random.choice(ids,10)
# for x in i:
# display(test1[test1.ride_id == x])
# display(ride_timestamps_df[ride_timestamps_df.ride_id == x])
# ## Get new variables related to ride info
# time of day: 0-6(midnight); 6-9(morning rush); 9-16(normal day); 16-19(evening rush); 19-24(fun) <br>
# season: {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
# In[28]:
# variables related to ride
def add_vars(row):
# convert time to datetime
# source: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
for i in range(5,len(row)):
if type(row[i]) != float:
row[i] = datetime.strptime(row[i], '%Y-%m-%d %H:%M:%S')
# get speed
row['speed(m/s)'] = row['ride_distance'] / row['ride_duration']
# holiday? reference: https://stackoverflow.com/questions/2394235/detecting-a-us-holiday
row['holiday'] = int(row['requested_at'] in holidays.US())
# requested_at time is weekend? (0-6 --> Mon-Sun)
row['weekend'] = int(row['requested_at'].weekday() > 4)
# time of day (6-9/9-16/16-19/19-24/0-6)
# {0: [6,9), 1: [9,16), 2: [16,19), 3: [19,24), 4: [0,6)}
if 6 <= row['requested_at'].hour < 9:
row['time of day'] = 0
elif 9 <= row['requested_at'].hour < 16:
row['time of day'] = 1
elif 16 <= row['requested_at'].hour < 19:
row['time of day'] = 2
elif 19 <= row['requested_at'].hour < 24:
row['time of day'] = 3
else:
row['time of day'] = 4
# season (12-2 Winter/3-5 Spring/6-8 Summer/9-11 Autumn)
# {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
if 3 <= row['requested_at'].month <= 5:
row['season'] = 0
elif 6 <= row['requested_at'].month <= 8:
row['season'] = 1
elif 9 <= row['requested_at'].month <= 11:
row['season'] = 2
else:
row['season'] = 3
# time spent from requested_at to arrived_at (efficiency of picking up a customer)
if type(row['arrived_at']) != float:
row['time spent to arrive at the customer(minutes)'] = (row['arrived_at']- row['requested_at']).total_seconds()/60
else:
row['time spent to arrive at the customer(minutes)'] = (row['picked_up_at']- row['requested_at']).total_seconds()/60
# fare for this ride
row['fare'] = get_fare(row)
return row
start = time.time()
rides_added_vars_df = merged_big_df.apply(add_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
# In[29]:
print(rides_added_vars_df.shape)
display(rides_added_vars_df.head())
# In[30]:
# saved for future use
rides_added_vars_df.to_csv('added_variables_rides_info.csv',index=False)
# ## Get new variables related to drivers
# In[22]:
drivers_df = pd.DataFrame(rides_added_vars_df.driver_id.unique())
drivers_df.rename(columns={0:'driver_id'},inplace=True)
drivers_df.reset_index(drop=True,inplace=True)
print(drivers_df.shape)
display(drivers_df.head())
# In[24]:
def add_driver_vars(row):
# first find all rides under driverid
rides = rides_added_vars_df[rides_added_vars_df.driver_id == row['driver_id']]
# Percentage of rides completed in prime time among all rides for each driver
row['prime time rides percentage'] = rides[rides.ride | total_fare += (1 + prime/100)*(min(max(5,(2 + 1.15*distance*0.00062 + 0.22*duration/60 + 1.75)),400)) | conditional_block |
LyftDataAnalysis.py | 4]:
'''
NaN value inspection
'''
print('driver ids info:------------------------------')
driver_df.info()
print('ride ids info:--------------------------------')
ride_df.info()
print('ride timestamps info:-------------------------')
ride_timestamps_df.info()
'''
ride_timestamps has one NaN value in the column timestamp
TODO: delete this ride or fill it with an artificial value?
'''
display(ride_timestamps_df[ride_timestamps_df.isnull().any(axis=1)])
# In[5]:
'''
Abnormal value inspection
'''
display(driver_df.describe())
display(ride_df.describe())
display(ride_timestamps_df.describe())
# In[6]:
'''
TODO: Need to think about how to deal with this case: why would ride_distance be <= 0?
TODO: the number of ride_ids in ride_df and in ride_timestamps doesn't match (193502 vs 194081)
'''
abnormal_ride_df = ride_df[ride_df.ride_distance <= 0]
print(abnormal_ride_df.shape)
display(abnormal_ride_df.head())
# In[7]:
'''
find overlap of driver_id between driver_df and ride_df
TODO: some drivers don't have ride information--->delete? (937 vs 854)
'''
print(len(set(driver_df.driver_id.unique()).intersection(set(ride_df.driver_id.unique()))))
# In[8]:
'''
find overlap of ride_id between ride_df and ride_timestamps_df
TODO: some rides don't have ride timestamps--->delete? (193502 vs 184819)
'''
print(len(set(ride_df.ride_id.unique()).intersection(set(ride_timestamps_df.ride_id.unique()))))
# # Merge all dfs to one df
# In[9]:
'''
merge driver_df and ride_df (Get intersection based on driver_id)
'''
big_df = ride_df.merge(driver_df,left_on='driver_id',right_on='driver_id')
print(big_df.shape)
display(big_df.head())
# In[10]:
# get overlapped ride_id between big_df and ride_timestamps_df
big_df = big_df[big_df['ride_id'].isin(ride_timestamps_df.ride_id.unique())]
big_df.reset_index(drop=True,inplace=True)
print(big_df.shape)
display(big_df.head())
# In[11]:
start = time.time()
# for each unique ride id in big_df
for idx in range(big_df.shape[0]):
rideid = big_df.iloc[idx]['ride_id']
# first find rideid timestamps info in ride_timestamps_df
target = ride_timestamps_df[ride_timestamps_df.ride_id == rideid]
# for each (event,timestamp) pair
for (e,t) in zip(list(target.event),list(target.timestamp)):
big_df.at[idx,e] = t
# double check index
if big_df[big_df.ride_id == rideid]['requested_at'].values[0] != ride_timestamps_df[ride_timestamps_df.ride_id == rideid].iloc[0,-1]:
print(idx)
print('duration:',(time.time()-start)/3600,'hrs')
# In[12]:
big_df.info()
# In[13]:
# saved for future use
big_df.to_csv('merged_big_driver_ride_df.csv',index=False)
# # Start to work on calculating extra variables
# If already have file 'merged_big_driver_ride_df.csv', directly start running code below
# In[26]:
def | (driver_rides):
total_fare = 0
# if one single ride
if driver_rides.ndim == 1:
total_fare = (1 + driver_rides['ride_prime_time']/100)*(min(max(5,(2 + 1.15*driver_rides['ride_distance'] *0.00062 + 0.22 *driver_rides['ride_duration']/60 + 1.75)),400))
else:
for (distance,duration,prime) in zip(driver_rides['ride_distance'].values, driver_rides['ride_duration'].values, driver_rides['ride_prime_time'].values):
total_fare += (1 + prime/100)*(min(max(5,(2 + 1.15*distance*0.00062 + 0.22*duration/60 + 1.75)),400))
return total_fare
# In[27]:
merged_big_df = pd.read_csv('merged_big_driver_ride_df.csv')
print(merged_big_df.shape)
display(merged_big_df.head())
# In[19]:
# '''
# validate the correctness of combined df by randomly selecting ride ids to verify (random checking)
# '''
# ids = test1.ride_id
# i = np.random.choice(ids,10)
# for x in i:
# display(test1[test1.ride_id == x])
# display(ride_timestamps_df[ride_timestamps_df.ride_id == x])
# ## Get new variables related to ride info
# time of day: 0-6(midnight); 6-9(morning rush); 9-16(normal day); 16-19(evening rush); 19-24(fun) <br>
# season: {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
# In[28]:
# variables related to ride
def add_vars(row):
# convert time to datetime
# source: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
for i in range(5,len(row)):
if type(row[i]) != float:
row[i] = datetime.strptime(row[i], '%Y-%m-%d %H:%M:%S')
# get speed
row['speed(m/s)'] = row['ride_distance'] / row['ride_duration']
# holiday? reference: https://stackoverflow.com/questions/2394235/detecting-a-us-holiday
row['holiday'] = int(row['requested_at'] in holidays.US())
# requested_at time is weekend? (0-6 --> Mon-Sun)
row['weekend'] = int(row['requested_at'].weekday() > 4)
# time of day (6-9/9-16/16-19/19-24/0-6)
# {0: [6,9), 1: [9,16), 2: [16,19), 3: [19,24), 4: [0,6)}
if 6 <= row['requested_at'].hour < 9:
row['time of day'] = 0
elif 9 <= row['requested_at'].hour < 16:
row['time of day'] = 1
elif 16 <= row['requested_at'].hour < 19:
row['time of day'] = 2
elif 19 <= row['requested_at'].hour < 24:
row['time of day'] = 3
else:
row['time of day'] = 4
# season (12-2 Winter/3-5 Spring/6-8 Summer/9-11 Autumn)
# {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
if 3 <= row['requested_at'].month <= 5:
row['season'] = 0
elif 6 <= row['requested_at'].month <= 8:
row['season'] = 1
elif 9 <= row['requested_at'].month <= 11:
row['season'] = 2
else:
row['season'] = 3
# time spent from requested_at to arrived_at (efficiency of picking up a customer)
if type(row['arrived_at']) != float:
row['time spent to arrive at the customer(minutes)'] = (row['arrived_at']- row['requested_at']).total_seconds()/60
else:
row['time spent to arrive at the customer(minutes)'] = (row['picked_up_at']- row['requested_at']).total_seconds()/60
# fare for this ride
row['fare'] = get_fare(row)
return row
start = time.time()
rides_added_vars_df = merged_big_df.apply(add_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
# In[29]:
print(rides_added_vars_df.shape)
display(rides_added_vars_df.head())
# In[30]:
# saved for future use
rides_added_vars_df.to_csv('added_variables_rides_info.csv',index=False)
# ## Get new variables related to drivers
# In[22]:
drivers_df = pd.DataFrame(rides_added_vars_df.driver_id.unique())
drivers_df.rename(columns={0:'driver_id'},inplace=True)
drivers_df.reset_index(drop=True,inplace=True)
print(drivers_df.shape)
display(drivers_df.head())
# In[24]:
def add_driver_vars(row):
# first find all rides under driverid
rides = rides_added_vars_df[rides_added_vars_df.driver_id == row['driver_id']]
# Percentage of rides completed in prime time among all rides for each driver
row['prime time rides percentage'] = rides[rides.ride | get_fare | identifier_name |
LyftDataAnalysis.py | 4]:
'''
NaN value inspection
'''
print('driver ids info:------------------------------')
driver_df.info()
print('ride ids info:--------------------------------')
ride_df.info()
print('ride timestamps info:-------------------------')
ride_timestamps_df.info()
'''
ride_timestamps has one NaN value in the column timestamp
TODO: delete this ride or fill it with an artificial value?
'''
display(ride_timestamps_df[ride_timestamps_df.isnull().any(axis=1)])
# In[5]:
'''
Abnormal value inspection
'''
display(driver_df.describe())
display(ride_df.describe())
display(ride_timestamps_df.describe())
# In[6]:
'''
TODO: Need to think about how to deal with this case: why would ride_distance be <= 0?
TODO: the number of ride_ids in ride_df and in ride_timestamps doesn't match (193502 vs 194081)
'''
abnormal_ride_df = ride_df[ride_df.ride_distance <= 0]
print(abnormal_ride_df.shape)
display(abnormal_ride_df.head())
# In[7]:
'''
find overlap of driver_id between driver_df and ride_df
TODO: some drivers don't have ride information--->delete? (937 vs 854)
'''
print(len(set(driver_df.driver_id.unique()).intersection(set(ride_df.driver_id.unique()))))
# In[8]:
'''
find overlap of ride_id between ride_df and ride_timestamps_df
TODO: some rides don't have ride timestamps--->delete? (193502 vs 184819)
'''
print(len(set(ride_df.ride_id.unique()).intersection(set(ride_timestamps_df.ride_id.unique()))))
# # Merge all dfs to one df
# In[9]:
'''
merge driver_df and ride_df (Get intersection based on driver_id)
'''
big_df = ride_df.merge(driver_df,left_on='driver_id',right_on='driver_id')
print(big_df.shape)
display(big_df.head())
# In[10]:
# get overlapped ride_id between big_df and ride_timestamps_df
big_df = big_df[big_df['ride_id'].isin(ride_timestamps_df.ride_id.unique())]
big_df.reset_index(drop=True,inplace=True)
print(big_df.shape)
display(big_df.head())
# In[11]:
start = time.time()
# for each unique ride id in big_df
for idx in range(big_df.shape[0]):
rideid = big_df.iloc[idx]['ride_id']
# first find rideid timestamps info in ride_timestamps_df
target = ride_timestamps_df[ride_timestamps_df.ride_id == rideid]
# for each (event,timestamp) pair
for (e,t) in zip(list(target.event),list(target.timestamp)):
big_df.at[idx,e] = t
# double check index
if big_df[big_df.ride_id == rideid]['requested_at'].values[0] != ride_timestamps_df[ride_timestamps_df.ride_id == rideid].iloc[0,-1]:
print(idx)
print('duration:',(time.time()-start)/3600,'hrs')
# In[12]:
big_df.info()
# In[13]:
# saved for future use
big_df.to_csv('merged_big_driver_ride_df.csv',index=False)
# # Start to work on calculating extra variables
# If already have file 'merged_big_driver_ride_df.csv', directly start running code below
# In[26]:
def get_fare(driver_rides):
|
# In[27]:
merged_big_df = pd.read_csv('merged_big_driver_ride_df.csv')
print(merged_big_df.shape)
display(merged_big_df.head())
# In[19]:
# '''
# validate the correctness of combined df by randomly selecting ride ids to verify (random checking)
# '''
# ids = test1.ride_id
# i = np.random.choice(ids,10)
# for x in i:
# display(test1[test1.ride_id == x])
# display(ride_timestamps_df[ride_timestamps_df.ride_id == x])
# ## Get new variables related to ride info
# time of day: 0-6(midnight); 6-9(morning rush); 9-16(normal day); 16-19(evening rush); 19-24(fun) <br>
# season: {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
# In[28]:
# variables related to ride
def add_vars(row):
# convert time to datetime
# source: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
for i in range(5,len(row)):
if type(row[i]) != float:
row[i] = datetime.strptime(row[i], '%Y-%m-%d %H:%M:%S')
# get speed
row['speed(m/s)'] = row['ride_distance'] / row['ride_duration']
# holiday? reference: https://stackoverflow.com/questions/2394235/detecting-a-us-holiday
row['holiday'] = int(row['requested_at'] in holidays.US())
# requested_at time is weekend? (0-6 --> Mon-Sun)
row['weekend'] = int(row['requested_at'].weekday() > 4)
# time of day (6-12/12-15/16-20/20-24/24-6)
# {0: [6,9), 1: [9,16), 2: [16,19), 3: [19,24), 4: [0,6)}
if 6 <= row['requested_at'].hour < 9:
row['time of day'] = 0
elif 9 <= row['requested_at'].hour < 16:
row['time of day'] = 1
elif 16 <= row['requested_at'].hour < 19:
row['time of day'] = 2
elif 19 <= row['requested_at'].hour < 24:
row['time of day'] = 3
else:
row['time of day'] = 4
# season (12-2 Winter/3-5 Spring/6-8 Summer/9-11 Autumn)
# {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
if 3 <= row['requested_at'].month <= 5:
row['season'] = 0
elif 6 <= row['requested_at'].month <= 8:
row['season'] = 1
elif 9 <= row['requested_at'].month <= 11:
row['season'] = 2
else:
row['season'] = 3
# time spent from requested_at to arrived_at (efficiency of picking up a customer)
if type(row['arrived_at']) != float:
row['time spent to arrive at the customer(minutes)'] = (row['arrived_at']- row['requested_at']).total_seconds()/60
else:
row['time spent to arrive at the customer(minutes)'] = (row['picked_up_at']- row['requested_at']).total_seconds()/60
# fare for this ride
row['fare'] = get_fare(row)
return row
start = time.time()
rides_added_vars_df = merged_big_df.apply(add_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
# In[29]:
print(rides_added_vars_df.shape)
display(rides_added_vars_df.head())
# In[30]:
# saved for future use
rides_added_vars_df.to_csv('added_variables_rides_info.csv',index=False)
# ## Get new variables related to drivers
# In[22]:
drivers_df = pd.DataFrame(rides_added_vars_df.driver_id.unique())
drivers_df.rename(columns={0:'driver_id'},inplace=True)
drivers_df.reset_index(drop=True,inplace=True)
print(drivers_df.shape)
display(drivers_df.head())
# In[24]:
def add_driver_vars(row):
# first find all rides under driverid
rides = rides_added_vars_df[rides_added_vars_df.driver_id == row['driver_id']]
# Percentage of rides completed in prime time among all rides for each driver
row['prime time rides percentage'] = rides[rides.ride_prime | total_fare = 0
# if one single ride
if driver_rides.ndim == 1:
total_fare = (1 + driver_rides['ride_prime_time']/100)*(min(max(5,(2 + 1.15*driver_rides['ride_distance'] *0.00062 + 0.22 *driver_rides['ride_duration']/60 + 1.75)),400))
else:
for (distance,duration,prime) in zip(driver_rides['ride_distance'].values, driver_rides['ride_duration'].values, driver_rides['ride_prime_time'].values):
total_fare += (1 + prime/100)*(min(max(5,(2 + 1.15*distance*0.00062 + 0.22*duration/60 + 1.75)),400))
return total_fare | identifier_body |
LyftDataAnalysis.py | 4]:
'''
NaN value inspection
'''
print('driver ids info:------------------------------')
driver_df.info()
print('ride ids info:--------------------------------')
ride_df.info()
print('ride timestamps info:-------------------------')
ride_timestamps_df.info()
'''
ride_timestamps has one NaN value in the column timestamp
TODO: delete this ride or fill it with an artificial value?
'''
display(ride_timestamps_df[ride_timestamps_df.isnull().any(axis=1)])
# In[5]:
'''
Abnormal value inspection
'''
display(driver_df.describe())
display(ride_df.describe())
display(ride_timestamps_df.describe())
# In[6]:
'''
TODO: Need to think about how to deal with this case: why would ride_distance be <= 0?
TODO: the number of ride_ids in ride_df and in ride_timestamps doesn't match (193502 vs 194081)
'''
abnormal_ride_df = ride_df[ride_df.ride_distance <= 0]
print(abnormal_ride_df.shape)
display(abnormal_ride_df.head())
# In[7]:
'''
find overlap of driver_id between driver_df and ride_df
TODO: some drivers don't have ride information--->delete? (937 vs 854)
'''
print(len(set(driver_df.driver_id.unique()).intersection(set(ride_df.driver_id.unique()))))
# In[8]:
'''
find overlap of ride_id between ride_df and ride_timestamps_df
TODO: some rides don't have ride timestamps--->delete? (193502 vs 184819)
'''
print(len(set(ride_df.ride_id.unique()).intersection(set(ride_timestamps_df.ride_id.unique()))))
# # Merge all dfs to one df
# In[9]:
'''
merge driver_df and ride_df (Get intersection based on driver_id)
'''
big_df = ride_df.merge(driver_df,left_on='driver_id',right_on='driver_id')
print(big_df.shape)
display(big_df.head())
# In[10]:
# get overlapped ride_id between big_df and ride_timestamps_df
big_df = big_df[big_df['ride_id'].isin(ride_timestamps_df.ride_id.unique())]
big_df.reset_index(drop=True,inplace=True)
print(big_df.shape)
display(big_df.head())
# In[11]:
start = time.time()
# for each unique ride id in big_df
for idx in range(big_df.shape[0]):
rideid = big_df.iloc[idx]['ride_id']
# first find rideid timestamps info in ride_timestamps_df
target = ride_timestamps_df[ride_timestamps_df.ride_id == rideid]
# for each (event,timestamp) pair
for (e,t) in zip(list(target.event),list(target.timestamp)):
big_df.at[idx,e] = t
# double check index
if big_df[big_df.ride_id == rideid]['requested_at'].values[0] != ride_timestamps_df[ride_timestamps_df.ride_id == rideid].iloc[0,-1]:
print(idx)
print('duration:',(time.time()-start)/3600,'hrs')
# In[12]:
big_df.info()
# In[13]:
# saved for future use
big_df.to_csv('merged_big_driver_ride_df.csv',index=False)
# # Start to work on calculating extra variables
# If already have file 'merged_big_driver_ride_df.csv', directly start running code below
# In[26]:
def get_fare(driver_rides):
total_fare = 0
# if one single ride
if driver_rides.ndim == 1:
total_fare = (1 + driver_rides['ride_prime_time']/100)*(min(max(5,(2 + 1.15*driver_rides['ride_distance'] *0.00062 + 0.22 *driver_rides['ride_duration']/60 + 1.75)),400))
else:
for (distance,duration,prime) in zip(driver_rides['ride_distance'].values, driver_rides['ride_duration'].values, driver_rides['ride_prime_time'].values):
total_fare += (1 + prime/100)*(min(max(5,(2 + 1.15*distance*0.00062 + 0.22*duration/60 + 1.75)),400))
return total_fare
# In[27]:
merged_big_df = pd.read_csv('merged_big_driver_ride_df.csv')
print(merged_big_df.shape)
display(merged_big_df.head())
# In[19]:
# '''
# validate the correctness of combined df by randomly selecting ride ids to verify (random checking)
# '''
# ids = test1.ride_id
# i = np.random.choice(ids,10)
# for x in i:
# display(test1[test1.ride_id == x])
# display(ride_timestamps_df[ride_timestamps_df.ride_id == x])
# ## Get new variables related to ride info
# time of day: 0-6(midnight); 6-9(morning rush); 9-16(normal day); 16-19(evening rush); 19-24(fun) <br>
# season: {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
# In[28]:
# variables related to ride
def add_vars(row):
# convert time to datetime
# source: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
for i in range(5,len(row)):
if type(row[i]) != float:
row[i] = datetime.strptime(row[i], '%Y-%m-%d %H:%M:%S')
# get speed
row['speed(m/s)'] = row['ride_distance'] / row['ride_duration']
# holiday? reference: https://stackoverflow.com/questions/2394235/detecting-a-us-holiday
row['holiday'] = int(row['requested_at'] in holidays.US())
# requested_at time is weekend? (0-6 --> Mon-Sun)
row['weekend'] = int(row['requested_at'].weekday() > 4)
# time of day (6-9/9-16/16-19/19-24/0-6)
# {0: [6,9), 1: [9,16), 2: [16,19), 3: [19,24), 4: [0,6)}
if 6 <= row['requested_at'].hour < 9:
row['time of day'] = 0
elif 9 <= row['requested_at'].hour < 16:
row['time of day'] = 1
elif 16 <= row['requested_at'].hour < 19:
row['time of day'] = 2
elif 19 <= row['requested_at'].hour < 24:
row['time of day'] = 3
else:
row['time of day'] = 4
# season (12-2 Winter/3-5 Spring/6-8 Summer/9-11 Autumn)
# {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
if 3 <= row['requested_at'].month <= 5:
row['season'] = 0
elif 6 <= row['requested_at'].month <= 8:
row['season'] = 1
elif 9 <= row['requested_at'].month <= 11:
row['season'] = 2
else:
row['season'] = 3
# time spent from requested_at to arrived_at (efficiency of picking up a customer)
if type(row['arrived_at']) != float:
row['time spent to arrive at the customer(minutes)'] = (row['arrived_at']- row['requested_at']).total_seconds()/60
else:
row['time spent to arrive at the customer(minutes)'] = (row['picked_up_at']- row['requested_at']).total_seconds()/60
# fare for this ride
row['fare'] = get_fare(row)
return row
| rides_added_vars_df = merged_big_df.apply(add_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
# In[29]:
print(rides_added_vars_df.shape)
display(rides_added_vars_df.head())
# In[30]:
# saved for future use
rides_added_vars_df.to_csv('added_variables_rides_info.csv',index=False)
# ## Get new variables related to drivers
# In[22]:
drivers_df = pd.DataFrame(rides_added_vars_df.driver_id.unique())
drivers_df.rename(columns={0:'driver_id'},inplace=True)
drivers_df.reset_index(drop=True,inplace=True)
print(drivers_df.shape)
display(drivers_df.head())
# In[24]:
def add_driver_vars(row):
# first find all rides under driverid
rides = rides_added_vars_df[rides_added_vars_df.driver_id == row['driver_id']]
# Percentage of rides completed in prime time among all rides for each driver
row['prime time rides percentage'] = rides[rides.ride_prime | start = time.time() | random_line_split |
lib.rs | entially quantified variable.
BNode(BlankNode<TD>),
/// An RDF literal.
Literal(Literal<TD>),
/// A universally quantified variable like in SPARQL or Notation3.
Variable(Variable<TD>),
}
/// Trait alias for types holding the textual data of terms.
pub trait TermData: AsRef<str> + Clone + Eq + Hash {}
impl<T> TermData for T where T: AsRef<str> + Clone + Eq + Hash {}
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type BoxTerm = Term<Box<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type RcTerm = Term<Rc<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type ArcTerm = Term<Arc<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type RefTerm<'a> = Term<&'a str>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type StaticTerm = RefTerm<'static>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type MownTerm<'a> = Term<MownStr<'a>>;
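
// Illustrative sketch, not part of the original sources: how the owned vs.
// borrowed aliases above are typically used, relying on the constructors and
// the `PartialEq` impl defined further below in this file.
#[cfg(test)]
mod alias_usage_sketch {
    use super::*;

    #[test]
    fn owned_and_borrowed_terms_compare_equal() -> Result<()> {
        // `BoxTerm` owns its text, so it can outlive the source it was parsed from.
        let owned: BoxTerm = Term::new_iri("http://example.org/alice")?;
        // `RefTerm` merely borrows an existing `&str`; cheap for transient lookups.
        let borrowed: RefTerm = Term::new_iri("http://example.org/alice")?;
        assert!(owned == borrowed);
        Ok(())
    }
}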
impl<T> Term<T>
where
T: TermData,
{
/// Return a new IRI term from the given text.
///
/// May fail if `txt` is not a valid IRI. | U: AsRef<str>,
T: From<U>,
{
Iri::<T>::new(iri).map(Into::into)
}
/// Return a new IRI term from the two given parts (prefix and suffix).
///
/// May fail if the concatenation of `ns` and `suffix`
/// does not produce a valid IRI.
pub fn new_iri_suffixed<U, V>(ns: U, suffix: V) -> Result<Term<T>>
where
U: AsRef<str>,
V: AsRef<str>,
T: From<U> + From<V>,
{
Iri::<T>::new_suffixed(ns, suffix).map(Into::into)
}
/// Return a new blank node term with the given bnode ID.
///
/// Currently, this may never fail;
/// however it returns a result for homogeneity with other constructor methods,
/// and because future versions may be more picky regarding bnode IDs.
pub fn new_bnode<U>(id: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
BlankNode::new(id).map(Into::into)
}
/// Return a new literal term with the given value and language tag.
///
/// May fail if the language tag is not a valid BCP47 language tag.
pub fn new_literal_lang<U, V>(txt: U, lang: V) -> Result<Self>
where
V: AsRef<str>,
T: From<U> + From<V>,
{
Literal::<T>::new_lang(txt, lang).map(Into::into)
}
/// Return a new literal term with the given value and datatype.
///
/// May fail if `dt` is not an IRI.
pub fn new_literal_dt<U, V>(txt: U, dt: V) -> Result<Self>
where
T: From<U>,
V: TryInto<Iri<T>>,
TermError: From<<V as TryInto<Iri<T>>>::Error>,
{
Ok(Literal::new_dt(txt, dt.try_into()?).into())
}
/// Return a new variable term with the given name.
///
/// May fail if `name` is not a valid variable name.
pub fn new_variable<U>(name: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
Variable::new(name).map(Into::into)
}
/// Borrow the inner contents of the term.
pub fn as_ref(&self) -> Term<&T> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref()),
Literal(lit) => Literal(lit.as_ref()),
BNode(bn) => BNode(bn.as_ref()),
Variable(var) => Variable(var.as_ref()),
}
}
/// Borrow the inner contents of the term as `&str`.
pub fn as_ref_str(&self) -> Term<&str> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref_str()),
Literal(lit) => Literal(lit.as_ref_str()),
BNode(bn) => BNode(bn.as_ref_str()),
Variable(var) => Variable(var.as_ref_str()),
}
}
/// Create a new term by applying `f` to the `TermData` of `self`.
pub fn map<F, TD2>(self, f: F) -> Term<TD2>
where
F: FnMut(T) -> TD2,
TD2: TermData,
{
use self::Term::*;
match self {
Iri(iri) => Iri(iri.map(f)),
Literal(lit) => Literal(lit.map(f)),
BNode(bn) => BNode(bn.map(f)),
Variable(var) => Variable(var.map(f)),
}
}
/// Maps the term using the `Into` trait.
pub fn map_into<TD2>(self) -> Term<TD2>
where
T: Into<TD2>,
TD2: TermData,
{
self.map(Into::into)
}
/// Clone self while transforming the inner `TermData` with the given
/// factory.
///
/// This is done in one step in contrast to calling `clone().map(factory)`.
pub fn clone_map<'a, U, F>(&'a self, factory: F) -> Term<U>
where
U: TermData,
F: FnMut(&'a str) -> U,
{
use self::Term::*;
match self {
Iri(iri) => iri.clone_map(factory).into(),
BNode(bn) => bn.clone_map(factory).into(),
Literal(lit) => lit.clone_map(factory).into(),
Variable(var) => var.clone_map(factory).into(),
}
}
/// Apply `clone_map()` using the `Into` trait.
pub fn clone_into<'src, U>(&'src self) -> Term<U>
where
U: TermData + From<&'src str>,
{
self.clone_map(Into::into)
}
/// Return a term equivalent to this one,
/// with all IRIs (if any)
/// internally represented with all its data in `ns`, and an empty `suffix`.
///
    /// # Performance
/// The returned term will borrow data from this one as much as possible,
/// but strings may be allocated in case a concatenation is required.
pub fn normalized(&self, policy: Normalization) -> MownTerm {
match self {
Term::Iri(iri) => iri.normalized(policy).into(),
Term::Literal(lit) => lit.normalized(policy).into(),
_ => self.as_ref_str().map_into(),
}
}
/// Create a new IRI-term from a given IRI without checking its validity.
///
/// # Pre-conditions
///
/// This function conducts no checks if the resulting IRI is valid. This is
/// a contract that is generally assumed. Breaking it could result in
/// unexpected behavior.
///
/// However, in `debug` builds assertions that perform checks are enabled.
pub fn new_iri_unchecked<U>(iri: U) -> Term<T>
where
T: From<U>,
{
Iri::<T>::new_unchecked(iri).into()
}
/// Create a new IRI-term from a given namespace and suffix.
///
/// # Pre-conditions
///
/// It is expected that
///
/// * the resulting IRI is valid per RFC3987,
/// * `suffix` is not the empty string
/// (otherwise, [`new_iri_unchecked`](#method.new_iri_unchecked) should be used instead).
///
/// This is a contract that is generally assumed.
/// Breaking it could result in unexpected behavior.
/// However in `debug` mode, assertions that perform checks are enabled.
pub fn new_iri_suffixed_unchecked<U, V>(ns: U, suffix: V) -> Term<T>
where
T: From<U> + From<V>,
{
Iri::<T>::new_suffixed_unchecked(ns, suffix).into()
}
/// | pub fn new_iri<U>(iri: U) -> Result<Term<T>>
where | random_line_split |
lib.rs | language tag.
///
/// May fail if the language tag is not a valid BCP47 language tag.
pub fn new_literal_lang<U, V>(txt: U, lang: V) -> Result<Self>
where
V: AsRef<str>,
T: From<U> + From<V>,
{
Literal::<T>::new_lang(txt, lang).map(Into::into)
}
/// Return a new literal term with the given value and datatype.
///
/// May fail if `dt` is not an IRI.
pub fn new_literal_dt<U, V>(txt: U, dt: V) -> Result<Self>
where
T: From<U>,
V: TryInto<Iri<T>>,
TermError: From<<V as TryInto<Iri<T>>>::Error>,
{
Ok(Literal::new_dt(txt, dt.try_into()?).into())
}
/// Return a new variable term with the given name.
///
/// May fail if `name` is not a valid variable name.
pub fn new_variable<U>(name: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
Variable::new(name).map(Into::into)
}
/// Borrow the inner contents of the term.
pub fn as_ref(&self) -> Term<&T> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref()),
Literal(lit) => Literal(lit.as_ref()),
BNode(bn) => BNode(bn.as_ref()),
Variable(var) => Variable(var.as_ref()),
}
}
/// Borrow the inner contents of the term as `&str`.
pub fn as_ref_str(&self) -> Term<&str> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref_str()),
Literal(lit) => Literal(lit.as_ref_str()),
BNode(bn) => BNode(bn.as_ref_str()),
Variable(var) => Variable(var.as_ref_str()),
}
}
/// Create a new term by applying `f` to the `TermData` of `self`.
pub fn map<F, TD2>(self, f: F) -> Term<TD2>
where
F: FnMut(T) -> TD2,
TD2: TermData,
{
use self::Term::*;
match self {
Iri(iri) => Iri(iri.map(f)),
Literal(lit) => Literal(lit.map(f)),
BNode(bn) => BNode(bn.map(f)),
Variable(var) => Variable(var.map(f)),
}
}
/// Maps the term using the `Into` trait.
pub fn map_into<TD2>(self) -> Term<TD2>
where
T: Into<TD2>,
TD2: TermData,
{
self.map(Into::into)
}
/// Clone self while transforming the inner `TermData` with the given
/// factory.
///
/// This is done in one step in contrast to calling `clone().map(factory)`.
pub fn clone_map<'a, U, F>(&'a self, factory: F) -> Term<U>
where
U: TermData,
F: FnMut(&'a str) -> U,
{
use self::Term::*;
match self {
Iri(iri) => iri.clone_map(factory).into(),
BNode(bn) => bn.clone_map(factory).into(),
Literal(lit) => lit.clone_map(factory).into(),
Variable(var) => var.clone_map(factory).into(),
}
}
/// Apply `clone_map()` using the `Into` trait.
pub fn clone_into<'src, U>(&'src self) -> Term<U>
where
U: TermData + From<&'src str>,
{
self.clone_map(Into::into)
}
/// Return a term equivalent to this one,
/// with all IRIs (if any)
/// internally represented with all its data in `ns`, and an empty `suffix`.
///
    /// # Performance
/// The returned term will borrow data from this one as much as possible,
/// but strings may be allocated in case a concatenation is required.
pub fn normalized(&self, policy: Normalization) -> MownTerm {
match self {
Term::Iri(iri) => iri.normalized(policy).into(),
Term::Literal(lit) => lit.normalized(policy).into(),
_ => self.as_ref_str().map_into(),
}
}
/// Create a new IRI-term from a given IRI without checking its validity.
///
/// # Pre-conditions
///
/// This function conducts no checks if the resulting IRI is valid. This is
/// a contract that is generally assumed. Breaking it could result in
/// unexpected behavior.
///
/// However, in `debug` builds assertions that perform checks are enabled.
pub fn new_iri_unchecked<U>(iri: U) -> Term<T>
where
T: From<U>,
{
Iri::<T>::new_unchecked(iri).into()
}
/// Create a new IRI-term from a given namespace and suffix.
///
/// # Pre-conditions
///
/// It is expected that
///
/// * the resulting IRI is valid per RFC3987,
/// * `suffix` is not the empty string
/// (otherwise, [`new_iri_unchecked`](#method.new_iri_unchecked) should be used instead).
///
/// This is a contract that is generally assumed.
/// Breaking it could result in unexpected behavior.
/// However in `debug` mode, assertions that perform checks are enabled.
pub fn new_iri_suffixed_unchecked<U, V>(ns: U, suffix: V) -> Term<T>
where
T: From<U> + From<V>,
{
Iri::<T>::new_suffixed_unchecked(ns, suffix).into()
}
/// Return a new blank node term.
///
/// # Pre-condition
///
/// This function requires that `id` is a valid bnode ID.
pub fn new_bnode_unchecked<U>(id: U) -> Term<T>
where
U: AsRef<str>,
T: From<U>,
{
BlankNode::<T>::new_unchecked(id).into()
}
/// Return a literal term.
///
/// # Pre-condition
///
/// This function requires that `lang` is a valid language tag.
/// In debug mode this constraint is asserted.
pub fn new_literal_lang_unchecked<U, V>(txt: U, lang: V) -> Self
where
V: AsRef<str>,
T: From<U> + From<V>,
{
Literal::<T>::new_lang_unchecked(txt, lang).into()
}
/// Return a typed literal term.
///
/// # Panics
///
/// Panics if `dt` cannot be converted into an IRI.
pub fn new_literal_dt_unchecked<U, V>(txt: U, dt: V) -> Self
where
T: From<U>,
V: TryInto<Iri<T>>,
<V as TryInto<Iri<T>>>::Error: Debug,
{
Literal::new_dt(txt, dt.try_into().unwrap()).into()
}
/// Return a new variable term.
///
/// # Pre-condition
///
/// This function requires that `name` is a valid variable name.
pub fn new_variable_unchecked<U>(name: U) -> Term<T>
where
U: AsRef<str>,
T: From<U>,
{
Variable::<T>::new_unchecked(name).into()
}
}
impl<T: TermData> TTerm for Term<T> {
fn kind(&self) -> TermKind {
use Term::*;
match self {
Iri(_) => TermKind::Iri,
Literal(_) => TermKind::Literal,
BNode(_) => TermKind::BlankNode,
Variable(_) => TermKind::Variable,
}
}
fn value_raw(&self) -> RawValue {
use Term::*;
match self {
Iri(i) => i.value_raw(),
Literal(l) => l.value_raw(),
BNode(b) => b.value_raw(),
Variable(v) => v.value_raw(),
}
}
fn datatype(&self) -> Option<SimpleIri> {
if let Term::Literal(lit) = self {
lit.datatype()
} else {
None
}
}
fn language(&self) -> Option<&str> {
if let Term::Literal(lit) = self {
lit.language()
} else {
None
}
}
fn as_dyn(&self) -> &dyn TTerm {
self
}
}
impl<TD, TE> PartialEq<TE> for Term<TD>
where
TD: TermData,
TE: TTerm + ?Sized,
{
fn eq(&self, other: &TE) -> bool {
term_eq(self, other)
}
}
impl<TD, TE> PartialOrd<TE> for Term<TD>
where
TD: TermData,
TE: TTerm + ?Sized,
{
fn | partial_cmp | identifier_name |
|
custom_insts.rs | of the disambiguating suffixes added for specific revisions.
///
/// This **should not** be changed (if possible), to ensure version mismatches
/// can be detected (i.e. starting with this prefix, but the full name differs).
///
/// See `CUSTOM_EXT_INST_SET`'s docs for further constraints on the full name.
pub const CUSTOM_EXT_INST_SET_PREFIX: &str = concat!("Rust.", env!("CARGO_PKG_NAME"), ".");
macro_rules! join_cargo_pkg_version_major_minor_patch {
($sep:literal) => {
concat!(
env!("CARGO_PKG_VERSION_MAJOR"),
$sep,
env!("CARGO_PKG_VERSION_MINOR"),
$sep,
env!("CARGO_PKG_VERSION_PATCH"),
)
}; | /// These considerations are relevant to the specific choice of name:
/// * does *not* start with `NonSemantic.`, as:
/// * some custom instructions may need to be semantic
/// * these custom instructions are not meant for the final SPIR-V
/// (so no third-party support is *technically* required for them)
/// * `NonSemantic.` requires SPIR-V 1.6 (or `SPV_KHR_non_semantic_info`)
/// * always starts with `CUSTOM_EXT_INST_SET_PREFIX` (see also its docs),
/// regardless of Rust-GPU version or custom instruction set definition
/// * contains enough disambiguating information to avoid misinterpretation
/// if the definitions of the custom instructions have changed - this is
/// achieved by hashing the `SCHEMA` constant from `def_custom_insts!` below
pub static ref CUSTOM_EXT_INST_SET: String = {
let schema_hash = {
use rustc_data_structures::stable_hasher::StableHasher;
use std::hash::Hash;
let mut hasher = StableHasher::new();
SCHEMA.hash(&mut hasher);
let (lo, hi) = hasher.finalize();
(lo as u128) | ((hi as u128) << 64)
};
let version = join_cargo_pkg_version_major_minor_patch!("_");
format!("{CUSTOM_EXT_INST_SET_PREFIX}{version}.{schema_hash:x}")
};
}
pub fn register_to_spirt_context(cx: &spirt::Context) {
use spirt::spv::spec::{ExtInstSetDesc, ExtInstSetInstructionDesc};
cx.register_custom_ext_inst_set(
&CUSTOM_EXT_INST_SET,
ExtInstSetDesc {
// HACK(eddyb) this is the most compact form I've found, that isn't
// outright lossy by omitting "Rust vs Rust-GPU" or the version.
short_alias: Some(
concat!("Rust-GPU ", join_cargo_pkg_version_major_minor_patch!(".")).into(),
),
instructions: SCHEMA
.iter()
.map(|&(i, name, operand_names)| {
(
i,
ExtInstSetInstructionDesc {
name: name.into(),
operand_names: operand_names
.iter()
.map(|name| {
name.strip_prefix("..")
.unwrap_or(name)
.replace('_', " ")
.into()
})
.collect(),
is_debuginfo: name.contains("Debug")
|| name.contains("InlinedCallFrame"),
},
)
})
.collect(),
},
);
}
macro_rules! def_custom_insts {
($($num:literal => $name:ident $({ $($field:ident),+ $(, ..$variadic_field:ident)? $(,)? })?),+ $(,)?) => {
const SCHEMA: &[(u32, &str, &[&str])] = &[
$(($num, stringify!($name), &[$($(stringify!($field),)+ $(stringify!(..$variadic_field),)?)?])),+
];
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CustomOp { $($name = $num),+ }
impl CustomOp {
pub fn decode(i: u32) -> Self {
match i {
$($num => Self::$name,)+
_ => unreachable!("{i} is not a valid custom instruction number"),
}
}
pub fn decode_from_ext_inst(inst: &Instruction) -> Self {
assert_eq!(inst.class.opcode, Op::ExtInst);
Self::decode(inst.operands[1].unwrap_literal_ext_inst_integer())
}
pub fn with_operands<T: Clone>(self, operands: &[T]) -> CustomInst<T> {
match self {
$(Self::$name => match operands {
[$($($field,)+ $(ref $variadic_field @ ..)?)?] => CustomInst::$name $({
$($field: $field.clone(),)+
$($variadic_field: $variadic_field.iter().cloned().collect())?
})?,
_ => unreachable!("{self:?} does not have the right number of operands"),
}),+
}
}
}
#[derive(Clone, Debug)]
pub enum CustomInst<T> {
$($name $({ $($field: T,)+ $($variadic_field: SmallVec<[T; 4]>)? })?),+
}
impl<T> CustomInst<T> {
pub fn op(&self) -> CustomOp {
match *self {
$(Self::$name { .. } => CustomOp::$name),+
}
}
// HACK(eddyb) this should return an iterator, but that's too much effort.
pub fn into_operands(self) -> SmallVec<[T; 8]> {
match self {
$(Self::$name $({ $($field,)+ $($variadic_field)? })? => {
[$($($field),+)?].into_iter() $($(.chain($variadic_field))?)? .collect()
})+
}
}
}
impl CustomInst<Operand> {
pub fn decode(inst: &Instruction) -> Self {
CustomOp::decode_from_ext_inst(inst).with_operands(&inst.operands[2..])
}
}
}
}
// NOTE(eddyb) several of these are similar to `NonSemantic.Shader.DebugInfo.100`
// instructions, but simpler (to aid implementation, for now).
def_custom_insts! {
// Like `DebugLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpLine`.
0 => SetDebugSrcLoc { file, line_start, line_end, col_start, col_end },
// Like `DebugNoLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpNoLine`.
1 => ClearDebugSrcLoc,
// Similar to `DebugInlinedAt` (from `NonSemantic.Shader.DebugInfo.100`),
// but simpler: there are no "scope objects", the location of the inlined
// callsite is given by other debuginfo (`SetDebugSrcLoc`/`OpLine`) active
// before this instruction, and only the name of the callee is recorded.
2 => PushInlinedCallFrame { callee_name },
// Leave the most recent inlined call frame entered by a `PushInlinedCallFrame`
// (i.e. the inlined call frames form a virtual call stack in debuginfo).
3 => PopInlinedCallFrame,
// [Semantic] Similar to some proposed `OpAbort`, but without any ability to
// indicate abnormal termination (so it's closer to `OpTerminateInvocation`,
// which we could theoretically use, but that's limited to fragment shaders).
//
// Lowering takes advantage of inlining happening before CFG structurization
// (by forcing inlining of `Abort`s all the way up to entry-points, so as to be
// able to turn the `Abort`s into regular `OpReturn`s, from an entry-point),
// but if/when inlining works on structured SPIR-T instead, it's not much
// harder to make any call to a "may (transitively) abort" function branch on
// an additional returned `bool`, instead (i.e. a form of emulated unwinding).
//
// As this is a custom terminator, it must only appear before `OpUnreachable`,
// with at most debuginfo instructions (standard or custom), between the two.
//
// FIXME(eddyb) long-term this kind of custom control-flow could be generalized
// to fully emulate unwinding (resulting in codegen similar to `?` in functions
// returning `Option` or `Result`), to e.g. run destructors, or even allow
// users to do `catch_unwind` at the top-level of their shader to handle
// panics specially (e.g. by appending to a custom buffer, or using some
// specific color in a fragment shader, to indicate a panic happened).
// NOTE(eddyb) `message_debug_printf` operands form a complete `debugPrintf`
// invocation (format string followed by inputs) for the "message", while
// `kind` only distinguishes broad categories like `"abort"` vs `"panic"`.
4 => Abort { kind, ..message_debug_printf },
}
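
// Illustrative sketch, not part of the original file: exercising the
// `CustomOp`/`CustomInst` API generated by `def_custom_insts!` above.
// The operand values are hypothetical SPIR-V ids / literals.
#[cfg(test)]
mod custom_inst_sketch {
    use super::*;

    #[test]
    fn decode_and_flatten_set_debug_src_loc() {
        // Instruction number 0 is `SetDebugSrcLoc`, with 5 named operands.
        let op = CustomOp::decode(0);
        assert_eq!(op, CustomOp::SetDebugSrcLoc);
        let inst = op.with_operands(&[10u32, 7, 7, 1, 20]);
        assert_eq!(inst.op(), CustomOp::SetDebugSrcLoc);
        // `into_operands` flattens the named fields back into operand order.
        assert_eq!(inst.into_operands().to_vec(), vec![10, 7, 7, 1, 20]);
    }
}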
impl CustomOp {
/// Returns `true` iff this `CustomOp` is | }
lazy_static! {
/// `OpExtInstImport` "instruction set" name for all Rust-GPU instructions.
/// | random_line_split |
custom_insts.rs | _version_major_minor_patch {
($sep:literal) => {
concat!(
env!("CARGO_PKG_VERSION_MAJOR"),
$sep,
env!("CARGO_PKG_VERSION_MINOR"),
$sep,
env!("CARGO_PKG_VERSION_PATCH"),
)
};
}
lazy_static! {
/// `OpExtInstImport` "instruction set" name for all Rust-GPU instructions.
///
/// These considerations are relevant to the specific choice of name:
/// * does *not* start with `NonSemantic.`, as:
/// * some custom instructions may need to be semantic
/// * these custom instructions are not meant for the final SPIR-V
/// (so no third-party support is *technically* required for them)
/// * `NonSemantic.` requires SPIR-V 1.6 (or `SPV_KHR_non_semantic_info`)
/// * always starts with `CUSTOM_EXT_INST_SET_PREFIX` (see also its docs),
/// regardless of Rust-GPU version or custom instruction set definition
/// * contains enough disambiguating information to avoid misinterpretation
/// if the definitions of the custom instructions have changed - this is
/// achieved by hashing the `SCHEMA` constant from `def_custom_insts!` below
pub static ref CUSTOM_EXT_INST_SET: String = {
let schema_hash = {
use rustc_data_structures::stable_hasher::StableHasher;
use std::hash::Hash;
let mut hasher = StableHasher::new();
SCHEMA.hash(&mut hasher);
let (lo, hi) = hasher.finalize();
(lo as u128) | ((hi as u128) << 64)
};
let version = join_cargo_pkg_version_major_minor_patch!("_");
format!("{CUSTOM_EXT_INST_SET_PREFIX}{version}.{schema_hash:x}")
};
}
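
// For illustration only (hypothetical version and hash): with `CARGO_PKG_NAME`
// being "rustc_codegen_spirv" and crate version 0.9.0, `CUSTOM_EXT_INST_SET`
// comes out as something like "Rust.rustc_codegen_spirv.0_9_0.1c89f1a3b2d4e5f6",
// so changing either `SCHEMA` or the crate version yields a new, easily
// detected instruction set name.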
pub fn register_to_spirt_context(cx: &spirt::Context) {
use spirt::spv::spec::{ExtInstSetDesc, ExtInstSetInstructionDesc};
cx.register_custom_ext_inst_set(
&CUSTOM_EXT_INST_SET,
ExtInstSetDesc {
// HACK(eddyb) this is the most compact form I've found, that isn't
// outright lossy by omitting "Rust vs Rust-GPU" or the version.
short_alias: Some(
concat!("Rust-GPU ", join_cargo_pkg_version_major_minor_patch!(".")).into(),
),
instructions: SCHEMA
.iter()
.map(|&(i, name, operand_names)| {
(
i,
ExtInstSetInstructionDesc {
name: name.into(),
operand_names: operand_names
.iter()
.map(|name| {
name.strip_prefix("..")
.unwrap_or(name)
.replace('_', " ")
.into()
})
.collect(),
is_debuginfo: name.contains("Debug")
|| name.contains("InlinedCallFrame"),
},
)
})
.collect(),
},
);
}
macro_rules! def_custom_insts {
($($num:literal => $name:ident $({ $($field:ident),+ $(, ..$variadic_field:ident)? $(,)? })?),+ $(,)?) => {
const SCHEMA: &[(u32, &str, &[&str])] = &[
$(($num, stringify!($name), &[$($(stringify!($field),)+ $(stringify!(..$variadic_field),)?)?])),+
];
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CustomOp { $($name = $num),+ }
impl CustomOp {
pub fn decode(i: u32) -> Self {
match i {
$($num => Self::$name,)+
_ => unreachable!("{i} is not a valid custom instruction number"),
}
}
pub fn decode_from_ext_inst(inst: &Instruction) -> Self {
assert_eq!(inst.class.opcode, Op::ExtInst);
Self::decode(inst.operands[1].unwrap_literal_ext_inst_integer())
}
pub fn with_operands<T: Clone>(self, operands: &[T]) -> CustomInst<T> {
match self {
$(Self::$name => match operands {
[$($($field,)+ $(ref $variadic_field @ ..)?)?] => CustomInst::$name $({
$($field: $field.clone(),)+
$($variadic_field: $variadic_field.iter().cloned().collect())?
})?,
_ => unreachable!("{self:?} does not have the right number of operands"),
}),+
}
}
}
#[derive(Clone, Debug)]
pub enum CustomInst<T> {
$($name $({ $($field: T,)+ $($variadic_field: SmallVec<[T; 4]>)? })?),+
}
impl<T> CustomInst<T> {
pub fn op(&self) -> CustomOp {
match *self {
$(Self::$name { .. } => CustomOp::$name),+
}
}
// HACK(eddyb) this should return an iterator, but that's too much effort.
pub fn into_operands(self) -> SmallVec<[T; 8]> {
match self {
$(Self::$name $({ $($field,)+ $($variadic_field)? })? => {
[$($($field),+)?].into_iter() $($(.chain($variadic_field))?)? .collect()
})+
}
}
}
impl CustomInst<Operand> {
pub fn decode(inst: &Instruction) -> Self {
CustomOp::decode_from_ext_inst(inst).with_operands(&inst.operands[2..])
}
}
}
}
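// Rough usage sketch (added, not part of the original file): a pass walking
// `OpExtInst`s from this set could round-trip them via the generated types:
//
//     let custom = CustomInst::decode(&inst);
//     match custom {
//         CustomInst::SetDebugSrcLoc { file, line_start, .. } => { /* track source location */ }
//         CustomInst::PopInlinedCallFrame => { /* pop the virtual call stack */ }
//         other => { /* semantic ops, e.g. other.op() == CustomOp::Abort */ }
//     }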
// NOTE(eddyb) several of these are similar to `NonSemantic.Shader.DebugInfo.100`
// instructions, but simpler (to aid implementation, for now).
def_custom_insts! {
// Like `DebugLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpLine`.
0 => SetDebugSrcLoc { file, line_start, line_end, col_start, col_end },
// Like `DebugNoLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpNoLine`.
1 => ClearDebugSrcLoc,
// Similar to `DebugInlinedAt` (from `NonSemantic.Shader.DebugInfo.100`),
// but simpler: there are no "scope objects", the location of the inlined
// callsite is given by other debuginfo (`SetDebugSrcLoc`/`OpLine`) active
// before this instruction, and only the name of the callee is recorded.
2 => PushInlinedCallFrame { callee_name },
// Leave the most recent inlined call frame entered by a `PushInlinedCallFrame`
// (i.e. the inlined call frames form a virtual call stack in debuginfo).
3 => PopInlinedCallFrame,
// [Semantic] Similar to some proposed `OpAbort`, but without any ability to
// indicate abnormal termination (so it's closer to `OpTerminateInvocation`,
// which we could theoretically use, but that's limited to fragment shaders).
//
// Lowering takes advantage of inlining happening before CFG structurization
// (by forcing inlining of `Abort`s all the way up to entry-points, so as to be
// able to turn the `Abort`s into regular `OpReturn`s, from an entry-point),
// but if/when inlining works on structured SPIR-T instead, it's not much
// harder to make any call to a "may (transitively) abort" function branch on
// an additional returned `bool`, instead (i.e. a form of emulated unwinding).
//
// As this is a custom terminator, it must only appear before `OpUnreachable`,
// with at most debuginfo instructions (standard or custom) between the two.
//
// FIXME(eddyb) long-term this kind of custom control-flow could be generalized
// to fully emulate unwinding (resulting in codegen similar to `?` in functions
// returning `Option` or `Result`), to e.g. run destructors, or even allow
// users to do `catch_unwind` at the top-level of their shader to handle
// panics specially (e.g. by appending to a custom buffer, or using some
// specific color in a fragment shader, to indicate a panic happened).
// NOTE(eddyb) `message_debug_printf` operands form a complete `debugPrintf`
// invocation (format string followed by inputs) for the "message", while
// `kind` only distinguishes broad categories like `"abort"` vs `"panic"`.
4 => Abort { kind, ..message_debug_printf },
}
impl CustomOp {
/// Returns `true` iff this `CustomOp` is a custom debuginfo instruction,
/// i.e. non-semantic (can/must be ignored wherever `OpLine`/`OpNoLine` are).
pub fn is_debuginfo(self) -> bool | {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => true,
CustomOp::Abort => false,
}
} | identifier_body |
|
custom_insts.rs | starting with this prefix, but the full name differs).
///
/// See `CUSTOM_EXT_INST_SET`'s docs for further constraints on the full name.
pub const CUSTOM_EXT_INST_SET_PREFIX: &str = concat!("Rust.", env!("CARGO_PKG_NAME"), ".");
macro_rules! join_cargo_pkg_version_major_minor_patch {
($sep:literal) => {
concat!(
env!("CARGO_PKG_VERSION_MAJOR"),
$sep,
env!("CARGO_PKG_VERSION_MINOR"),
$sep,
env!("CARGO_PKG_VERSION_PATCH"),
)
};
}
lazy_static! {
/// `OpExtInstImport` "instruction set" name for all Rust-GPU instructions.
///
/// These considerations are relevant to the specific choice of name:
/// * does *not* start with `NonSemantic.`, as:
/// * some custom instructions may need to be semantic
/// * these custom instructions are not meant for the final SPIR-V
/// (so no third-party support is *technically* required for them)
/// * `NonSemantic.` requires SPIR-V 1.6 (or `SPV_KHR_non_semantic_info`)
/// * always starts with `CUSTOM_EXT_INST_SET_PREFIX` (see also its docs),
/// regardless of Rust-GPU version or custom instruction set definition
/// * contains enough disambiguating information to avoid misinterpretation
/// if the definitions of the custom instructions have changed - this is
/// achieved by hashing the `SCHEMA` constant from `def_custom_insts!` below
pub static ref CUSTOM_EXT_INST_SET: String = {
let schema_hash = {
use rustc_data_structures::stable_hasher::StableHasher;
use std::hash::Hash;
let mut hasher = StableHasher::new();
SCHEMA.hash(&mut hasher);
let (lo, hi) = hasher.finalize();
(lo as u128) | ((hi as u128) << 64)
};
let version = join_cargo_pkg_version_major_minor_patch!("_");
format!("{CUSTOM_EXT_INST_SET_PREFIX}{version}.{schema_hash:x}")
};
}
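// Encoding sketch (assumed): on the producer side, a module carries a single
// `OpExtInstImport` whose literal name is `&CUSTOM_EXT_INST_SET`, and each
// custom instruction is then emitted as
// `OpExtInst %result_type %custom_set <CustomOp as u32> <operands...>`,
// which is the layout `CustomOp::decode_from_ext_inst` expects on the consumer
// side (operand 1 being the literal instruction number).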
pub fn register_to_spirt_context(cx: &spirt::Context) {
use spirt::spv::spec::{ExtInstSetDesc, ExtInstSetInstructionDesc};
cx.register_custom_ext_inst_set(
&CUSTOM_EXT_INST_SET,
ExtInstSetDesc {
// HACK(eddyb) this is the most compact form I've found, that isn't
// outright lossy by omitting "Rust vs Rust-GPU" or the version.
short_alias: Some(
concat!("Rust-GPU ", join_cargo_pkg_version_major_minor_patch!(".")).into(),
),
instructions: SCHEMA
.iter()
.map(|&(i, name, operand_names)| {
(
i,
ExtInstSetInstructionDesc {
name: name.into(),
operand_names: operand_names
.iter()
.map(|name| {
name.strip_prefix("..")
.unwrap_or(name)
.replace('_', " ")
.into()
})
.collect(),
is_debuginfo: name.contains("Debug")
|| name.contains("InlinedCallFrame"),
},
)
})
.collect(),
},
);
}
macro_rules! def_custom_insts {
($($num:literal => $name:ident $({ $($field:ident),+ $(, ..$variadic_field:ident)? $(,)? })?),+ $(,)?) => {
const SCHEMA: &[(u32, &str, &[&str])] = &[
$(($num, stringify!($name), &[$($(stringify!($field),)+ $(stringify!(..$variadic_field),)?)?])),+
];
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CustomOp { $($name = $num),+ }
impl CustomOp {
pub fn decode(i: u32) -> Self {
match i {
$($num => Self::$name,)+
_ => unreachable!("{i} is not a valid custom instruction number"),
}
}
pub fn decode_from_ext_inst(inst: &Instruction) -> Self {
assert_eq!(inst.class.opcode, Op::ExtInst);
Self::decode(inst.operands[1].unwrap_literal_ext_inst_integer())
}
pub fn with_operands<T: Clone>(self, operands: &[T]) -> CustomInst<T> {
match self {
$(Self::$name => match operands {
[$($($field,)+ $(ref $variadic_field @ ..)?)?] => CustomInst::$name $({
$($field: $field.clone(),)+
$($variadic_field: $variadic_field.iter().cloned().collect())?
})?,
_ => unreachable!("{self:?} does not have the right number of operands"),
}),+
}
}
}
#[derive(Clone, Debug)]
pub enum CustomInst<T> {
$($name $({ $($field: T,)+ $($variadic_field: SmallVec<[T; 4]>)? })?),+
}
impl<T> CustomInst<T> {
pub fn op(&self) -> CustomOp {
match *self {
$(Self::$name { .. } => CustomOp::$name),+
}
}
// HACK(eddyb) this should return an iterator, but that's too much effort.
pub fn into_operands(self) -> SmallVec<[T; 8]> {
match self {
$(Self::$name $({ $($field,)+ $($variadic_field)? })? => {
[$($($field),+)?].into_iter() $($(.chain($variadic_field))?)? .collect()
})+
}
}
}
impl CustomInst<Operand> {
pub fn decode(inst: &Instruction) -> Self {
CustomOp::decode_from_ext_inst(inst).with_operands(&inst.operands[2..])
}
}
}
}
// NOTE(eddyb) several of these are similar to `NonSemantic.Shader.DebugInfo.100`
// instructions, but simpler (to aid implementation, for now).
def_custom_insts! {
// Like `DebugLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpLine`.
0 => SetDebugSrcLoc { file, line_start, line_end, col_start, col_end },
// Like `DebugNoLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpNoLine`.
1 => ClearDebugSrcLoc,
// Similar to `DebugInlinedAt` (from `NonSemantic.Shader.DebugInfo.100`),
// but simpler: there are no "scope objects", the location of the inlined
// callsite is given by other debuginfo (`SetDebugSrcLoc`/`OpLine`) active
// before this instruction, and only the name of the callee is recorded.
2 => PushInlinedCallFrame { callee_name },
// Leave the most recent inlined call frame entered by a `PushInlinedCallFrame`
// (i.e. the inlined call frames form a virtual call stack in debuginfo).
3 => PopInlinedCallFrame,
// [Semantic] Similar to some proposed `OpAbort`, but without any ability to
// indicate abnormal termination (so it's closer to `OpTerminateInvocation`,
// which we could theoretically use, but that's limited to fragment shaders).
//
// Lowering takes advantage of inlining happening before CFG structurization
// (by forcing inlining of `Abort`s all the way up to entry-points, so as to be
// able to turn the `Abort`s into regular `OpReturn`s, from an entry-point),
// but if/when inlining works on structured SPIR-T instead, it's not much
// harder to make any call to a "may (transitively) abort" function branch on
// an additional returned `bool`, instead (i.e. a form of emulated unwinding).
//
// As this is a custom terminator, it must only appear before `OpUnreachable`,
// with at most debuginfo instructions (standard or custom) between the two.
//
// FIXME(eddyb) long-term this kind of custom control-flow could be generalized
// to fully emulate unwinding (resulting in codegen similar to `?` in functions
// returning `Option` or `Result`), to e.g. run destructors, or even allow
// users to do `catch_unwind` at the top-level of their shader to handle
// panics specially (e.g. by appending to a custom buffer, or using some
// specific color in a fragment shader, to indicate a panic happened).
// NOTE(eddyb) `message_debug_printf` operands form a complete `debugPrintf`
// invocation (format string followed by inputs) for the "message", while
// `kind` only distinguishes broad categories like `"abort"` vs `"panic"`.
4 => Abort { kind, ..message_debug_printf },
}
impl CustomOp {
/// Returns `true` iff this `CustomOp` is a custom debuginfo instruction,
/// i.e. non-semantic (can/must be ignored wherever `OpLine`/`OpNoLine` are).
pub fn | is_debuginfo | identifier_name |
|
mod.rs | uf_feedback_v1,
wayland_server::{protocol::wl_surface::WlSurface, Display},
},
wayland::dmabuf::{
DmabufFeedback, DmabufFeedbackBuilder, DmabufGlobal, DmabufHandler, DmabufState,
ImportError,
},
};
use self::drm::{BackendData, SurfaceComposition, UdevOutputId};
use crate::{cursor::Cursor, drawing::PointerElement, state::Backend, CalloopData, Corrosion};
mod drm;
mod utils;
pub struct UdevData {
pub loop_signal: LoopSignal,
pub session: LibSeatSession,
primary_gpu: DrmNode,
dmabuf_state: Option<(DmabufState, DmabufGlobal)>,
allocator: Option<Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>>,
gpu_manager: GpuManager<GbmGlesBackend<GlesRenderer>>,
backends: HashMap<DrmNode, BackendData>,
pointer_element: PointerElement<MultiTexture>,
cursor_image: Cursor,
cursor_images: Vec<(xcursor::parser::Image, TextureBuffer<MultiTexture>)>,
}
impl DmabufHandler for Corrosion<UdevData> {
fn dmabuf_state(&mut self) -> &mut smithay::wayland::dmabuf::DmabufState {
&mut self.backend_data.dmabuf_state.as_mut().unwrap().0
}
fn dmabuf_imported(
&mut self,
_global: &DmabufGlobal,
dmabuf: Dmabuf,
) -> Result<(), smithay::wayland::dmabuf::ImportError> {
self.backend_data
.gpu_manager
.single_renderer(&self.backend_data.primary_gpu)
.and_then(|mut renderer| renderer.import_dmabuf(&dmabuf, None))
.map(|_| ())
.map_err(|_| ImportError::Failed)
}
}
delegate_dmabuf!(Corrosion<UdevData>);
impl Backend for UdevData {
fn early_import(&mut self, output: &WlSurface) {
match self
.gpu_manager
.early_import(Some(self.primary_gpu), self.primary_gpu, output)
{
Ok(()) => {}
Err(err) => tracing::error!("Error on early buffer import: {}", err),
};
}
fn loop_signal(&self) -> &LoopSignal {
&self.loop_signal
}
fn reset_buffers(&mut self, output: &smithay::output::Output) {
if let Some(id) = output.user_data().get::<UdevOutputId>() {
if let Some(gpu) = self.backends.get_mut(&id.device_id) {
if let Some(surface) = gpu.surfaces.get_mut(&id.crtc) {
surface.compositor.reset_buffers();
}
}
}
}
fn seat_name(&self) -> String {
self.session.seat()
}
}
pub fn | () {
let mut event_loop = EventLoop::try_new().expect("Unable to initialize event loop");
let (session, mut _notifier) = match LibSeatSession::new() {
Ok((session, notifier)) => (session, notifier),
Err(err) => {
tracing::error!("Error in creating libseat session: {}", err);
return;
}
};
let mut display = Display::new().expect("Unable to create wayland display");
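// Descriptive note (added): prefer the seat's primary GPU (via its render
// node), and otherwise fall back to the first GPU that yields a usable DRM
// node.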
let primary_gpu = udev::primary_gpu(&session.seat())
.unwrap()
.and_then(|p| {
DrmNode::from_path(p)
.ok()
.expect("Unable to create drm node")
.node_with_type(NodeType::Render)
.expect("Unable to create drm node")
.ok()
})
.unwrap_or_else(|| {
udev::all_gpus(&session.seat())
.unwrap()
.into_iter()
.find_map(|g| DrmNode::from_path(g).ok())
.expect("no gpu")
});
tracing::info!("Using {} as a primary gpu", primary_gpu);
let gpus = GpuManager::new(GbmGlesBackend::default()).unwrap();
let data = UdevData {
loop_signal: event_loop.get_signal(),
dmabuf_state: None,
session,
primary_gpu,
allocator: None,
gpu_manager: gpus,
backends: HashMap::new(),
cursor_image: Cursor::load(),
cursor_images: Vec::new(),
pointer_element: PointerElement::default(),
};
let mut state = Corrosion::new(event_loop.handle(), &mut display, data);
let backend = match UdevBackend::new(&state.seat_name) {
Ok(backend) => backend,
Err(err) => {
tracing::error!("Unable to create udev backend: {}", err);
return;
}
};
for (dev, path) in backend.device_list() {
state.device_added(DrmNode::from_dev_id(dev).unwrap(), &path);
}
state.shm_state.update_formats(
state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap()
.shm_formats(),
);
if let Ok(instance) = Instance::new(Version::VERSION_1_2, None) {
if let Some(physical_device) =
PhysicalDevice::enumerate(&instance)
.ok()
.and_then(|devices| {
devices
.filter(|phd| phd.has_device_extension(ExtPhysicalDeviceDrmFn::name()))
.find(|phd| {
phd.primary_node().unwrap() == Some(primary_gpu)
|| phd.render_node().unwrap() == Some(primary_gpu)
})
})
{
match VulkanAllocator::new(
&physical_device,
ImageUsageFlags::COLOR_ATTACHMENT | ImageUsageFlags::SAMPLED,
) {
Ok(allocator) => {
state.backend_data.allocator = Some(Box::new(DmabufAllocator(allocator))
as Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>);
}
Err(err) => {
tracing::warn!("Failed to create vulkan allocator: {}", err);
}
}
}
}
let mut libinput_context = Libinput::new_with_udev::<LibinputSessionInterface<LibSeatSession>>(
state.backend_data.session.clone().into(),
);
libinput_context.udev_assign_seat(&state.seat_name).unwrap();
let libinput_backend = LibinputInputBackend::new(libinput_context.clone());
state
.handle
.insert_source(libinput_backend, move |event, _, data| {
data.state.process_input_event(event);
})
.unwrap();
let gbm = state
.backend_data
.backends
.get(&primary_gpu)
// If the primary_gpu failed to initialize, we likely have a kmsro device
.or_else(|| state.backend_data.backends.values().next())
// Don't fail if there is no allocator. There is a chance that this is a single-GPU system and we don't need one.
.map(|backend| backend.gbm.clone());
state.backend_data.allocator = gbm.map(|gbm| {
Box::new(DmabufAllocator(GbmAllocator::new(
gbm,
GbmBufferFlags::RENDERING,
))) as Box<_>
});
#[cfg_attr(not(feature = "egl"), allow(unused_mut))]
let mut renderer = state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap();
#[cfg(feature = "egl")]
{
match renderer.bind_wl_display(&state.display_handle) {
Ok(_) => tracing::info!("Enabled egl hardware acceleration"),
Err(err) => tracing::error!("Error in enabling egl hardware acceleration: {:?}", err),
}
}
let dmabuf_formats = renderer.dmabuf_formats().collect::<Vec<_>>();
let default_feedback = DmabufFeedbackBuilder::new(primary_gpu.dev_id(), dmabuf_formats)
.build()
.unwrap();
let mut dmabuf_state = DmabufState::new();
let dmabuf_global = dmabuf_state.create_global_with_default_feedback::<Corrosion<UdevData>>(
&display.handle(),
&default_feedback,
);
state.backend_data.dmabuf_state = Some((dmabuf_state, dmabuf_global));
let gpus = &mut state.backend_data.gpu_manager;
state
.backend_data
.backends
.values_mut()
.for_each(|backend_data| {
backend_data.surfaces.values_mut().for_each(|surface_data| {
surface_data.dmabuf_feedback = surface_data.dmabuf_feedback.take().or_else(|| {
get_surface_dmabuf_feedback(
primary_gpu,
surface_data.render_node,
gpus,
&surface_data.compositor,
)
});
});
});
event_loop
.handle()
.insert_source(backend, move |event, _, data| match event {
udev::UdevEvent::Added { device_id, path } => {
data.state
.device_added(DrmNode::from_dev_id(device_id).unwrap(), &path);
}
udev::UdevEvent::Changed { device_id } => {
data.state
.device_changed | initialize_backend | identifier_name |
mod.rs | uf_feedback_v1,
wayland_server::{protocol::wl_surface::WlSurface, Display},
},
wayland::dmabuf::{
DmabufFeedback, DmabufFeedbackBuilder, DmabufGlobal, DmabufHandler, DmabufState,
ImportError,
},
};
use self::drm::{BackendData, SurfaceComposition, UdevOutputId};
use crate::{cursor::Cursor, drawing::PointerElement, state::Backend, CalloopData, Corrosion};
mod drm;
mod utils;
pub struct UdevData {
pub loop_signal: LoopSignal,
pub session: LibSeatSession,
primary_gpu: DrmNode,
dmabuf_state: Option<(DmabufState, DmabufGlobal)>,
allocator: Option<Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>>,
gpu_manager: GpuManager<GbmGlesBackend<GlesRenderer>>,
backends: HashMap<DrmNode, BackendData>,
pointer_element: PointerElement<MultiTexture>,
cursor_image: Cursor,
cursor_images: Vec<(xcursor::parser::Image, TextureBuffer<MultiTexture>)>,
}
impl DmabufHandler for Corrosion<UdevData> {
fn dmabuf_state(&mut self) -> &mut smithay::wayland::dmabuf::DmabufState {
&mut self.backend_data.dmabuf_state.as_mut().unwrap().0
}
fn dmabuf_imported(
&mut self,
_global: &DmabufGlobal,
dmabuf: Dmabuf,
) -> Result<(), smithay::wayland::dmabuf::ImportError> {
self.backend_data
.gpu_manager
.single_renderer(&self.backend_data.primary_gpu)
.and_then(|mut renderer| renderer.import_dmabuf(&dmabuf, None))
.map(|_| ())
.map_err(|_| ImportError::Failed)
}
}
delegate_dmabuf!(Corrosion<UdevData>);
impl Backend for UdevData {
fn early_import(&mut self, output: &WlSurface) {
match self
.gpu_manager
.early_import(Some(self.primary_gpu), self.primary_gpu, output)
{
Ok(()) => {}
Err(err) => tracing::error!("Error on early buffer import: {}", err),
};
}
fn loop_signal(&self) -> &LoopSignal {
&self.loop_signal
}
fn reset_buffers(&mut self, output: &smithay::output::Output) {
if let Some(id) = output.user_data().get::<UdevOutputId>() {
if let Some(gpu) = self.backends.get_mut(&id.device_id) {
if let Some(surface) = gpu.surfaces.get_mut(&id.crtc) {
surface.compositor.reset_buffers();
}
}
}
}
fn seat_name(&self) -> String {
self.session.seat()
}
}
pub fn initialize_backend() {
let mut event_loop = EventLoop::try_new().expect("Unable to initialize event loop");
let (session, mut _notifier) = match LibSeatSession::new() {
Ok((session, notifier)) => (session, notifier),
Err(err) => {
tracing::error!("Error in creating libseat session: {}", err);
return;
}
};
let mut display = Display::new().expect("Unable to create wayland display");
let primary_gpu = udev::primary_gpu(&session.seat())
.unwrap()
.and_then(|p| {
DrmNode::from_path(p)
.ok()
.expect("Unable to create drm node")
.node_with_type(NodeType::Render)
.expect("Unable to create drm node")
.ok()
})
.unwrap_or_else(|| {
udev::all_gpus(&session.seat())
.unwrap()
.into_iter()
.find_map(|g| DrmNode::from_path(g).ok())
.expect("no gpu")
});
tracing::info!("Using {} as a primary gpu", primary_gpu);
let gpus = GpuManager::new(GbmGlesBackend::default()).unwrap();
let data = UdevData {
loop_signal: event_loop.get_signal(),
dmabuf_state: None,
session,
primary_gpu,
allocator: None,
gpu_manager: gpus,
backends: HashMap::new(),
cursor_image: Cursor::load(),
cursor_images: Vec::new(),
pointer_element: PointerElement::default(),
};
let mut state = Corrosion::new(event_loop.handle(), &mut display, data);
let backend = match UdevBackend::new(&state.seat_name) {
Ok(backend) => backend,
Err(err) => {
tracing::error!("Unable to create udev backend: {}", err);
return;
}
};
for (dev, path) in backend.device_list() {
state.device_added(DrmNode::from_dev_id(dev).unwrap(), &path);
}
state.shm_state.update_formats(
state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap()
.shm_formats(),
);
if let Ok(instance) = Instance::new(Version::VERSION_1_2, None) {
if let Some(physical_device) =
PhysicalDevice::enumerate(&instance)
.ok()
.and_then(|devices| {
devices
.filter(|phd| phd.has_device_extension(ExtPhysicalDeviceDrmFn::name()))
.find(|phd| {
phd.primary_node().unwrap() == Some(primary_gpu)
|| phd.render_node().unwrap() == Some(primary_gpu)
})
})
{
match VulkanAllocator::new(
&physical_device,
ImageUsageFlags::COLOR_ATTACHMENT | ImageUsageFlags::SAMPLED,
) {
Ok(allocator) => {
state.backend_data.allocator = Some(Box::new(DmabufAllocator(allocator))
as Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>);
}
Err(err) => {
tracing::warn!("Failed to create vulkan allocator: {}", err);
}
}
}
}
let mut libinput_context = Libinput::new_with_udev::<LibinputSessionInterface<LibSeatSession>>(
state.backend_data.session.clone().into(),
);
libinput_context.udev_assign_seat(&state.seat_name).unwrap();
let libinput_backend = LibinputInputBackend::new(libinput_context.clone());
state
.handle
.insert_source(libinput_backend, move |event, _, data| {
data.state.process_input_event(event);
})
.unwrap();
let gbm = state
.backend_data
.backends
.get(&primary_gpu)
// If the primary_gpu failed to initialize, we likely have a kmsro device
.or_else(|| state.backend_data.backends.values().next())
// Don't fail if there is no allocator. There is a chance that this is a single-GPU system and we don't need one.
.map(|backend| backend.gbm.clone());
state.backend_data.allocator = gbm.map(|gbm| {
Box::new(DmabufAllocator(GbmAllocator::new(
gbm,
GbmBufferFlags::RENDERING,
))) as Box<_> | #[cfg_attr(not(feature = "egl"), allow(unused_mut))]
let mut renderer = state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap();
#[cfg(feature = "egl")]
{
match renderer.bind_wl_display(&state.display_handle) {
Ok(_) => tracing::info!("Enabled egl hardware acceleration"),
Err(err) => tracing::error!("Error in enabling egl hardware acceleration: {:?}", err),
}
}
let dmabuf_formats = renderer.dmabuf_formats().collect::<Vec<_>>();
let default_feedback = DmabufFeedbackBuilder::new(primary_gpu.dev_id(), dmabuf_formats)
.build()
.unwrap();
let mut dmabuf_state = DmabufState::new();
let dmabuf_global = dmabuf_state.create_global_with_default_feedback::<Corrosion<UdevData>>(
&display.handle(),
&default_feedback,
);
state.backend_data.dmabuf_state = Some((dmabuf_state, dmabuf_global));
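// Descriptive note (added): for every already-created surface, fill in
// per-surface dmabuf feedback derived from the primary GPU and that surface's
// render node, unless feedback was already attached.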
let gpus = &mut state.backend_data.gpu_manager;
state
.backend_data
.backends
.values_mut()
.for_each(|backend_data| {
backend_data.surfaces.values_mut().for_each(|surface_data| {
surface_data.dmabuf_feedback = surface_data.dmabuf_feedback.take().or_else(|| {
get_surface_dmabuf_feedback(
primary_gpu,
surface_data.render_node,
gpus,
&surface_data.compositor,
)
});
});
});
event_loop
.handle()
.insert_source(backend, move |event, _, data| match event {
udev::UdevEvent::Added { device_id, path } => {
data.state
.device_added(DrmNode::from_dev_id(device_id).unwrap(), &path);
}
udev::UdevEvent::Changed { device_id } => {
data.state
.device_changed(D | }); | random_line_split |
mod.rs | _feedback_v1,
wayland_server::{protocol::wl_surface::WlSurface, Display},
},
wayland::dmabuf::{
DmabufFeedback, DmabufFeedbackBuilder, DmabufGlobal, DmabufHandler, DmabufState,
ImportError,
},
};
use self::drm::{BackendData, SurfaceComposition, UdevOutputId};
use crate::{cursor::Cursor, drawing::PointerElement, state::Backend, CalloopData, Corrosion};
mod drm;
mod utils;
pub struct UdevData {
pub loop_signal: LoopSignal,
pub session: LibSeatSession,
primary_gpu: DrmNode,
dmabuf_state: Option<(DmabufState, DmabufGlobal)>,
allocator: Option<Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>>,
gpu_manager: GpuManager<GbmGlesBackend<GlesRenderer>>,
backends: HashMap<DrmNode, BackendData>,
pointer_element: PointerElement<MultiTexture>,
cursor_image: Cursor,
cursor_images: Vec<(xcursor::parser::Image, TextureBuffer<MultiTexture>)>,
}
impl DmabufHandler for Corrosion<UdevData> {
fn dmabuf_state(&mut self) -> &mut smithay::wayland::dmabuf::DmabufState {
&mut self.backend_data.dmabuf_state.as_mut().unwrap().0
}
fn dmabuf_imported(
&mut self,
_global: &DmabufGlobal,
dmabuf: Dmabuf,
) -> Result<(), smithay::wayland::dmabuf::ImportError> {
self.backend_data
.gpu_manager
.single_renderer(&self.backend_data.primary_gpu)
.and_then(|mut renderer| renderer.import_dmabuf(&dmabuf, None))
.map(|_| ())
.map_err(|_| ImportError::Failed)
}
}
delegate_dmabuf!(Corrosion<UdevData>);
impl Backend for UdevData {
fn early_import(&mut self, output: &WlSurface) {
match self
.gpu_manager
.early_import(Some(self.primary_gpu), self.primary_gpu, output)
{
Ok(()) => {}
Err(err) => tracing::error!("Error on early buffer import: {}", err),
};
}
fn loop_signal(&self) -> &LoopSignal |
fn reset_buffers(&mut self, output: &smithay::output::Output) {
if let Some(id) = output.user_data().get::<UdevOutputId>() {
if let Some(gpu) = self.backends.get_mut(&id.device_id) {
if let Some(surface) = gpu.surfaces.get_mut(&id.crtc) {
surface.compositor.reset_buffers();
}
}
}
}
fn seat_name(&self) -> String {
self.session.seat()
}
}
pub fn initialize_backend() {
let mut event_loop = EventLoop::try_new().expect("Unable to initialize event loop");
let (session, mut _notifier) = match LibSeatSession::new() {
Ok((session, notifier)) => (session, notifier),
Err(err) => {
tracing::error!("Error in creating libseat session: {}", err);
return;
}
};
let mut display = Display::new().expect("Unable to create wayland display");
let primary_gpu = udev::primary_gpu(&session.seat())
.unwrap()
.and_then(|p| {
DrmNode::from_path(p)
.ok()
.expect("Unable to create drm node")
.node_with_type(NodeType::Render)
.expect("Unable to create drm node")
.ok()
})
.unwrap_or_else(|| {
udev::all_gpus(&session.seat())
.unwrap()
.into_iter()
.find_map(|g| DrmNode::from_path(g).ok())
.expect("no gpu")
});
tracing::info!("Using {} as a primary gpu", primary_gpu);
let gpus = GpuManager::new(GbmGlesBackend::default()).unwrap();
let data = UdevData {
loop_signal: event_loop.get_signal(),
dmabuf_state: None,
session,
primary_gpu,
allocator: None,
gpu_manager: gpus,
backends: HashMap::new(),
cursor_image: Cursor::load(),
cursor_images: Vec::new(),
pointer_element: PointerElement::default(),
};
let mut state = Corrosion::new(event_loop.handle(), &mut display, data);
let backend = match UdevBackend::new(&state.seat_name) {
Ok(backend) => backend,
Err(err) => {
tracing::error!("Unable to create udev backend: {}", err);
return;
}
};
for (dev, path) in backend.device_list() {
state.device_added(DrmNode::from_dev_id(dev).unwrap(), &path);
}
state.shm_state.update_formats(
state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap()
.shm_formats(),
);
if let Ok(instance) = Instance::new(Version::VERSION_1_2, None) {
if let Some(physical_device) =
PhysicalDevice::enumerate(&instance)
.ok()
.and_then(|devices| {
devices
.filter(|phd| phd.has_device_extension(ExtPhysicalDeviceDrmFn::name()))
.find(|phd| {
phd.primary_node().unwrap() == Some(primary_gpu)
|| phd.render_node().unwrap() == Some(primary_gpu)
})
})
{
match VulkanAllocator::new(
&physical_device,
ImageUsageFlags::COLOR_ATTACHMENT | ImageUsageFlags::SAMPLED,
) {
Ok(allocator) => {
state.backend_data.allocator = Some(Box::new(DmabufAllocator(allocator))
as Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>);
}
Err(err) => {
tracing::warn!("Failed to create vulkan allocator: {}", err);
}
}
}
}
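// Descriptive note (added): input devices are read through libinput, with
// device access mediated by the libseat session; every event is forwarded to
// `process_input_event` on the compositor state.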
let mut libinput_context = Libinput::new_with_udev::<LibinputSessionInterface<LibSeatSession>>(
state.backend_data.session.clone().into(),
);
libinput_context.udev_assign_seat(&state.seat_name).unwrap();
let libinput_backend = LibinputInputBackend::new(libinput_context.clone());
state
.handle
.insert_source(libinput_backend, move |event, _, data| {
data.state.process_input_event(event);
})
.unwrap();
let gbm = state
.backend_data
.backends
.get(&primary_gpu)
// If the primary_gpu failed to initialize, we likely have a kmsro device
.or_else(|| state.backend_data.backends.values().next())
// Don't fail if there is no allocator. There is a chance that this is a single-GPU system and we don't need one.
.map(|backend| backend.gbm.clone());
state.backend_data.allocator = gbm.map(|gbm| {
Box::new(DmabufAllocator(GbmAllocator::new(
gbm,
GbmBufferFlags::RENDERING,
))) as Box<_>
});
#[cfg_attr(not(feature = "egl"), allow(unused_mut))]
let mut renderer = state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap();
#[cfg(feature = "egl")]
{
match renderer.bind_wl_display(&state.display_handle) {
Ok(_) => tracing::info!("Enabled egl hardware acceleration"),
Err(err) => tracing::error!("Error in enabling egl hardware acceleration: {:?}", err),
}
}
let dmabuf_formats = renderer.dmabuf_formats().collect::<Vec<_>>();
let default_feedback = DmabufFeedbackBuilder::new(primary_gpu.dev_id(), dmabuf_formats)
.build()
.unwrap();
let mut dmabuf_state = DmabufState::new();
let dmabuf_global = dmabuf_state.create_global_with_default_feedback::<Corrosion<UdevData>>(
&display.handle(),
&default_feedback,
);
state.backend_data.dmabuf_state = Some((dmabuf_state, dmabuf_global));
let gpus = &mut state.backend_data.gpu_manager;
state
.backend_data
.backends
.values_mut()
.for_each(|backend_data| {
backend_data.surfaces.values_mut().for_each(|surface_data| {
surface_data.dmabuf_feedback = surface_data.dmabuf_feedback.take().or_else(|| {
get_surface_dmabuf_feedback(
primary_gpu,
surface_data.render_node,
gpus,
&surface_data.compositor,
)
});
});
});
event_loop
.handle()
.insert_source(backend, move |event, _, data| match event {
udev::UdevEvent::Added { device_id, path } => {
data.state
.device_added(DrmNode::from_dev_id(device_id).unwrap(), &path);
}
udev::UdevEvent::Changed { device_id } => {
data.state
.device_changed | {
&self.loop_signal
} | identifier_body |
mod.rs | run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send + 'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e.
/// until tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn new() -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
where F: Future<Item = (), Error = ()> + Send + 'static,
{
self.inner_mut().pool.sender().spawn(future).unwrap();
self
}
/// Run a future to completion on the Tokio runtime.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// This method should not be called from an asynchronous context.
///
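/// # Examples
///
/// A minimal sketch (added here, not from the upstream docs), blocking on an
/// already-completed `futures` 0.1 future:
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::future;
/// use tokio::runtime::Runtime;
///
/// let mut rt = Runtime::new().unwrap();
/// let result = rt.block_on(future::ok::<u32, ()>(42));
/// assert_eq!(result, Ok(42));
/// ```
///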
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on<F, R, E>(&mut self, future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let (tx, rx) = futures::sync::oneshot::channel();
self.spawn(future.then(move |r| tx.send(r).map_err(|_| unreachable!())));
rx.wait().unwrap()
}
/// Run a future to completion on the Tokio runtime, then wait for all
/// background futures to complete too.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, waiting for background futures to complete, and yielding
/// its resolved result. Any tasks or timers which the future spawns
/// internally will be executed on the runtime and waited for completion.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on_all<F, R, E>(mut self, future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let res = self.block_on(future);
self.shutdown_on_idle().wait().unwrap();
res
}
/// Signals the runtime to shutdown once it becomes idle.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function can be used to perform a graceful shutdown of the runtime.
///
/// The runtime enters an idle state once **all** of the following occur.
///
/// * The thread pool has no tasks to execute, i.e., all tasks that were
/// spawned have completed.
/// * The reactor is not managing any I/O resources.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_on_idle()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_on_idle(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
let inner = inner.pool.shutdown_on_idle();
Shutdown { inner }
}
/// Signals the runtime to shutdown immediately.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function will forcibly shutdown the runtime, causing any
/// in-progress work to become canceled. The shutdown steps are:
///
/// * Drain any scheduled work queues.
/// * Drop any futures that have not yet completed.
/// * Drop the reactor.
///
/// Once the reactor has dropped, any outstanding I/O resources bound to
/// that reactor will no longer function. Calling any method on them will
/// result in an error.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_now(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
Shutdown::shutdown_now(inner)
}
fn inner(&self) -> &Inner {
self.inner.as_ref().unwrap()
}
fn inner_mut(&mut self) -> &mut Inner {
self.inner.as_mut().unwrap()
}
}
impl Drop for Runtime {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() | {
let shutdown = Shutdown::shutdown_now(inner);
let _ = shutdown.wait();
} | conditional_block |
|
mod.rs | Task execution pool.
pool: threadpool::ThreadPool,
}
// ===== impl Runtime =====
/// Start the Tokio runtime using the supplied future to bootstrap execution.
///
/// This function is used to bootstrap the execution of a Tokio application. It
/// does the following:
///
/// * Start the Tokio runtime using a default configuration.
/// * Spawn the given future onto the thread pool.
/// * Block the current thread until the runtime shuts down.
///
/// Note that the function will not return immediately once `future` has
/// completed. Instead it waits for the entire runtime to become idle.
///
/// See the [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{Future, Stream};
/// use tokio::net::TcpListener;
///
/// # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
/// # unimplemented!();
/// # }
/// # fn dox() {
/// # let addr = "127.0.0.1:8080".parse().unwrap();
/// let listener = TcpListener::bind(&addr).unwrap();
///
/// let server = listener.incoming()
/// .map_err(|e| println!("error = {:?}", e))
/// .for_each(|socket| {
/// tokio::spawn(process(socket))
/// });
///
/// tokio::run(server);
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if called from the context of an executor.
///
/// [mod]: ../index.html
pub fn run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send + 'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e.
/// until tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn new() -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
where F: Future<Item = (), Error = ()> + Send + 'static,
{
self.inner_mut().pool.sender().spawn(future).unwrap();
self
}
/// Run a future to completion on the Tokio runtime.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on<F, R, E>(&mut self, future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let (tx, rx) = futures::sync::oneshot::channel();
self.spawn(future.then(move |r| tx.send(r).map_err(|_| unreachable!())));
rx.wait().unwrap()
}
/// Run a future to completion on the Tokio runtime, then wait for all
/// background futures to complete too.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, waiting for background futures to complete, and yielding
/// its resolved result. Any tasks or timers which the future spawns
/// internally will be executed on the runtime and waited for completion.
///
/// This method should not be called from an asynchronous context.
///
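/// # Examples
///
/// A minimal sketch (added here, not from the upstream docs): run one future
/// to completion and wait for the runtime to shut down before using the
/// result:
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::future;
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new().unwrap();
/// let result = rt.block_on_all(future::lazy(|| Ok::<_, ()>("done")));
/// assert_eq!(result, Ok("done"));
/// ```
///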
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on_all<F, R, E>(mut self, future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let res = self.block_on(future);
self.shutdown_on_idle().wait().unwrap();
res
}
/// Signals the runtime to shutdown once it becomes idle.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function can be used to perform a graceful shutdown of the runtime.
///
/// The runtime enters an idle state once **all** of the following occur.
///
/// * The thread pool has no tasks to execute, i.e., all tasks that were
/// spawned have completed.
/// * The reactor is not managing any I/O resources.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_on_idle()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_on_idle(mut self) -> Shutdown | {
let inner = self.inner.take().unwrap();
let inner = inner.pool.shutdown_on_idle();
Shutdown { inner }
} | identifier_body |
|
mod.rs | ) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! // Create the runtime
//! let mut rt = Runtime::new().unwrap();
//!
//! // Spawn the server task
//! rt.spawn(server);
//!
//! // Wait until the runtime becomes idle and shut it down.
//! rt.shutdown_on_idle()
//! .wait().unwrap();
//! # }
//! # pub fn main() {}
//! ```
//!
//! [reactor]: ../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]: ../timer/index.html
//! [`Runtime`]: struct.Runtime.html
//! [`Reactor`]: ../reactor/struct.Reactor.html
//! [`ThreadPool`]: ../executor/thread_pool/struct.ThreadPool.html
//! [`run`]: fn.run.html
//! [idle]: struct.Runtime.html#method.shutdown_on_idle
//! [`tokio::spawn`]: ../executor/fn.spawn.html
//! [`Timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/timer/struct.Timer.html
mod builder;
pub mod current_thread;
mod shutdown;
mod task_executor;
pub use self::builder::Builder;
pub use self::shutdown::Shutdown;
pub use self::task_executor::TaskExecutor;
use reactor::Handle;
use std::io;
use tokio_executor::enter;
use tokio_threadpool as threadpool;
use futures;
use futures::future::Future;
/// Handle to the Tokio runtime.
///
/// The Tokio runtime includes a reactor as well as an executor for running
/// tasks.
///
/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However,
/// most users will use [`tokio::run`], which uses a `Runtime` internally.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [`new`]: #method.new
/// [`Builder`]: struct.Builder.html
/// [`tokio::run`]: fn.run.html
#[derive(Debug)]
pub struct Runtime {
inner: Option<Inner>,
}
#[derive(Debug)]
struct Inner {
/// A handle to one of the per-worker reactors.
reactor: Handle,
/// Task execution pool.
pool: threadpool::ThreadPool,
}
// ===== impl Runtime =====
/// Start the Tokio runtime using the supplied future to bootstrap execution.
///
/// This function is used to bootstrap the execution of a Tokio application. It
/// does the following:
///
/// * Start the Tokio runtime using a default configuration.
/// * Spawn the given future onto the thread pool.
/// * Block the current thread until the runtime shuts down.
///
/// Note that the function will not return immediately once `future` has
/// completed. Instead it waits for the entire runtime to become idle.
///
/// See the [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{Future, Stream};
/// use tokio::net::TcpListener;
///
/// # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
/// # unimplemented!();
/// # }
/// # fn dox() {
/// # let addr = "127.0.0.1:8080".parse().unwrap();
/// let listener = TcpListener::bind(&addr).unwrap();
///
/// let server = listener.incoming()
/// .map_err(|e| println!("error = {:?}", e))
/// .for_each(|socket| {
/// tokio::spawn(process(socket))
/// });
///
/// tokio::run(server);
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if called from the context of an executor.
///
/// [mod]: ../index.html
pub fn run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send + 'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e.
/// until tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn new() -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
where F: Future<Item = (), Error = ()> + Send + 'static,
{
self.inner_mut().pool.sender().spawn(future).unwrap();
self
}
/// Run a future to completion on the Tokio runtime.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on<F, R, E>(&mut self, future: F) -> Result<R, E>
where | random_line_split |
||
mod.rs | .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! tokio::run(server);
//! # }
//! # pub fn main() {}
//! ```
//!
//! In this function, the `run` function blocks until the runtime becomes idle.
//! See [`shutdown_on_idle`][idle] for more shutdown details.
//!
//! From within the context of the runtime, additional tasks are spawned using
//! the [`tokio::spawn`] function. Futures spawned using this function will be
//! executed on the same thread pool used by the [`Runtime`].
//!
//! A [`Runtime`] instance can also be used directly.
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::runtime::Runtime;
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! // Create the runtime
//! let mut rt = Runtime::new().unwrap();
//!
//! // Spawn the server task
//! rt.spawn(server);
//!
//! // Wait until the runtime becomes idle and shut it down.
//! rt.shutdown_on_idle()
//! .wait().unwrap();
//! # }
//! # pub fn main() {}
//! ```
//!
//! [reactor]: ../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]: ../timer/index.html
//! [`Runtime`]: struct.Runtime.html
//! [`Reactor`]: ../reactor/struct.Reactor.html
//! [`ThreadPool`]: ../executor/thread_pool/struct.ThreadPool.html
//! [`run`]: fn.run.html
//! [idle]: struct.Runtime.html#method.shutdown_on_idle
//! [`tokio::spawn`]: ../executor/fn.spawn.html
//! [`Timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/timer/struct.Timer.html
mod builder;
pub mod current_thread;
mod shutdown;
mod task_executor;
pub use self::builder::Builder;
pub use self::shutdown::Shutdown;
pub use self::task_executor::TaskExecutor;
use reactor::Handle;
use std::io;
use tokio_executor::enter;
use tokio_threadpool as threadpool;
use futures;
use futures::future::Future;
/// Handle to the Tokio runtime.
///
/// The Tokio runtime includes a reactor as well as an executor for running
/// tasks.
///
/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However,
/// most users will use [`tokio::run`], which uses a `Runtime` internally.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [`new`]: #method.new
/// [`Builder`]: struct.Builder.html
/// [`tokio::run`]: fn.run.html
#[derive(Debug)]
pub struct Runtime {
inner: Option<Inner>,
}
#[derive(Debug)]
struct Inner {
/// A handle to one of the per-worker reactors.
reactor: Handle,
/// Task execution pool.
pool: threadpool::ThreadPool,
}
// ===== impl Runtime =====
/// Start the Tokio runtime using the supplied future to bootstrap execution.
///
/// This function is used to bootstrap the execution of a Tokio application. It
/// does the following:
///
/// * Start the Tokio runtime using a default configuration.
/// * Spawn the given future onto the thread pool.
/// * Block the current thread until the runtime shuts down.
///
/// Note that the function will not return immediately once `future` has
/// completed. Instead it waits for the entire runtime to become idle.
///
/// See the [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{Future, Stream};
/// use tokio::net::TcpListener;
///
/// # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
/// # unimplemented!();
/// # }
/// # fn dox() {
/// # let addr = "127.0.0.1:8080".parse().unwrap();
/// let listener = TcpListener::bind(&addr).unwrap();
///
/// let server = listener.incoming()
/// .map_err(|e| println!("error = {:?}", e))
/// .for_each(|socket| {
/// tokio::spawn(process(socket))
/// });
///
/// tokio::run(server);
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if called from the context of an executor.
///
/// [mod]: ../index.html
pub fn run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send + 'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e.
/// tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn | () -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future | new | identifier_name |
manager.py | initiating the ``setup`` stage of the lifecycle. This module
provides the default implementation and interface.
The :class:`ComponentManager` is the first plugin loaded by the
:class:`SimulationContext <vivarium.framework.engine.SimulationContext>`
and managers and components are given to it by the context. It is called on to
setup everything it holds when the context itself is setup.
"""
import inspect
import typing
from typing import Any, Dict, Iterator, List, Tuple, Type, Union
from vivarium.config_tree import ConfigurationError, DuplicatedConfigurationError
from vivarium.exceptions import VivariumError
if typing.TYPE_CHECKING:
from vivarium.framework.engine import Builder
class ComponentConfigError(VivariumError):
"""Error while interpreting configuration file or initializing components"""
pass
class OrderedComponentSet:
"""A container for Vivarium components.
It preserves ordering, enforces uniqueness by name, and provides a
subset of set-like semantics.
"""
def __init__(self, *args):
self.components = []
if args:
self.update(args)
def add(self, component: Any):
if component in self:
raise ComponentConfigError(
f"Attempting to add a component with duplicate name: {component}"
)
self.components.append(component)
def update(self, components: Union[List[Any], Tuple[Any]]):
for c in components:
self.add(c)
def pop(self) -> Any:
component = self.components.pop(0)
return component
def __contains__(self, component: Any) -> bool:
|
def __iter__(self) -> Iterator:
return iter(self.components)
def __len__(self) -> int:
return len(self.components)
def __bool__(self) -> bool:
return bool(self.components)
def __add__(self, other: "OrderedComponentSet") -> "OrderedComponentSet":
return OrderedComponentSet(*(self.components + other.components))
def __eq__(self, other: "OrderedComponentSet") -> bool:
try:
return type(self) is type(other) and [c.name for c in self.components] == [
c.name for c in other.components
]
except TypeError:
return False
def __getitem__(self, index: int) -> Any:
return self.components[index]
def __repr__(self):
return f"OrderedComponentSet({[c.name for c in self.components]})"
class ComponentManager:
"""Manages the initialization and setup of :mod:`vivarium` components.
Maintains references to all components and managers in a :mod:`vivarium`
simulation, applies their default configuration and initiates their
``setup`` life-cycle stage.
The component manager maintains a separate list of managers and components
and provides methods for adding to these lists and getting members that
correspond to a specific type. It also initiates the ``setup`` lifecycle
phase for all components and managers it controls. This is done first for
managers and then components, and involves applying default configurations
and calling the object's ``setup`` method.
"""
def __init__(self):
self._managers = OrderedComponentSet()
self._components = OrderedComponentSet()
self.configuration = None
self.lifecycle = None
@property
def name(self):
"""The name of this component."""
return "component_manager"
def setup(self, configuration, lifecycle_manager):
"""Called by the simulation context."""
self.configuration = configuration
self.lifecycle = lifecycle_manager
self.lifecycle.add_constraint(
self.get_components_by_type,
restrict_during=["initialization", "population_creation"],
)
self.lifecycle.add_constraint(
self.get_component, restrict_during=["population_creation"]
)
self.lifecycle.add_constraint(
self.list_components, restrict_during=["initialization"]
)
def add_managers(self, managers: Union[List[Any], Tuple[Any]]):
"""Registers new managers with the component manager.
Managers are configured and setup before components.
Parameters
----------
managers
Instantiated managers to register.
"""
for m in self._flatten(managers):
self.apply_configuration_defaults(m)
self._managers.add(m)
def add_components(self, components: Union[List[Any], Tuple[Any]]):
"""Register new components with the component manager.
Components are configured and setup after managers.
Parameters
----------
components
Instantiated components to register.
"""
for c in self._flatten(components):
self.apply_configuration_defaults(c)
self._components.add(c)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type.
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return [c for c in self._components if isinstance(c, component_type)]
def get_component(self, name: str) -> Any:
"""Get the component with name ``name``.
Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
Any
A component that has name ``name``.
Raises
------
ValueError
No component exists in the component manager with ``name``.
"""
for c in self._components:
if c.name == name:
return c
raise ValueError(f"No component found with name {name}")
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A mapping of component names to components.
"""
return {c.name: c for c in self._components}
def setup_components(self, builder: "Builder"):
"""Separately configure and set up the managers and components held by
the component manager, in that order.
The setup process involves applying default configurations and then
calling the manager or component's setup method. This can result in new
components as a side effect of setup because components themselves have
access to this interface through the builder in their setup method.
Parameters
----------
builder
Interface to several simulation tools.
"""
self._setup_components(builder, self._managers + self._components)
def apply_configuration_defaults(self, component: Any):
if not hasattr(component, "configuration_defaults"):
return
try:
self.configuration.update(
component.configuration_defaults,
layer="component_configs",
source=component.name,
)
except DuplicatedConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
old_name, old_file = e.source, self._get_file(self.get_component(e.source))
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"set the configuration value {e.value_name}, but it has already "
f"been set by {old_name} in file {old_file}."
)
except ConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"alter the structure of the configuration at key {e.value_name}. "
f"This happens if one component attempts to set a value at an interior "
f"configuration key or if it attempts to turn an interior key into a "
f"configuration value."
)
@staticmethod
def _get_file(component):
if component.__module__ == "__main__":
# This is defined directly in a script or notebook so there's no
# file to attribute it to.
return "__main__"
else:
return inspect.getfile(component.__class__)
@staticmethod
def _flatten(components: List):
out = []
components = components[::-1]
while components:
current = components.pop()
if isinstance(current, (list, tuple)):
components.extend(current[::-1])
else:
if hasattr(current, "sub_components"):
components.extend(current.sub_components[::-1])
out.append(current)
return out
@staticmethod
def _setup_components(builder: "Builder", components: OrderedComponentSet):
for c in components:
if hasattr(c, "setup"):
c.setup(builder)
def __repr__(self):
return "ComponentManager()"
class ComponentInterface:
"""The builder interface for the component manager system. This class
defines component manager methods a ``vivarium`` component can access from
the builder. It provides methods for querying and adding components to the
:class:`ComponentManager`.
"""
def __init__(self, manager: ComponentManager):
self._manager = manager
def get_component(self, name: str) -> Any:
"""Get the component that has ``name`` if presently held by the component
manager. Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
A component that has name ``name``.
"""
return self._manager.get_component(name)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
| if not hasattr(component, "name"):
raise ComponentConfigError(f"Component {component} has no name attribute")
return component.name in [c.name for c in self.components] | identifier_body |
manager.py | initiating the ``setup`` stage of the lifecycle. This module
provides the default implementation and interface.
The :class:`ComponentManager` is the first plugin loaded by the
:class:`SimulationContext <vivarium.framework.engine.SimulationContext>`
and managers and components are given to it by the context. It is called on to
setup everything it holds when the context itself is setup.
"""
import inspect
import typing
from typing import Any, Dict, Iterator, List, Tuple, Type, Union
from vivarium.config_tree import ConfigurationError, DuplicatedConfigurationError
from vivarium.exceptions import VivariumError
if typing.TYPE_CHECKING:
from vivarium.framework.engine import Builder
class ComponentConfigError(VivariumError):
"""Error while interpreting configuration file or initializing components"""
pass
class OrderedComponentSet:
"""A container for Vivarium components.
It preserves ordering, enforces uniqueness by name, and provides a
subset of set-like semantics.
"""
def __init__(self, *args):
self.components = []
if args:
self.update(args)
def add(self, component: Any):
if component in self:
raise ComponentConfigError(
f"Attempting to add a component with duplicate name: {component}"
)
self.components.append(component)
def update(self, components: Union[List[Any], Tuple[Any]]):
for c in components:
self.add(c)
def pop(self) -> Any:
component = self.components.pop(0)
return component
def | (self, component: Any) -> bool:
if not hasattr(component, "name"):
raise ComponentConfigError(f"Component {component} has no name attribute")
return component.name in [c.name for c in self.components]
def __iter__(self) -> Iterator:
return iter(self.components)
def __len__(self) -> int:
return len(self.components)
def __bool__(self) -> bool:
return bool(self.components)
def __add__(self, other: "OrderedComponentSet") -> "OrderedComponentSet":
return OrderedComponentSet(*(self.components + other.components))
def __eq__(self, other: "OrderedComponentSet") -> bool:
try:
return type(self) is type(other) and [c.name for c in self.components] == [
c.name for c in other.components
]
except TypeError:
return False
def __getitem__(self, index: int) -> Any:
return self.components[index]
def __repr__(self):
return f"OrderedComponentSet({[c.name for c in self.components]})"
class ComponentManager:
"""Manages the initialization and setup of :mod:`vivarium` components.
Maintains references to all components and managers in a :mod:`vivarium`
simulation, applies their default configuration and initiates their
``setup`` life-cycle stage.
The component manager maintains a separate list of managers and components
and provides methods for adding to these lists and getting members that
correspond to a specific type. It also initiates the ``setup`` lifecycle
phase for all components and managers it controls. This is done first for
managers and then components, and involves applying default configurations
and calling the object's ``setup`` method.
"""
def __init__(self):
self._managers = OrderedComponentSet()
self._components = OrderedComponentSet()
self.configuration = None
self.lifecycle = None
@property
def name(self):
"""The name of this component."""
return "component_manager"
def setup(self, configuration, lifecycle_manager):
"""Called by the simulation context."""
self.configuration = configuration
self.lifecycle = lifecycle_manager
self.lifecycle.add_constraint(
self.get_components_by_type,
restrict_during=["initialization", "population_creation"],
)
self.lifecycle.add_constraint(
self.get_component, restrict_during=["population_creation"]
)
self.lifecycle.add_constraint(
self.list_components, restrict_during=["initialization"]
)
def add_managers(self, managers: Union[List[Any], Tuple[Any]]):
"""Registers new managers with the component manager.
Managers are configured and setup before components.
Parameters
----------
managers
Instantiated managers to register.
"""
for m in self._flatten(managers):
self.apply_configuration_defaults(m)
self._managers.add(m)
def add_components(self, components: Union[List[Any], Tuple[Any]]):
"""Register new components with the component manager.
Components are configured and setup after managers.
Parameters
----------
components
Instantiated components to register.
"""
for c in self._flatten(components):
self.apply_configuration_defaults(c)
self._components.add(c)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type.
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return [c for c in self._components if isinstance(c, component_type)]
def get_component(self, name: str) -> Any:
"""Get the component with name ``name``.
Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
Any
A component that has name ``name``.
Raises
------
ValueError
No component exists in the component manager with ``name``.
"""
for c in self._components:
if c.name == name:
return c
raise ValueError(f"No component found with name {name}")
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A mapping of component names to components.
"""
return {c.name: c for c in self._components}
def setup_components(self, builder: "Builder"):
"""Separately configure and set up the managers and components held by
the component manager, in that order.
The setup process involves applying default configurations and then
calling the manager or component's setup method. This can result in new
components as a side effect of setup because components themselves have
access to this interface through the builder in their setup method.
Parameters
----------
builder
Interface to several simulation tools.
"""
self._setup_components(builder, self._managers + self._components)
def apply_configuration_defaults(self, component: Any):
if not hasattr(component, "configuration_defaults"):
return
try:
self.configuration.update(
component.configuration_defaults,
layer="component_configs",
source=component.name,
)
except DuplicatedConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
old_name, old_file = e.source, self._get_file(self.get_component(e.source))
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"set the configuration value {e.value_name}, but it has already "
f"been set by {old_name} in file {old_file}."
)
except ConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"alter the structure of the configuration at key {e.value_name}. "
f"This happens if one component attempts to set a value at an interior "
f"configuration key or if it attempts to turn an interior key into a "
f"configuration value."
)
@staticmethod
def _get_file(component):
if component.__module__ == "__main__":
# This is defined directly in a script or notebook so there's no
# file to attribute it to.
return "__main__"
else:
return inspect.getfile(component.__class__)
@staticmethod
def _flatten(components: List):
out = []
components = components[::-1]
while components:
current = components.pop()
if isinstance(current, (list, tuple)):
components.extend(current[::-1])
else:
if hasattr(current, "sub_components"):
components.extend(current.sub_components[::-1])
out.append(current)
return out
@staticmethod
def _setup_components(builder: "Builder", components: OrderedComponentSet):
for c in components:
if hasattr(c, "setup"):
c.setup(builder)
def __repr__(self):
return "ComponentManager()"
class ComponentInterface:
"""The builder interface for the component manager system. This class
defines component manager methods a ``vivarium`` component can access from
the builder. It provides methods for querying and adding components to the
:class:`ComponentManager`.
"""
def __init__(self, manager: ComponentManager):
self._manager = manager
def get_component(self, name: str) -> Any:
"""Get the component that has ``name`` if presently held by the component
manager. Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
A component that has name ``name``.
"""
return self._manager.get_component(name)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
| __contains__ | identifier_name |
manager.py | initiating the ``setup`` stage of the lifecycle. This module
provides the default implementation and interface.
The :class:`ComponentManager` is the first plugin loaded by the
:class:`SimulationContext <vivarium.framework.engine.SimulationContext>`
and managers and components are given to it by the context. It is called on to
setup everything it holds when the context itself is setup.
"""
import inspect
import typing
from typing import Any, Dict, Iterator, List, Tuple, Type, Union
from vivarium.config_tree import ConfigurationError, DuplicatedConfigurationError
from vivarium.exceptions import VivariumError
if typing.TYPE_CHECKING:
from vivarium.framework.engine import Builder
class ComponentConfigError(VivariumError):
"""Error while interpreting configuration file or initializing components"""
pass
class OrderedComponentSet:
"""A container for Vivarium components.
It preserves ordering, enforces uniqueness by name, and provides a
subset of set-like semantics.
"""
def __init__(self, *args):
self.components = []
if args:
self.update(args)
def add(self, component: Any):
if component in self:
raise ComponentConfigError(
f"Attempting to add a component with duplicate name: {component}"
)
self.components.append(component)
def update(self, components: Union[List[Any], Tuple[Any]]):
for c in components:
self.add(c)
def pop(self) -> Any:
component = self.components.pop(0)
return component
def __contains__(self, component: Any) -> bool:
if not hasattr(component, "name"):
raise ComponentConfigError(f"Component {component} has no name attribute")
return component.name in [c.name for c in self.components]
def __iter__(self) -> Iterator:
return iter(self.components)
def __len__(self) -> int:
return len(self.components)
def __bool__(self) -> bool:
return bool(self.components)
def __add__(self, other: "OrderedComponentSet") -> "OrderedComponentSet":
return OrderedComponentSet(*(self.components + other.components))
def __eq__(self, other: "OrderedComponentSet") -> bool:
try:
return type(self) is type(other) and [c.name for c in self.components] == [
c.name for c in other.components
]
except TypeError:
return False
def __getitem__(self, index: int) -> Any:
return self.components[index]
def __repr__(self):
return f"OrderedComponentSet({[c.name for c in self.components]})"
class ComponentManager:
"""Manages the initialization and setup of :mod:`vivarium` components.
Maintains references to all components and managers in a :mod:`vivarium`
simulation, applies their default configuration and initiates their
``setup`` life-cycle stage.
The component manager maintains a separate list of managers and components
and provides methods for adding to these lists and getting members that
correspond to a specific type. It also initiates the ``setup`` lifecycle
phase for all components and managers it controls. This is done first for
managers and then components, and involves applying default configurations
and calling the object's ``setup`` method.
"""
def __init__(self):
self._managers = OrderedComponentSet()
self._components = OrderedComponentSet()
self.configuration = None
self.lifecycle = None
@property
def name(self):
"""The name of this component."""
return "component_manager"
def setup(self, configuration, lifecycle_manager):
"""Called by the simulation context."""
self.configuration = configuration
self.lifecycle = lifecycle_manager
self.lifecycle.add_constraint(
self.get_components_by_type,
restrict_during=["initialization", "population_creation"],
)
self.lifecycle.add_constraint(
self.get_component, restrict_during=["population_creation"]
)
self.lifecycle.add_constraint(
self.list_components, restrict_during=["initialization"]
)
def add_managers(self, managers: Union[List[Any], Tuple[Any]]):
"""Registers new managers with the component manager.
Managers are configured and setup before components.
Parameters
----------
managers
Instantiated managers to register.
"""
for m in self._flatten(managers):
self.apply_configuration_defaults(m)
self._managers.add(m)
def add_components(self, components: Union[List[Any], Tuple[Any]]):
"""Register new components with the component manager.
Components are configured and setup after managers.
Parameters
----------
components
Instantiated components to register.
"""
for c in self._flatten(components):
self.apply_configuration_defaults(c)
self._components.add(c)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type.
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return [c for c in self._components if isinstance(c, component_type)]
def get_component(self, name: str) -> Any:
"""Get the component with name ``name``.
Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
Any
A component that has name ``name``.
Raises
------
ValueError
No component exists in the component manager with ``name``.
"""
for c in self._components:
if c.name == name:
return c
raise ValueError(f"No component found with name {name}")
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A mapping of component names to components.
"""
return {c.name: c for c in self._components}
def setup_components(self, builder: "Builder"):
"""Separately configure and set up the managers and components held by
the component manager, in that order.
The setup process involves applying default configurations and then
calling the manager or component's setup method. This can result in new
components as a side effect of setup because components themselves have
access to this interface through the builder in their setup method.
Parameters
----------
builder
Interface to several simulation tools.
"""
self._setup_components(builder, self._managers + self._components)
def apply_configuration_defaults(self, component: Any):
if not hasattr(component, "configuration_defaults"):
return
try:
self.configuration.update(
component.configuration_defaults,
layer="component_configs",
source=component.name,
)
except DuplicatedConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
old_name, old_file = e.source, self._get_file(self.get_component(e.source))
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"set the configuration value {e.value_name}, but it has already "
f"been set by {old_name} in file {old_file}."
)
except ConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"alter the structure of the configuration at key {e.value_name}. "
f"This happens if one component attempts to set a value at an interior "
f"configuration key or if it attempts to turn an interior key into a "
f"configuration value."
)
@staticmethod
def _get_file(component):
if component.__module__ == "__main__":
# This is defined directly in a script or notebook so there's no
# file to attribute it to.
return "__main__"
else:
return inspect.getfile(component.__class__)
@staticmethod
def _flatten(components: List):
out = []
components = components[::-1]
while components:
|
return out
@staticmethod
def _setup_components(builder: "Builder", components: OrderedComponentSet):
for c in components:
if hasattr(c, "setup"):
c.setup(builder)
def __repr__(self):
return "ComponentManager()"
class ComponentInterface:
"""The builder interface for the component manager system. This class
defines component manager methods a ``vivarium`` component can access from
the builder. It provides methods for querying and adding components to the
:class:`ComponentManager`.
"""
def __init__(self, manager: ComponentManager):
self._manager = manager
def get_component(self, name: str) -> Any:
"""Get the component that has ``name`` if presently held by the component
manager. Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
A component that has name ``name``.
"""
return self._manager.get_component(name)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
| current = components.pop()
if isinstance(current, (list, tuple)):
components.extend(current[::-1])
else:
if hasattr(current, "sub_components"):
components.extend(current.sub_components[::-1])
out.append(current) | conditional_block |
manager.py | initiating the ``setup`` stage of the lifecycle. This module
provides the default implementation and interface.
The :class:`ComponentManager` is the first plugin loaded by the
:class:`SimulationContext <vivarium.framework.engine.SimulationContext>`
and managers and components are given to it by the context. It is called on to
setup everything it holds when the context itself is setup.
"""
import inspect
import typing
from typing import Any, Dict, Iterator, List, Tuple, Type, Union
from vivarium.config_tree import ConfigurationError, DuplicatedConfigurationError
from vivarium.exceptions import VivariumError
if typing.TYPE_CHECKING:
from vivarium.framework.engine import Builder
class ComponentConfigError(VivariumError):
"""Error while interpreting configuration file or initializing components"""
pass
class OrderedComponentSet:
"""A container for Vivarium components.
It preserves ordering, enforces uniqueness by name, and provides a
subset of set-like semantics.
"""
def __init__(self, *args):
self.components = []
if args:
self.update(args)
def add(self, component: Any):
if component in self:
raise ComponentConfigError(
f"Attempting to add a component with duplicate name: {component}"
)
self.components.append(component)
def update(self, components: Union[List[Any], Tuple[Any]]):
for c in components:
self.add(c)
def pop(self) -> Any:
component = self.components.pop(0)
return component
def __contains__(self, component: Any) -> bool:
if not hasattr(component, "name"):
raise ComponentConfigError(f"Component {component} has no name attribute")
return component.name in [c.name for c in self.components]
def __iter__(self) -> Iterator:
return iter(self.components)
def __len__(self) -> int:
return len(self.components)
def __bool__(self) -> bool:
return bool(self.components)
def __add__(self, other: "OrderedComponentSet") -> "OrderedComponentSet":
return OrderedComponentSet(*(self.components + other.components))
def __eq__(self, other: "OrderedComponentSet") -> bool:
try:
return type(self) is type(other) and [c.name for c in self.components] == [
c.name for c in other.components
]
except TypeError:
return False
def __getitem__(self, index: int) -> Any:
return self.components[index]
def __repr__(self):
return f"OrderedComponentSet({[c.name for c in self.components]})"
class ComponentManager:
"""Manages the initialization and setup of :mod:`vivarium` components.
Maintains references to all components and managers in a :mod:`vivarium`
simulation, applies their default configuration and initiates their
``setup`` life-cycle stage.
The component manager maintains a separate list of managers and components
and provides methods for adding to these lists and getting members that
correspond to a specific type. It also initiates the ``setup`` lifecycle
phase for all components and managers it controls. This is done first for
managers and then components, and involves applying default configurations
and calling the object's ``setup`` method.
"""
def __init__(self):
self._managers = OrderedComponentSet()
self._components = OrderedComponentSet()
self.configuration = None
self.lifecycle = None
@property
def name(self):
"""The name of this component."""
return "component_manager"
def setup(self, configuration, lifecycle_manager):
"""Called by the simulation context."""
self.configuration = configuration
self.lifecycle = lifecycle_manager
self.lifecycle.add_constraint(
self.get_components_by_type,
restrict_during=["initialization", "population_creation"],
)
self.lifecycle.add_constraint(
self.get_component, restrict_during=["population_creation"]
)
self.lifecycle.add_constraint(
self.list_components, restrict_during=["initialization"]
)
def add_managers(self, managers: Union[List[Any], Tuple[Any]]):
"""Registers new managers with the component manager.
Managers are configured and setup before components.
Parameters
----------
managers
Instantiated managers to register.
"""
for m in self._flatten(managers):
self.apply_configuration_defaults(m)
self._managers.add(m)
def add_components(self, components: Union[List[Any], Tuple[Any]]):
"""Register new components with the component manager.
Components are configured and setup after managers.
Parameters
----------
components |
"""
for c in self._flatten(components):
self.apply_configuration_defaults(c)
self._components.add(c)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type.
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return [c for c in self._components if isinstance(c, component_type)]
def get_component(self, name: str) -> Any:
"""Get the component with name ``name``.
Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
Any
A component that has name ``name``.
Raises
------
ValueError
No component exists in the component manager with ``name``.
"""
for c in self._components:
if c.name == name:
return c
raise ValueError(f"No component found with name {name}")
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A mapping of component names to components.
"""
return {c.name: c for c in self._components}
def setup_components(self, builder: "Builder"):
"""Separately configure and set up the managers and components held by
the component manager, in that order.
The setup process involves applying default configurations and then
calling the manager or component's setup method. This can result in new
components as a side effect of setup because components themselves have
access to this interface through the builder in their setup method.
Parameters
----------
builder
Interface to several simulation tools.
"""
self._setup_components(builder, self._managers + self._components)
def apply_configuration_defaults(self, component: Any):
if not hasattr(component, "configuration_defaults"):
return
try:
self.configuration.update(
component.configuration_defaults,
layer="component_configs",
source=component.name,
)
except DuplicatedConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
old_name, old_file = e.source, self._get_file(self.get_component(e.source))
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"set the configuration value {e.value_name}, but it has already "
f"been set by {old_name} in file {old_file}."
)
except ConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"alter the structure of the configuration at key {e.value_name}. "
f"This happens if one component attempts to set a value at an interior "
f"configuration key or if it attempts to turn an interior key into a "
f"configuration value."
)
@staticmethod
def _get_file(component):
if component.__module__ == "__main__":
# This is defined directly in a script or notebook so there's no
# file to attribute it to.
return "__main__"
else:
return inspect.getfile(component.__class__)
@staticmethod
def _flatten(components: List):
out = []
components = components[::-1]
while components:
current = components.pop()
if isinstance(current, (list, tuple)):
components.extend(current[::-1])
else:
if hasattr(current, "sub_components"):
components.extend(current.sub_components[::-1])
out.append(current)
return out
@staticmethod
def _setup_components(builder: "Builder", components: OrderedComponentSet):
for c in components:
if hasattr(c, "setup"):
c.setup(builder)
def __repr__(self):
return "ComponentManager()"
class ComponentInterface:
"""The builder interface for the component manager system. This class
defines component manager methods a ``vivarium`` component can access from
the builder. It provides methods for querying and adding components to the
:class:`ComponentManager`.
"""
def __init__(self, manager: ComponentManager):
self._manager = manager
def get_component(self, name: str) -> Any:
"""Get the component that has ``name`` if presently held by the component
manager. Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
A component that has name ``name``.
"""
return self._manager.get_component(name)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) | Instantiated components to register. | random_line_split |
digest.py | _contigs_path = os.path.join(output_dir, output_contigs)
frag_list_path = os.path.join(output_dir, output_frags)
except TypeError:
info_contigs_path = output_contigs
frag_list_path = output_frags
with open(info_contigs_path, "w") as info_contigs:
info_contigs.write("contig\tlength\tn_frags\tcumul_length\n")
with open(frag_list_path, "w") as fragments_list:
fragments_list.write(
"id\tchrom\tstart_pos" "\tend_pos\tsize\tgc_content\n"
)
total_frags = 0
for record in records:
contig_seq = record.seq
contig_name = record.id
contig_length = len(contig_seq)
if contig_length < int(min_size):
continue
sites = get_restriction_table(
contig_seq, enzyme, circular=circular
)
fragments = (
contig_seq[sites[i] : sites[i + 1]]
for i in range(len(sites) - 1)
)
n_frags = 0
current_id = 1
start_pos = 0
for frag in fragments:
frag_length = len(frag)
if frag_length > 0:
end_pos = start_pos + frag_length
gc_content = SeqUtils.GC(frag) / 100.0
current_fragment_line = "%s\t%s\t%s\t%s\t%s\t%s\n" % (
current_id,
contig_name,
start_pos,
end_pos,
frag_length,
gc_content,
)
fragments_list.write(current_fragment_line)
try:
assert (current_id == 1 and start_pos == 0) or (
current_id > 1 and start_pos > 0
)
except AssertionError:
logger.error((current_id, start_pos))
raise
start_pos = end_pos
current_id += 1
n_frags += 1
current_contig_line = "%s\t%s\t%s\t%s\n" % (
contig_name,
contig_length,
n_frags,
total_frags,
)
total_frags += n_frags
info_contigs.write(current_contig_line)
def | (pairs_file, idx_pairs_file, restriction_table):
"""
Writes the indexed pairs file, which has two more columns than the input
pairs file corresponding to the restriction fragment index of each read.
Note that pairs files have 1bp point positions whereas the restriction table
has 0bp point positions.
Parameters
----------
pairs_file: str
Path to the input pairs file. Consists of 7 tab-separated
columns: readID, chr1, pos1, chr2, pos2, strand1, strand2
idx_pairs_file: str
Path to the output indexed pairs file. Consists of 9 white space
separated columns: readID, chr1, pos1, chr2, pos2, strand1, strand2,
frag1, frag2. frag1 and frag2 are 0-based restriction fragments based
on whole genome.
restriction_table: dict
Dictionary with chromosome identifiers (str) as keys and list of
positions (int) of restriction sites as values.
"""
# NOTE: Bottlenecks here are 1. binary search in find_frag and 2. writerow
# 1. could be reduced by searching groups of N frags in parallel and 2. by
# writing N frags simultaneously using a single call of writerows.
# Parse and update header section
pairs_header = hio.get_pairs_header(pairs_file)
header_size = len(pairs_header)
chrom_order = []
with open(idx_pairs_file, "w") as idx_pairs:
for line in pairs_header:
# Add new column names to header
if line.startswith("#columns"):
line = line.rstrip() + " frag1 frag2"
if line.startswith("#chromsize"):
chrom_order.append(line.split()[1])
idx_pairs.write(line + "\n")
# Get number of fragments per chrom to allow genome-based indices
shift_frags = {}
prev_frags = 0
for rank, chrom in enumerate(chrom_order):
if rank > 0:
# Note the "-1" because there are nfrags + 1 sites in rest table
prev_frags += len(restriction_table[chrom_order[rank - 1]]) - 1
# Idx of each chrom's frags will be shifted by n frags in previous chroms
shift_frags[chrom] = prev_frags
missing_contigs = set()
# Attribute pairs to fragments and append them to output file (after header)
with open(pairs_file, "r") as pairs, open(
idx_pairs_file, "a"
) as idx_pairs:
# Skip header lines
for _ in range(header_size):
next(pairs)
# Define input and output fields
pairs_cols = [
"readID",
"chr1",
"pos1",
"chr2",
"pos2",
"strand1",
"strand2",
]
idx_cols = pairs_cols + ["frag1", "frag2"]
# Use csv reader / writer to automatically parse columns into a dict
pairs_reader = csv.DictReader(
pairs, fieldnames=pairs_cols, delimiter="\t"
)
pairs_writer = csv.DictWriter(
idx_pairs, fieldnames=idx_cols, delimiter="\t"
)
for pair in pairs_reader:
# Get the 0-based indices of corresponding restriction fragments
# Deducing 1 from pair position to get it into 0bp point
pair["frag1"] = find_frag(
int(pair["pos1"]) - 1, restriction_table[pair["chr1"]]
)
pair["frag2"] = find_frag(
int(pair["pos2"]) - 1, restriction_table[pair["chr2"]]
)
# Shift fragment indices to make them genome-based instead of
# chromosome-based
try:
pair["frag1"] += shift_frags[pair["chr1"]]
except KeyError:
missing_contigs.add(pair["chr1"])
try:
pair["frag2"] += shift_frags[pair["chr2"]]
except KeyError:
missing_contigs.add(pair["chr2"])
# Write indexed pairs in the new file
pairs_writer.writerow(pair)
if missing_contigs:
logger.warning(
"Pairs on the following contigs were discarded as "
"those contigs are not listed in the paris file header. "
"This is normal if you filtered out small contigs: %s"
% " ".join(list(missing_contigs))
)
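# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): how genome-based fragment indices arise
# from per-chromosome restriction tables, mirroring the shift_frags logic
# above. Chromosome names and site positions are made up, and numpy is assumed
# to be imported as `np` as elsewhere in this module.
def _demo_genome_based_index():
    restriction_table = {
        "chr1": np.array([0, 20, 50]),  # 2 fragments on chr1
        "chr2": np.array([0, 30, 60]),  # 2 fragments on chr2
    }
    chrom_order = ["chr1", "chr2"]
    shift_frags, prev_frags = {}, 0
    for rank, chrom in enumerate(chrom_order):
        if rank > 0:
            prev_frags += len(restriction_table[chrom_order[rank - 1]]) - 1
        shift_frags[chrom] = prev_frags
    # A pair at 1-based position 35 on chr2 falls in chr2's local fragment 1,
    # i.e. genome-wide fragment 1 + 2 = 3.
    local_frag = find_frag(35 - 1, restriction_table["chr2"])
    return local_frag + shift_frags["chr2"]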
def get_restriction_table(seq, enzyme, circular=False):
"""
Get the restriction table for a single genomic sequence.
Parameters
----------
seq : Seq object
A biopython Seq object representing a chromosomes or contig.
enzyme : int, str or list of str
The name of the restriction enzyme used, or a list of restriction
enzyme names. Can also be an integer, to digest by fixed chunk size.
circular : bool
Whether the genome is circular.
Returns
-------
numpy.array:
List of restriction fragment boundary positions for the input sequence.
>>> from Bio.Seq import Seq
>>> get_restriction_table(Seq("AAGCCGGATCGG"),"HpaII")
array([ 0, 4, 12])
>>> get_restriction_table(Seq("AA"),["HpaII", "MluCI"])
array([0, 2])
>>> get_restriction_table(Seq("AA"),"aeiou1")
Traceback (most recent call last):
...
ValueError: aeiou1 is not a valid restriction enzyme.
>>> get_restriction_table("AA","HpaII")
Traceback (most recent call last):
...
TypeError: Expected Seq or MutableSeq instance, got <class 'str'> instead
"""
chrom_len = len(seq)
wrong_enzyme = "{} is not a valid restriction enzyme.".format(enzyme)
# Restriction batch containing the restriction enzyme
try:
enz = [enzyme] if isinstance(enzyme, str) else enzyme
cutter = RestrictionBatch(enz)
except (TypeError, ValueError):
try:
cutter = max(int(enzyme), DEFAULT_MIN_CHUNK_SIZE)
except ValueError:
raise ValueError(wrong_enzyme)
# Conversion from string type to restriction type
if isinstance(cutter, int):
sites = [i for i in range(0, chrom_len, cutter)]
if sites[-1] < chrom_len:
sites.append(chrom_len)
else:
# Find sites of all restriction enzymes given
ana = Analysis(cutter, seq, linear=not circular)
sites = ana.full()
# Gets all sites into a single flat list with 0-based index
sites = [site - 1 for enz in sites.values() for site in enz]
# Sort by position, then add the start and end of the sequence
sites.sort()
sites.insert(0, 0)
sites.append(chrom_len)
return np.array(sites)
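# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): the doctests above cover enzyme names, but
# `enzyme` may also be an integer to digest into fixed-size chunks. The exact
# boundaries depend on DEFAULT_MIN_CHUNK_SIZE, defined elsewhere in this
# module; the expected result below assumes it is not larger than 10.
def _demo_fixed_chunk_digestion():
    from Bio.Seq import Seq

    seq = Seq("A" * 25)
    # If DEFAULT_MIN_CHUNK_SIZE <= 10, chunk starts appear every 10 bp and the
    # sequence end is appended, giving [0, 10, 20, 25].
    return get_restriction_table(seq, 10)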
def find_frag(pos, r_sites):
"""
Use binary search to find the | attribute_fragments | identifier_name |
digest.py | )
pairs_writer = csv.DictWriter(
idx_pairs, fieldnames=idx_cols, delimiter="\t"
)
for pair in pairs_reader:
# Get the 0-based indices of corresponding restriction fragments
# Deducing 1 from pair position to get it into 0bp point
pair["frag1"] = find_frag(
int(pair["pos1"]) - 1, restriction_table[pair["chr1"]]
)
pair["frag2"] = find_frag(
int(pair["pos2"]) - 1, restriction_table[pair["chr2"]]
)
# Shift fragment indices to make them genome-based instead of
# chromosome-based
try:
pair["frag1"] += shift_frags[pair["chr1"]]
except KeyError:
missing_contigs.add(pair["chr1"])
try:
pair["frag2"] += shift_frags[pair["chr2"]]
except KeyError:
missing_contigs.add(pair["chr2"])
# Write indexed pairs in the new file
pairs_writer.writerow(pair)
if missing_contigs:
logger.warning(
"Pairs on the following contigs were discarded as "
"those contigs are not listed in the paris file header. "
"This is normal if you filtered out small contigs: %s"
% " ".join(list(missing_contigs))
)
def get_restriction_table(seq, enzyme, circular=False):
"""
Get the restriction table for a single genomic sequence.
Parameters
----------
seq : Seq object
A biopython Seq object representing a chromosomes or contig.
enzyme : int, str or list of str
The name of the restriction enzyme used, or a list of restriction
enzyme names. Can also be an integer, to digest by fixed chunk size.
circular : bool
Whether the genome is circular.
Returns
-------
numpy.array:
List of restriction fragment boundary positions for the input sequence.
>>> from Bio.Seq import Seq
>>> get_restriction_table(Seq("AAGCCGGATCGG"),"HpaII")
array([ 0, 4, 12])
>>> get_restriction_table(Seq("AA"),["HpaII", "MluCI"])
array([0, 2])
>>> get_restriction_table(Seq("AA"),"aeiou1")
Traceback (most recent call last):
...
ValueError: aeiou1 is not a valid restriction enzyme.
>>> get_restriction_table("AA","HpaII")
Traceback (most recent call last):
...
TypeError: Expected Seq or MutableSeq instance, got <class 'str'> instead
"""
chrom_len = len(seq)
wrong_enzyme = "{} is not a valid restriction enzyme.".format(enzyme)
# Restriction batch containing the restriction enzyme
try:
enz = [enzyme] if isinstance(enzyme, str) else enzyme
cutter = RestrictionBatch(enz)
except (TypeError, ValueError):
try:
cutter = max(int(enzyme), DEFAULT_MIN_CHUNK_SIZE)
except ValueError:
raise ValueError(wrong_enzyme)
# Conversion from string type to restriction type
if isinstance(cutter, int):
sites = [i for i in range(0, chrom_len, cutter)]
if sites[-1] < chrom_len:
sites.append(chrom_len)
else:
# Find sites of all restriction enzymes given
ana = Analysis(cutter, seq, linear=not circular)
sites = ana.full()
# Gets all sites into a single flat list with 0-based index
sites = [site - 1 for enz in sites.values() for site in enz]
# Sort by position, then add the start and end of the sequence
sites.sort()
sites.insert(0, 0)
sites.append(chrom_len)
return np.array(sites)
def find_frag(pos, r_sites):
"""
Use binary search to find the index of a chromosome restriction fragment
corresponding to an input genomic position.
Parameters
----------
pos : int
Genomic position, in base pairs.
r_sites : list
List of genomic positions corresponding to restriction sites.
Returns
-------
int
The 0-based index of the restriction fragment to which the position belongs.
>>> find_frag(15, [0, 20, 30])
0
>>> find_frag(15, [10, 20, 30])
Traceback (most recent call last):
...
ValueError: The first position in the restriction table is not 0.
>>> find_frag(31, [0, 20, 30])
Traceback (most recent call last):
...
ValueError: Read position is larger than last entry in restriction table.
"""
if r_sites[0] != 0:
raise ValueError(
"The first position in the restriction table is not 0."
)
if pos > r_sites[-1]:
raise ValueError(
"Read position is larger than last entry in restriction table."
)
# binary search for the index of the read
index = max(np.searchsorted(r_sites, pos, side="right") - 1, 0)
# Last site = end of the chrom, index of last fragment is last site - 1
index = min(len(r_sites) - 2, index)
return index
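# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not used by this module): the NOTE in
# attribute_fragments flags the per-read binary search as a bottleneck; many
# positions can be assigned at once with a vectorised numpy search. The input
# validation performed by find_frag is omitted here for brevity.
def _find_frags_vectorised(positions, r_sites):
    positions = np.asarray(positions)
    idx = np.searchsorted(r_sites, positions, side="right") - 1
    # Clamp to valid fragment indices, as find_frag does for single positions.
    return np.clip(idx, 0, len(r_sites) - 2)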
def frag_len(
frags_file_name=DEFAULT_FRAGMENTS_LIST_FILE_NAME,
output_dir=None,
plot=False,
fig_path=None,
):
"""
Logs summary statistics of the fragment length distribution based on an
input fragment file. Can optionally show a histogram instead
of a text summary.
Parameters
----------
frags_file_name : str
Path to the output list of fragments.
output_dir : str
Directory where the list should be saved.
plot : bool
Whether a histogram of fragment lengths should be shown.
fig_path : str
If a path is given, the figure will be saved instead of shown.
"""
try:
frag_list_path = os.path.join(output_dir, frags_file_name)
except TypeError:
frag_list_path = frags_file_name
frags = pd.read_csv(frag_list_path, sep="\t")
nfrags = frags.shape[0]
med_len = frags["size"].median()
nbins = 40
if plot:
fig, ax = plt.subplots()
_, _, _ = ax.hist(frags["size"], bins=nbins)
ax.set_xlabel("Fragment length [bp]")
ax.set_ylabel("Log10 number of fragments")
ax.set_title("Distribution of restriction fragment length")
ax.set_yscale("log", base=10)
ax.annotate(
"Total fragments: {}".format(nfrags),
xy=(0.95, 0.95),
xycoords="axes fraction",
fontsize=12,
horizontalalignment="right",
verticalalignment="top",
)
ax.annotate(
"Median length: {}".format(med_len),
xy=(0.95, 0.90),
xycoords="axes fraction",
fontsize=12,
horizontalalignment="right",
verticalalignment="top",
)
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
if fig_path:
plt.savefig(fig_path)
else:
plt.show()
plt.clf()
else:
logger.info(
"Genome digested into {0} fragments with a median "
"length of {1}".format(nfrags, med_len)
)
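# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only): summarising a fragments list written
# by write_frag_info. The file and directory names below are hypothetical.
def _demo_frag_len_summary():
    # Logs the number of fragments and their median length...
    frag_len(frags_file_name="fragments_list.txt", output_dir="hicstuff_out")
    # ...or saves a histogram of fragment lengths instead of displaying it.
    frag_len(
        frags_file_name="fragments_list.txt",
        output_dir="hicstuff_out",
        plot=True,
        fig_path="fragments_histogram.png",
    )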
def gen_enzyme_religation_regex(enzyme):
"""Return a regex which corresponds to all possible religation sites given a
set of enzymes.
Parameters:
-----------
enzyme : str
String that contains the names of the enzymes, separated by commas.
Returns:
--------
re.Pattern :
Regex that corresponds to all possible ligation sites given a set of
enzymes.
Examples:
---------
>>> gen_enzyme_religation_regex('HpaII')
re.compile('CCGCGG')
>>> gen_enzyme_religation_regex('HpaII,MluCI')
re.compile('AATTAATT|AATTCGG|CCGAATT|CCGCGG')
"""
# Split the str on the comma to separate the different enzymes.
enzyme = enzyme.split(",")
# Check the enzyme names against Biopython's restriction enzyme collection.
rb = RestrictionBatch(enzyme)
# Initialization:
give_list = []
accept_list = []
ligation_list = []
# Iterates on the enzymes.
for enz in rb:
# Extract restriction sites and look for cut sites.
| site = enz.elucidate()
fw_cut = site.find("^")
rev_cut = site.find("_")
# Process "give" site. Remove N on the left (useless).
give_site = site[:rev_cut].replace("^", "")
while give_site[0] == "N":
give_site = give_site[1:]
give_list.append(give_site)
# Process "accept" site. Remove N on the rigth (useless).
accept_site = site[fw_cut + 1 :].replace("_", "")
while accept_site[-1] == "N":
accept_site = accept_site[:-1]
accept_list.append(accept_site) | conditional_block |
|
digest.py | records:
contig_seq = record.seq
contig_name = record.id
contig_length = len(contig_seq)
if contig_length < int(min_size):
continue
sites = get_restriction_table(
contig_seq, enzyme, circular=circular
)
fragments = (
contig_seq[sites[i] : sites[i + 1]]
for i in range(len(sites) - 1)
)
n_frags = 0
current_id = 1
start_pos = 0
for frag in fragments:
frag_length = len(frag)
if frag_length > 0:
end_pos = start_pos + frag_length
gc_content = SeqUtils.GC(frag) / 100.0
current_fragment_line = "%s\t%s\t%s\t%s\t%s\t%s\n" % (
current_id,
contig_name,
start_pos,
end_pos,
frag_length,
gc_content,
)
fragments_list.write(current_fragment_line)
try:
assert (current_id == 1 and start_pos == 0) or (
current_id > 1 and start_pos > 0
)
except AssertionError:
logger.error((current_id, start_pos))
raise
start_pos = end_pos
current_id += 1
n_frags += 1
current_contig_line = "%s\t%s\t%s\t%s\n" % (
contig_name,
contig_length,
n_frags,
total_frags,
)
total_frags += n_frags
info_contigs.write(current_contig_line)
def attribute_fragments(pairs_file, idx_pairs_file, restriction_table):
"""
Writes the indexed pairs file, which has two more columns than the input
pairs file corresponding to the restriction fragment index of each read.
Note that pairs files have 1bp point positions whereas the restriction table
has 0bp point positions.
Parameters
----------
pairs_file: str
Path to the input pairs file. Consists of 7 tab-separated
columns: readID, chr1, pos1, chr2, pos2, strand1, strand2
idx_pairs_file: str
Path to the output indexed pairs file. Consists of 9 white space
separated columns: readID, chr1, pos1, chr2, pos2, strand1, strand2,
frag1, frag2. frag1 and frag2 are 0-based restriction fragments based
on whole genome.
restriction_table: dict
Dictionary with chromosome identifiers (str) as keys and list of
positions (int) of restriction sites as values.
"""
# NOTE: Bottlenecks here are 1. binary search in find_frag and 2. writerow
# 1. could be reduced by searching groups of N frags in parallel and 2. by
# writing N frags simultaneously using a single call of writerows.
# Parse and update header section
pairs_header = hio.get_pairs_header(pairs_file)
header_size = len(pairs_header)
chrom_order = []
with open(idx_pairs_file, "w") as idx_pairs:
for line in pairs_header:
# Add new column names to header
if line.startswith("#columns"):
line = line.rstrip() + " frag1 frag2"
if line.startswith("#chromsize"):
chrom_order.append(line.split()[1])
idx_pairs.write(line + "\n")
# Get number of fragments per chrom to allow genome-based indices
shift_frags = {}
prev_frags = 0
for rank, chrom in enumerate(chrom_order):
if rank > 0:
# Note the "-1" because there are nfrags + 1 sites in rest table
prev_frags += len(restriction_table[chrom_order[rank - 1]]) - 1
# Idx of each chrom's frags will be shifted by n frags in previous chroms
shift_frags[chrom] = prev_frags
missing_contigs = set()
# Attribute pairs to fragments and append them to output file (after header)
with open(pairs_file, "r") as pairs, open(
idx_pairs_file, "a"
) as idx_pairs:
# Skip header lines
for _ in range(header_size):
next(pairs)
# Define input and output fields
pairs_cols = [
"readID",
"chr1",
"pos1",
"chr2",
"pos2",
"strand1",
"strand2",
]
idx_cols = pairs_cols + ["frag1", "frag2"]
# Use csv reader / writer to automatically parse columns into a dict
pairs_reader = csv.DictReader(
pairs, fieldnames=pairs_cols, delimiter="\t"
)
pairs_writer = csv.DictWriter(
idx_pairs, fieldnames=idx_cols, delimiter="\t"
)
for pair in pairs_reader:
# Get the 0-based indices of corresponding restriction fragments
# Deducing 1 from pair position to get it into 0bp point
pair["frag1"] = find_frag(
int(pair["pos1"]) - 1, restriction_table[pair["chr1"]]
)
pair["frag2"] = find_frag(
int(pair["pos2"]) - 1, restriction_table[pair["chr2"]]
)
# Shift fragment indices to make them genome-based instead of
# chromosome-based
try:
pair["frag1"] += shift_frags[pair["chr1"]]
except KeyError:
missing_contigs.add(pair["chr1"])
try:
pair["frag2"] += shift_frags[pair["chr2"]]
except KeyError:
missing_contigs.add(pair["chr2"])
# Write indexed pairs in the new file
pairs_writer.writerow(pair)
if missing_contigs:
logger.warning(
"Pairs on the following contigs were discarded as "
"those contigs are not listed in the paris file header. "
"This is normal if you filtered out small contigs: %s"
% " ".join(list(missing_contigs))
)
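# Illustrative sketch (hypothetical values, not from the original module): the
# function above turns a 7-column pairs row into a 9-column indexed row. A
# minimal, self-contained picture of that output format, with made-up
# fragment numbers standing in for the find_frag + shift results:
import csv, io
demo_row = {"readID": "read1", "chr1": "chr1", "pos1": "1530",
            "chr2": "chr2", "pos2": "880", "strand1": "+", "strand2": "-"}
demo_row["frag1"], demo_row["frag2"] = 4, 12  # normally computed, not fixed
demo_buf = io.StringIO()
csv.DictWriter(demo_buf, fieldnames=list(demo_row), delimiter="\t").writerow(demo_row)
# demo_buf.getvalue() -> "read1\tchr1\t1530\tchr2\t880\t+\t-\t4\t12\r\n"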
def get_restriction_table(seq, enzyme, circular=False):
"""
Get the restriction table for a single genomic sequence.
Parameters
----------
seq : Seq object
A biopython Seq object representing a chromosome or contig.
enzyme : int, str or list of str
The name of the restriction enzyme used, or a list of restriction
enzyme names. Can also be an integer, to digest by fixed chunk size.
circular : bool
Whether the genome is circular.
Returns
-------
numpy.array:
List of restriction fragment boundary positions for the input sequence.
>>> from Bio.Seq import Seq
>>> get_restriction_table(Seq("AAGCCGGATCGG"),"HpaII")
array([ 0, 4, 12])
>>> get_restriction_table(Seq("AA"),["HpaII", "MluCI"])
array([0, 2])
>>> get_restriction_table(Seq("AA"),"aeiou1")
Traceback (most recent call last):
...
ValueError: aeiou1 is not a valid restriction enzyme.
>>> get_restriction_table("AA","HpaII")
Traceback (most recent call last):
...
TypeError: Expected Seq or MutableSeq instance, got <class 'str'> instead
"""
chrom_len = len(seq)
wrong_enzyme = "{} is not a valid restriction enzyme.".format(enzyme)
# Restriction batch containing the restriction enzyme
try:
enz = [enzyme] if isinstance(enzyme, str) else enzyme
cutter = RestrictionBatch(enz)
except (TypeError, ValueError):
try:
cutter = max(int(enzyme), DEFAULT_MIN_CHUNK_SIZE)
except ValueError:
raise ValueError(wrong_enzyme)
# Conversion from string type to restriction type
if isinstance(cutter, int):
sites = [i for i in range(0, chrom_len, cutter)]
if sites[-1] < chrom_len:
sites.append(chrom_len)
else:
# Find sites of all restriction enzymes given
ana = Analysis(cutter, seq, linear=not circular)
sites = ana.full()
# Gets all sites into a single flat list with 0-based index
sites = [site - 1 for enz in sites.values() for site in enz]
# Sort by position, then add the start and end of the sequence
sites.sort()
sites.insert(0, 0)
sites.append(chrom_len)
return np.array(sites)
def find_frag(pos, r_sites):
| """
Use binary search to find the index of a chromosome restriction fragment
corresponding to an input genomic position.
Parameters
----------
pos : int
Genomic position, in base pairs.
r_sites : list
List of genomic positions corresponding to restriction sites.
Returns
-------
int
The 0-based index of the restriction fragment to which the position belongs.
>>> find_frag(15, [0, 20, 30])
0
>>> find_frag(15, [10, 20, 30])
Traceback (most recent call last):
...
ValueError: The first position in the restriction table is not 0. | identifier_body |
|
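# Illustrative sketch (hypothetical helper, not the elided function body
# above): the find_frag docstring describes a binary search over sorted
# restriction-site positions. A minimal stand-alone version of that idea:
import bisect

def find_frag_sketch(pos, r_sites):
    if r_sites[0] != 0:
        raise ValueError(
            "The first position in the restriction table is not 0."
        )
    # Number of sites <= pos, minus one, is the 0-based fragment index.
    return bisect.bisect_right(r_sites, pos) - 1

# find_frag_sketch(15, [0, 20, 30]) -> 0, matching the doctest above.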
digest.py | _contigs_path = os.path.join(output_dir, output_contigs)
frag_list_path = os.path.join(output_dir, output_frags)
except TypeError:
info_contigs_path = output_contigs
frag_list_path = output_frags
with open(info_contigs_path, "w") as info_contigs:
info_contigs.write("contig\tlength\tn_frags\tcumul_length\n")
with open(frag_list_path, "w") as fragments_list:
fragments_list.write(
"id\tchrom\tstart_pos" "\tend_pos\tsize\tgc_content\n"
)
total_frags = 0
for record in records:
contig_seq = record.seq
contig_name = record.id
contig_length = len(contig_seq)
if contig_length < int(min_size):
continue
sites = get_restriction_table(
contig_seq, enzyme, circular=circular
)
fragments = (
contig_seq[sites[i] : sites[i + 1]]
for i in range(len(sites) - 1)
)
n_frags = 0
current_id = 1
start_pos = 0
for frag in fragments:
frag_length = len(frag)
if frag_length > 0:
end_pos = start_pos + frag_length
gc_content = SeqUtils.GC(frag) / 100.0
current_fragment_line = "%s\t%s\t%s\t%s\t%s\t%s\n" % (
current_id,
contig_name,
start_pos,
end_pos,
frag_length,
gc_content,
)
fragments_list.write(current_fragment_line)
try:
assert (current_id == 1 and start_pos == 0) or (
current_id > 1 and start_pos > 0
)
except AssertionError:
logger.error((current_id, start_pos))
raise
start_pos = end_pos
current_id += 1
n_frags += 1
| n_frags,
total_frags,
)
total_frags += n_frags
info_contigs.write(current_contig_line)
def attribute_fragments(pairs_file, idx_pairs_file, restriction_table):
"""
Writes the indexed pairs file, which has two more columns than the input
pairs file corresponding to the restriction fragment index of each read.
Note that pairs files have 1bp point positions whereas restriction table
has 0bp point positions.
Parameters
----------
pairs_file: str
Path to the input pairs file. Consists of 7 tab-separated
columns: readID, chr1, pos1, chr2, pos2, strand1, strand2
idx_pairs_file: str
Path to the output indexed pairs file. Consists of 9 white space
separated columns: readID, chr1, pos1, chr2, pos2, strand1, strand2,
frag1, frag2. frag1 and frag2 are 0-based restriction fragments based
on whole genome.
restriction_table: dict
Dictionary with chromosome identifiers (str) as keys and list of
positions (int) of restriction sites as values.
"""
# NOTE: Bottlenecks here are 1. binary search in find_frag and 2. writerow
# 1. could be reduced by searching groups of N frags in parallel and 2. by
# writing N frags simultaneously using a single call of writerows.
# Parse and update header section
pairs_header = hio.get_pairs_header(pairs_file)
header_size = len(pairs_header)
chrom_order = []
with open(idx_pairs_file, "w") as idx_pairs:
for line in pairs_header:
# Add new column names to header
if line.startswith("#columns"):
line = line.rstrip() + " frag1 frag2"
if line.startswith("#chromsize"):
chrom_order.append(line.split()[1])
idx_pairs.write(line + "\n")
# Get number of fragments per chrom to allow genome-based indices
shift_frags = {}
prev_frags = 0
for rank, chrom in enumerate(chrom_order):
if rank > 0:
# Note the "-1" because there are nfrags + 1 sites in rest table
prev_frags += len(restriction_table[chrom_order[rank - 1]]) - 1
# Idx of each chrom's frags will be shifted by n frags in previous chroms
shift_frags[chrom] = prev_frags
missing_contigs = set()
# Attribute pairs to fragments and append them to output file (after header)
with open(pairs_file, "r") as pairs, open(
idx_pairs_file, "a"
) as idx_pairs:
# Skip header lines
for _ in range(header_size):
next(pairs)
# Define input and output fields
pairs_cols = [
"readID",
"chr1",
"pos1",
"chr2",
"pos2",
"strand1",
"strand2",
]
idx_cols = pairs_cols + ["frag1", "frag2"]
# Use csv reader / writer to automatically parse columns into a dict
pairs_reader = csv.DictReader(
pairs, fieldnames=pairs_cols, delimiter="\t"
)
pairs_writer = csv.DictWriter(
idx_pairs, fieldnames=idx_cols, delimiter="\t"
)
for pair in pairs_reader:
# Get the 0-based indices of corresponding restriction fragments
# Deducing 1 from pair position to get it into 0bp point
pair["frag1"] = find_frag(
int(pair["pos1"]) - 1, restriction_table[pair["chr1"]]
)
pair["frag2"] = find_frag(
int(pair["pos2"]) - 1, restriction_table[pair["chr2"]]
)
# Shift fragment indices to make them genome-based instead of
# chromosome-based
try:
pair["frag1"] += shift_frags[pair["chr1"]]
except KeyError:
missing_contigs.add(pair["chr1"])
try:
pair["frag2"] += shift_frags[pair["chr2"]]
except KeyError:
missing_contigs.add(pair["chr2"])
# Write indexed pairs in the new file
pairs_writer.writerow(pair)
if missing_contigs:
logger.warning(
"Pairs on the following contigs were discarded as "
"those contigs are not listed in the paris file header. "
"This is normal if you filtered out small contigs: %s"
% " ".join(list(missing_contigs))
)
def get_restriction_table(seq, enzyme, circular=False):
"""
Get the restriction table for a single genomic sequence.
Parameters
----------
seq : Seq object
A biopython Seq object representing a chromosome or contig.
enzyme : int, str or list of str
The name of the restriction enzyme used, or a list of restriction
enzyme names. Can also be an integer, to digest by fixed chunk size.
circular : bool
Whether the genome is circular.
Returns
-------
numpy.array:
List of restriction fragment boundary positions for the input sequence.
>>> from Bio.Seq import Seq
>>> get_restriction_table(Seq("AAGCCGGATCGG"),"HpaII")
array([ 0, 4, 12])
>>> get_restriction_table(Seq("AA"),["HpaII", "MluCI"])
array([0, 2])
>>> get_restriction_table(Seq("AA"),"aeiou1")
Traceback (most recent call last):
...
ValueError: aeiou1 is not a valid restriction enzyme.
>>> get_restriction_table("AA","HpaII")
Traceback (most recent call last):
...
TypeError: Expected Seq or MutableSeq instance, got <class 'str'> instead
"""
chrom_len = len(seq)
wrong_enzyme = "{} is not a valid restriction enzyme.".format(enzyme)
# Restriction batch containing the restriction enzyme
try:
enz = [enzyme] if isinstance(enzyme, str) else enzyme
cutter = RestrictionBatch(enz)
except (TypeError, ValueError):
try:
cutter = max(int(enzyme), DEFAULT_MIN_CHUNK_SIZE)
except ValueError:
raise ValueError(wrong_enzyme)
# Conversion from string type to restriction type
if isinstance(cutter, int):
sites = [i for i in range(0, chrom_len, cutter)]
if sites[-1] < chrom_len:
sites.append(chrom_len)
else:
# Find sites of all restriction enzymes given
ana = Analysis(cutter, seq, linear=not circular)
sites = ana.full()
# Gets all sites into a single flat list with 0-based index
sites = [site - 1 for enz in sites.values() for site in enz]
# Sort by position, then add the start and end of the sequence
sites.sort()
sites.insert(0, 0)
sites.append(chrom_len)
return np.array(sites)
def find_frag(pos, r_sites):
"""
Use binary search to find the index | current_contig_line = "%s\t%s\t%s\t%s\n" % (
contig_name,
contig_length, | random_line_split |
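# Illustrative sketch (made-up restriction table, not from the original
# module): attribute_fragments converts chromosome-based fragment indices to
# genome-wide ones by adding the fragment count of all preceding chromosomes.
# The bookkeeping works like this:
restriction_table = {"chr1": [0, 10, 25], "chr2": [0, 40]}
chrom_order = ["chr1", "chr2"]
shift_frags, prev_frags = {}, 0
for rank, chrom in enumerate(chrom_order):
    if rank > 0:
        # Each chromosome has len(sites) - 1 fragments.
        prev_frags += len(restriction_table[chrom_order[rank - 1]]) - 1
    shift_frags[chrom] = prev_frags
# shift_frags == {"chr1": 0, "chr2": 2}: chr2's local fragment 0 becomes
# genome-wide fragment 2, because chr1 contributes two fragments.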
mod.rs | service in services {
let id = service.service_id() as usize;
if service_map.contains_key(id) {
panic!(
"Services have already contain service with id={}, please change it.",
id
);
}
service_map.insert(id, service);
}
Self {
db: storage.into(),
service_map: Arc::new(service_map),
service_keypair: (service_public_key, service_secret_key),
api_sender,
}
}
/// Recreates the blockchain to reuse with a sandbox.
#[doc(hidden)]
pub fn clone_with_api_sender(&self, api_sender: ApiSender) -> Self {
Self {
api_sender,
..self.clone()
}
}
/// Returns the `VecMap` for all services. This is a map which
/// contains service identifiers and service interfaces. The VecMap
/// allows proceeding from the service identifier to the service itself.
pub fn service_map(&self) -> &Arc<VecMap<Box<dyn Service>>> {
&self.service_map
}
/// Creates a read-only snapshot of the current storage state.
pub fn snapshot(&self) -> Box<dyn Snapshot> {
self.db.snapshot()
}
/// Creates a snapshot of the current storage state that can be later committed into the storage
/// via the `merge` method.
pub fn fork(&self) -> Fork {
self.db.fork()
}
/// Tries to create a `Transaction` object from the given raw message.
/// A raw message can be converted into a `Transaction` object only
/// if the following conditions are met:
///
/// - Blockchain has a service with the `service_id` of the given raw message.
/// - Service can deserialize the given raw message.
pub fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<dyn Transaction>, MessageError> {
let id = raw.service_id() as usize;
let service = self.service_map
.get(id)
.ok_or_else(|| MessageError::from("Service not found."))?;
service.tx_from_raw(raw)
}
/// Commits changes from the patch to the blockchain storage.
/// See [`Fork`](../storage/struct.Fork.html) for details.
pub fn merge(&mut self, patch: Patch) -> Result<(), Error> {
self.db.merge(patch)
}
/// Returns the hash of the latest committed block.
///
/// # Panics
///
/// If the genesis block was not committed.
pub fn last_hash(&self) -> Hash {
Schema::new(&self.snapshot())
.block_hashes_by_height()
.last()
.unwrap_or_else(Hash::default)
}
/// Returns the latest committed block.
pub fn last_block(&self) -> Block {
Schema::new(&self.snapshot()).last_block()
}
/// Creates and commits the genesis block with the given genesis configuration
/// if the blockchain has not been initialized.
///
/// # Panics
///
/// * If the genesis block was not committed.
/// * If storage version is not specified or not supported.
pub fn initialize(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let has_genesis_block = !Schema::new(&self.snapshot())
.block_hashes_by_height()
.is_empty();
if has_genesis_block {
self.assert_storage_version();
} else {
self.initialize_metadata();
self.create_genesis_block(cfg)?;
}
Ok(())
}
/// Initializes node-local metadata.
fn initialize_metadata(&mut self) {
let mut fork = self.db.fork();
storage::StorageMetadata::write_current(&mut fork);
if self.merge(fork.into_patch()).is_ok() | else {
panic!("Could not set database version.")
}
}
/// Checks if storage version is supported.
///
/// # Panics
///
/// Panics if version is not supported or is not specified.
fn assert_storage_version(&self) {
match storage::StorageMetadata::read(self.db.snapshot()) {
Ok(ver) => info!("Storage version is supported with value [{}].", ver),
Err(e) => panic!("{}", e),
}
}
/// Creates and commits the genesis block with the given genesis configuration.
fn create_genesis_block(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let mut config_propose = StoredConfiguration {
previous_cfg_hash: Hash::zero(),
actual_from: Height::zero(),
validator_keys: cfg.validator_keys,
consensus: cfg.consensus,
services: BTreeMap::new(),
};
let patch = {
let mut fork = self.fork();
// Update service tables
for (_, service) in self.service_map.iter() {
let cfg = service.initialize(&mut fork);
let name = service.service_name();
if config_propose.services.contains_key(name) {
panic!(
"Services already contain service with '{}' name, please change it",
name
);
}
config_propose.services.insert(name.into(), cfg);
}
// Commit actual configuration
{
let mut schema = Schema::new(&mut fork);
if schema.block_hash_by_height(Height::zero()).is_some() {
// TODO create genesis block for MemoryDB and compare it hash with zero block. (ECR-1630)
return Ok(());
}
schema.commit_configuration(config_propose);
};
self.merge(fork.into_patch())?;
self.create_patch(ValidatorId::zero(), Height::zero(), &[])
.1
};
self.merge(patch)?;
Ok(())
}
/// Helper function to map a tuple (`u16`, `u16`) of service table coordinates
/// to a 32-byte value to be used as the `ProofMapIndex` key (it currently
/// supports only fixed size keys). The `hash` function is used to distribute
/// keys uniformly (compared to padding).
/// # Arguments
///
/// * `service_id` - `service_id` as returned by instance of type of
/// `Service` trait
/// * `table_idx` - index of service table in `Vec`, returned by the
/// `state_hash` method of instance of type of `Service` trait
// Also, using `hash` was the first idea considered here.
pub fn service_table_unique_key(service_id: u16, table_idx: usize) -> Hash {
debug_assert!(table_idx <= u16::max_value() as usize);
let size = mem::size_of::<u16>();
let mut vec = vec![0; 2 * size];
LittleEndian::write_u16(&mut vec[0..size], service_id);
LittleEndian::write_u16(&mut vec[size..2 * size], table_idx as u16);
crypto::hash(&vec)
}
/// Executes the given transactions from the pool.
/// Then collects the resulting changes from the current storage state and returns them
/// with the hash of the resulting block.
pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
let state_hashes = {
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
let mut schema = Schema::new(&mut fork);
let state_hash = {
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
propos | {
info!(
"Storage version successfully initialized with value [{}].",
storage::StorageMetadata::read(&self.db.snapshot()).unwrap(),
)
} | conditional_block |
mod.rs | service in services {
let id = service.service_id() as usize;
if service_map.contains_key(id) {
panic!(
"Services have already contain service with id={}, please change it.",
id
);
}
service_map.insert(id, service);
}
Self {
db: storage.into(),
service_map: Arc::new(service_map),
service_keypair: (service_public_key, service_secret_key),
api_sender,
}
}
/// Recreates the blockchain to reuse with a sandbox.
#[doc(hidden)]
pub fn clone_with_api_sender(&self, api_sender: ApiSender) -> Self {
Self {
api_sender,
..self.clone()
}
}
/// Returns the `VecMap` for all services. This is a map which
/// contains service identifiers and service interfaces. The VecMap
/// allows proceeding from the service identifier to the service itself.
pub fn service_map(&self) -> &Arc<VecMap<Box<dyn Service>>> {
&self.service_map
}
/// Creates a read-only snapshot of the current storage state.
pub fn snapshot(&self) -> Box<dyn Snapshot> {
self.db.snapshot()
}
/// Creates a snapshot of the current storage state that can be later committed into the storage
/// via the `merge` method.
pub fn fork(&self) -> Fork {
self.db.fork()
}
/// Tries to create a `Transaction` object from the given raw message.
/// A raw message can be converted into a `Transaction` object only
/// if the following conditions are met:
///
/// - Blockchain has a service with the `service_id` of the given raw message.
/// - Service can deserialize the given raw message.
pub fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<dyn Transaction>, MessageError> {
let id = raw.service_id() as usize;
let service = self.service_map
.get(id)
.ok_or_else(|| MessageError::from("Service not found."))?;
service.tx_from_raw(raw)
}
/// Commits changes from the patch to the blockchain storage.
/// See [`Fork`](../storage/struct.Fork.html) for details.
pub fn merge(&mut self, patch: Patch) -> Result<(), Error> |
/// Returns the hash of the latest committed block.
///
/// # Panics
///
/// If the genesis block was not committed.
pub fn last_hash(&self) -> Hash {
Schema::new(&self.snapshot())
.block_hashes_by_height()
.last()
.unwrap_or_else(Hash::default)
}
/// Returns the latest committed block.
pub fn last_block(&self) -> Block {
Schema::new(&self.snapshot()).last_block()
}
/// Creates and commits the genesis block with the given genesis configuration
/// if the blockchain has not been initialized.
///
/// # Panics
///
/// * If the genesis block was not committed.
/// * If storage version is not specified or not supported.
pub fn initialize(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let has_genesis_block = !Schema::new(&self.snapshot())
.block_hashes_by_height()
.is_empty();
if has_genesis_block {
self.assert_storage_version();
} else {
self.initialize_metadata();
self.create_genesis_block(cfg)?;
}
Ok(())
}
/// Initializes node-local metadata.
fn initialize_metadata(&mut self) {
let mut fork = self.db.fork();
storage::StorageMetadata::write_current(&mut fork);
if self.merge(fork.into_patch()).is_ok() {
info!(
"Storage version successfully initialized with value [{}].",
storage::StorageMetadata::read(&self.db.snapshot()).unwrap(),
)
} else {
panic!("Could not set database version.")
}
}
/// Checks if storage version is supported.
///
/// # Panics
///
/// Panics if version is not supported or is not specified.
fn assert_storage_version(&self) {
match storage::StorageMetadata::read(self.db.snapshot()) {
Ok(ver) => info!("Storage version is supported with value [{}].", ver),
Err(e) => panic!("{}", e),
}
}
/// Creates and commits the genesis block with the given genesis configuration.
fn create_genesis_block(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let mut config_propose = StoredConfiguration {
previous_cfg_hash: Hash::zero(),
actual_from: Height::zero(),
validator_keys: cfg.validator_keys,
consensus: cfg.consensus,
services: BTreeMap::new(),
};
let patch = {
let mut fork = self.fork();
// Update service tables
for (_, service) in self.service_map.iter() {
let cfg = service.initialize(&mut fork);
let name = service.service_name();
if config_propose.services.contains_key(name) {
panic!(
"Services already contain service with '{}' name, please change it",
name
);
}
config_propose.services.insert(name.into(), cfg);
}
// Commit actual configuration
{
let mut schema = Schema::new(&mut fork);
if schema.block_hash_by_height(Height::zero()).is_some() {
// TODO create genesis block for MemoryDB and compare it hash with zero block. (ECR-1630)
return Ok(());
}
schema.commit_configuration(config_propose);
};
self.merge(fork.into_patch())?;
self.create_patch(ValidatorId::zero(), Height::zero(), &[])
.1
};
self.merge(patch)?;
Ok(())
}
/// Helper function to map a tuple (`u16`, `u16`) of service table coordinates
/// to a 32-byte value to be used as the `ProofMapIndex` key (it currently
/// supports only fixed size keys). The `hash` function is used to distribute
/// keys uniformly (compared to padding).
/// # Arguments
///
/// * `service_id` - `service_id` as returned by instance of type of
/// `Service` trait
/// * `table_idx` - index of service table in `Vec`, returned by the
/// `state_hash` method of instance of type of `Service` trait
// Also, using `hash` was the first idea considered here.
pub fn service_table_unique_key(service_id: u16, table_idx: usize) -> Hash {
debug_assert!(table_idx <= u16::max_value() as usize);
let size = mem::size_of::<u16>();
let mut vec = vec![0; 2 * size];
LittleEndian::write_u16(&mut vec[0..size], service_id);
LittleEndian::write_u16(&mut vec[size..2 * size], table_idx as u16);
crypto::hash(&vec)
}
/// Executes the given transactions from the pool.
/// Then collects the resulting changes from the current storage state and returns them
/// with the hash of the resulting block.
pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
let state_hashes = {
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
let mut schema = Schema::new(&mut fork);
let state_hash = {
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
| {
self.db.merge(patch)
} | identifier_body |
mod.rs | _patch())?;
self.create_patch(ValidatorId::zero(), Height::zero(), &[])
.1
};
self.merge(patch)?;
Ok(())
}
/// Helper function to map a tuple (`u16`, `u16`) of service table coordinates
/// to a 32-byte value to be used as the `ProofMapIndex` key (it currently
/// supports only fixed size keys). The `hash` function is used to distribute
/// keys uniformly (compared to padding).
/// # Arguments
///
/// * `service_id` - `service_id` as returned by instance of type of
/// `Service` trait
/// * `table_idx` - index of service table in `Vec`, returned by the
/// `state_hash` method of instance of type of `Service` trait
// Also, using `hash` was the first idea considered here.
pub fn service_table_unique_key(service_id: u16, table_idx: usize) -> Hash {
debug_assert!(table_idx <= u16::max_value() as usize);
let size = mem::size_of::<u16>();
let mut vec = vec![0; 2 * size];
LittleEndian::write_u16(&mut vec[0..size], service_id);
LittleEndian::write_u16(&mut vec[size..2 * size], table_idx as u16);
crypto::hash(&vec)
}
/// Executes the given transactions from the pool.
/// Then collects the resulting changes from the current storage state and returns them
/// with the hash of the resulting block.
pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
let state_hashes = {
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
let mut schema = Schema::new(&mut fork);
let state_hash = {
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
proposer_id,
height,
tx_hashes.len() as u32,
&last_hash,
&tx_hash,
&state_hash,
);
trace!("execute block = {:?}", block);
// Calculate block hash.
let block_hash = block.hash();
// Update height.
let mut schema = Schema::new(&mut fork);
schema.block_hashes_by_height_mut().push(block_hash);
// Save block.
schema.blocks_mut().put(&block_hash, block);
block_hash
};
(block_hash, fork.into_patch())
}
fn execute_transaction(
&self,
tx_hash: Hash,
height: Height,
index: usize,
fork: &mut Fork,
) -> Result<(), failure::Error> {
let (tx, raw, service_name) = {
let schema = Schema::new(&fork);
let raw = schema.transactions().get(&tx_hash).ok_or_else(|| {
failure::err_msg(format!(
"BUG: Cannot find transaction in database. tx: {:?}",
tx_hash
))
})?;
let service_name = self.service_map
.get(raw.service_id() as usize)
.ok_or_else(|| {
failure::err_msg(format!(
"Service not found. Service id: {}",
raw.service_id()
))
})?
.service_name();
let tx = self.tx_from_raw(raw.payload().clone()).or_else(|error| {
Err(failure::err_msg(format!(
"Service <{}>: {}, tx: {:?}",
service_name,
error.description(),
tx_hash
)))
})?;
(tx, raw, service_name)
};
fork.checkpoint();
let catch_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let context = TransactionContext::new(&mut *fork, &raw);
tx.execute(context)
}));
let tx_result = TransactionResult(match catch_result {
Ok(execution_result) => {
match execution_result {
Ok(()) => {
fork.commit();
}
Err(ref e) => {
// Unlike panic, transaction failure isn't that rare, so logging the
// whole transaction body is overkill: it can be relatively big.
info!(
"Service <{}>: {:?} transaction execution failed: {:?}",
service_name, tx_hash, e
);
fork.rollback();
}
}
execution_result.map_err(TransactionError::from)
}
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"Service <{}>: {:?} transaction execution panicked: {:?}",
service_name, tx, err
);
Err(TransactionError::from_panic(&err))
}
});
let mut schema = Schema::new(fork);
schema.transaction_results_mut().put(&tx_hash, tx_result);
schema.commit_transaction(&tx_hash);
schema.block_transactions_mut(height).push(tx_hash);
let location = TxLocation::new(height, index as u64);
schema.transactions_locations_mut().put(&tx_hash, location);
Ok(())
}
/// Commits to the blockchain a new block with the indicated changes (patch),
/// hash and Precommit messages. After that invokes `after_commit`
/// for each service in the increasing order of their identifiers.
pub fn commit<I>(&mut self, patch: &Patch, block_hash: Hash, precommits: I) -> Result<(), Error>
where
I: Iterator<Item = Signed<Precommit>>,
{
let patch = {
let mut fork = {
let mut fork = self.db.fork();
fork.merge(patch.clone()); // FIXME: Avoid cloning here. (ECR-1631)
fork
};
{
let mut schema = Schema::new(&mut fork);
for precommit in precommits {
schema.precommits_mut(&block_hash).push(precommit.clone());
}
// Consensus messages cache is useful only during one height, so it should be
// cleared when a new height is achieved.
schema.consensus_messages_cache_mut().clear();
let txs_in_block = schema.last_block().tx_count();
let txs_count = schema.transactions_pool_len_index().get().unwrap_or(0);
debug_assert!(txs_count >= u64::from(txs_in_block));
schema
.transactions_pool_len_index_mut()
.set(txs_count - u64::from(txs_in_block));
}
fork.into_patch()
};
self.merge(patch)?;
// Invokes `after_commit` for each service in order of their identifiers
for (service_id, service) in self.service_map.iter() {
let context = ServiceContext::new(
self.service_keypair.0,
self.service_keypair.1.clone(),
self.api_sender.clone(),
self.fork(),
service_id as u16,
);
service.after_commit(&context);
}
Ok(())
}
/// Saves the `Connect` message from a peer to the cache.
pub(crate) fn save_peer(&mut self, pubkey: &PublicKey, peer: Signed<Connect>) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.peers_cache_mut().put(pubkey, peer);
}
| self.merge(fork.into_patch())
.expect("Unable to save peer to the peers cache");
}
/// Removes from the cache the `Connect` message from a peer. | random_line_split |
|
mod.rs | pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
let state_hashes = {
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
let mut schema = Schema::new(&mut fork);
let state_hash = {
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
proposer_id,
height,
tx_hashes.len() as u32,
&last_hash,
&tx_hash,
&state_hash,
);
trace!("execute block = {:?}", block);
// Calculate block hash.
let block_hash = block.hash();
// Update height.
let mut schema = Schema::new(&mut fork);
schema.block_hashes_by_height_mut().push(block_hash);
// Save block.
schema.blocks_mut().put(&block_hash, block);
block_hash
};
(block_hash, fork.into_patch())
}
fn execute_transaction(
&self,
tx_hash: Hash,
height: Height,
index: usize,
fork: &mut Fork,
) -> Result<(), failure::Error> {
let (tx, raw, service_name) = {
let schema = Schema::new(&fork);
let raw = schema.transactions().get(&tx_hash).ok_or_else(|| {
failure::err_msg(format!(
"BUG: Cannot find transaction in database. tx: {:?}",
tx_hash
))
})?;
let service_name = self.service_map
.get(raw.service_id() as usize)
.ok_or_else(|| {
failure::err_msg(format!(
"Service not found. Service id: {}",
raw.service_id()
))
})?
.service_name();
let tx = self.tx_from_raw(raw.payload().clone()).or_else(|error| {
Err(failure::err_msg(format!(
"Service <{}>: {}, tx: {:?}",
service_name,
error.description(),
tx_hash
)))
})?;
(tx, raw, service_name)
};
fork.checkpoint();
let catch_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let context = TransactionContext::new(&mut *fork, &raw);
tx.execute(context)
}));
let tx_result = TransactionResult(match catch_result {
Ok(execution_result) => {
match execution_result {
Ok(()) => {
fork.commit();
}
Err(ref e) => {
// Unlike panic, transaction failure isn't that rare, so logging the
// whole transaction body is overkill: it can be relatively big.
info!(
"Service <{}>: {:?} transaction execution failed: {:?}",
service_name, tx_hash, e
);
fork.rollback();
}
}
execution_result.map_err(TransactionError::from)
}
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"Service <{}>: {:?} transaction execution panicked: {:?}",
service_name, tx, err
);
Err(TransactionError::from_panic(&err))
}
});
let mut schema = Schema::new(fork);
schema.transaction_results_mut().put(&tx_hash, tx_result);
schema.commit_transaction(&tx_hash);
schema.block_transactions_mut(height).push(tx_hash);
let location = TxLocation::new(height, index as u64);
schema.transactions_locations_mut().put(&tx_hash, location);
Ok(())
}
/// Commits to the blockchain a new block with the indicated changes (patch),
/// hash and Precommit messages. After that invokes `after_commit`
/// for each service in the increasing order of their identifiers.
pub fn commit<I>(&mut self, patch: &Patch, block_hash: Hash, precommits: I) -> Result<(), Error>
where
I: Iterator<Item = Signed<Precommit>>,
{
let patch = {
let mut fork = {
let mut fork = self.db.fork();
fork.merge(patch.clone()); // FIXME: Avoid cloning here. (ECR-1631)
fork
};
{
let mut schema = Schema::new(&mut fork);
for precommit in precommits {
schema.precommits_mut(&block_hash).push(precommit.clone());
}
// Consensus messages cache is useful only during one height, so it should be
// cleared when a new height is achieved.
schema.consensus_messages_cache_mut().clear();
let txs_in_block = schema.last_block().tx_count();
let txs_count = schema.transactions_pool_len_index().get().unwrap_or(0);
debug_assert!(txs_count >= u64::from(txs_in_block));
schema
.transactions_pool_len_index_mut()
.set(txs_count - u64::from(txs_in_block));
}
fork.into_patch()
};
self.merge(patch)?;
// Invokes `after_commit` for each service in order of their identifiers
for (service_id, service) in self.service_map.iter() {
let context = ServiceContext::new(
self.service_keypair.0,
self.service_keypair.1.clone(),
self.api_sender.clone(),
self.fork(),
service_id as u16,
);
service.after_commit(&context);
}
Ok(())
}
/// Saves the `Connect` message from a peer to the cache.
pub(crate) fn save_peer(&mut self, pubkey: &PublicKey, peer: Signed<Connect>) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.peers_cache_mut().put(pubkey, peer);
}
self.merge(fork.into_patch())
.expect("Unable to save peer to the peers cache");
}
/// Removes from the cache the `Connect` message from a peer.
pub fn remove_peer_with_pubkey(&mut self, key: &PublicKey) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
let mut peers = schema.peers_cache_mut();
peers.remove(key);
}
self.merge(fork.into_patch())
.expect("Unable to remove peer from the peers cache");
}
/// Returns `Connect` messages from peers saved in the cache, if any.
pub fn get_saved_peers(&self) -> HashMap<PublicKey, Signed<Connect>> {
let schema = Schema::new(self.snapshot());
let peers_cache = schema.peers_cache();
let it = peers_cache.iter().map(|(k, v)| (k, v.clone()));
it.collect()
}
/// Saves the given raw message to the consensus messages cache.
pub(crate) fn save_message<T: ProtocolMessage>(&mut self, round: Round, raw: Signed<T>) {
self.save_messages(round, iter::once(raw.into()));
}
/// Saves a collection of SignedMessage to the consensus messages cache with single access to the
/// `Fork` instance.
pub(crate) fn save_messages<I>(&mut self, round: Round, iter: I)
where
I: IntoIterator<Item = Message>,
{
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.consensus_messages_cache_mut().extend(iter);
schema.set_consensus_round(round);
}
self.merge(fork.into_patch())
.expect("Unable to save messages to the consensus cache");
}
}
fn | before_commit | identifier_name |
|
setup.py | CYTHON_MIN_VER = "0.29.26" # released 2020
EXTRAS_REQUIRE = {
"build": ["cython>=" + CYTHON_MIN_VER],
"develop": ["cython>=" + CYTHON_MIN_VER] + DEVELOP_REQUIRES,
"docs": [
"sphinx",
"nbconvert",
"jupyter_client",
"ipykernel",
"matplotlib",
"nbformat",
"numpydoc",
"pandas-datareader",
],
}
###############################################################################
# Values that rarely change
###############################################################################
DISTNAME = "statsmodels"
DESCRIPTION = "Statistical computations and models for Python"
README = SETUP_DIR.joinpath("README.rst").read_text()
LONG_DESCRIPTION = README
MAINTAINER = "statsmodels Developers"
MAINTAINER_EMAIL = "[email protected]"
URL = "https://www.statsmodels.org/"
LICENSE = "BSD License"
DOWNLOAD_URL = ""
PROJECT_URLS = {
"Bug Tracker": "https://github.com/statsmodels/statsmodels/issues",
"Documentation": "https://www.statsmodels.org/stable/index.html",
"Source Code": "https://github.com/statsmodels/statsmodels",
}
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"Programming Language :: Cython",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"License :: OSI Approved :: BSD License",
"Topic :: Office/Business :: Financial",
"Topic :: Scientific/Engineering",
]
FILES_TO_INCLUDE_IN_PACKAGE = ["LICENSE.txt", "setup.cfg"]
FILES_COPIED_TO_PACKAGE = []
for filename in FILES_TO_INCLUDE_IN_PACKAGE:
if os.path.exists(filename):
dest = os.path.join("statsmodels", filename)
shutil.copy2(filename, dest)
FILES_COPIED_TO_PACKAGE.append(dest)
STATESPACE_RESULTS = "statsmodels.tsa.statespace.tests.results"
ADDITIONAL_PACKAGE_DATA = {
"statsmodels": FILES_TO_INCLUDE_IN_PACKAGE,
"statsmodels.datasets.tests": ["*.zip"],
"statsmodels.iolib.tests.results": ["*.dta"],
"statsmodels.stats.tests.results": ["*.json"],
"statsmodels.tsa.stl.tests.results": ["*.csv"],
"statsmodels.tsa.vector_ar.tests.results": ["*.npz", "*.dat"],
"statsmodels.stats.tests": ["*.txt"],
"statsmodels.stats.libqsturng": ["*.r", "*.txt", "*.dat"],
"statsmodels.stats.libqsturng.tests": ["*.csv", "*.dat"],
"statsmodels.sandbox.regression.tests": ["*.dta", "*.csv"],
STATESPACE_RESULTS: ["*.pkl", "*.csv"],
STATESPACE_RESULTS + ".frbny_nowcast": ["test*.mat"],
STATESPACE_RESULTS + ".frbny_nowcast.Nowcasting.data.US": ["*.csv"],
}
##############################################################################
# Extension Building
##############################################################################
CYTHON_COVERAGE = os.environ.get("SM_CYTHON_COVERAGE", False)
CYTHON_COVERAGE = CYTHON_COVERAGE in ("1", "true", '"true"')
CYTHON_TRACE_NOGIL = str(int(CYTHON_COVERAGE))
if CYTHON_COVERAGE:
print("Building with coverage for Cython code")
COMPILER_DIRECTIVES = {"linetrace": CYTHON_COVERAGE}
DEFINE_MACROS = [
("CYTHON_TRACE_NOGIL", CYTHON_TRACE_NOGIL),
("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"),
]
exts = dict(
_stl={"source": "statsmodels/tsa/stl/_stl.pyx"},
_exponential_smoothers={
"source": "statsmodels/tsa/holtwinters/_exponential_smoothers.pyx"
}, # noqa: E501
_ets_smooth={
"source": "statsmodels/tsa/exponential_smoothing/_ets_smooth.pyx"
}, # noqa: E501
_innovations={"source": "statsmodels/tsa/_innovations.pyx"},
_hamilton_filter={
"source": "statsmodels/tsa/regime_switching/_hamilton_filter.pyx.in"
}, # noqa: E501
_kim_smoother={
"source": "statsmodels/tsa/regime_switching/_kim_smoother.pyx.in"
}, # noqa: E501
_arma_innovations={
"source": "statsmodels/tsa/innovations/_arma_innovations.pyx.in"
}, # noqa: E501
linbin={"source": "statsmodels/nonparametric/linbin.pyx"},
_qn={"source": "statsmodels/robust/_qn.pyx"},
_smoothers_lowess={
"source": "statsmodels/nonparametric/_smoothers_lowess.pyx"
}, # noqa: E501
)
statespace_exts = [
"statsmodels/tsa/statespace/_initialization.pyx.in",
"statsmodels/tsa/statespace/_representation.pyx.in",
"statsmodels/tsa/statespace/_kalman_filter.pyx.in",
"statsmodels/tsa/statespace/_filters/_conventional.pyx.in",
"statsmodels/tsa/statespace/_filters/_inversions.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_kalman_smoother.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_alternative.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_classical.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_conventional.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_cfa_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_tools.pyx.in",
]
class CleanCommand(Command):
user_options = []
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run(self) -> None:
msg = """
python setup.py clean is not supported.
Use one of:
* `git clean -xdf` to clean all untracked files
* `git clean -Xdf` to clean untracked files ignored by .gitignore
"""
print(msg)
sys.exit(1)
def update_extension(extension, requires_math=True):
import numpy as np
numpy_includes = [np.get_include()]
extra_incl = pjoin(dirname(inspect.getfile(np.core)), "include")
numpy_includes += [extra_incl]
numpy_includes = list(set(numpy_includes))
numpy_math_libs = {
"include_dirs": [np.get_include()],
"library_dirs": [os.path.join(np.get_include(), '..', 'lib')],
"libraries": ["npymath"]
}
if not hasattr(extension, "include_dirs"):
return
extension.include_dirs = list(set(extension.include_dirs + numpy_includes))
if requires_math:
extension.include_dirs += numpy_math_libs["include_dirs"]
extension.libraries += numpy_math_libs["libraries"]
extension.library_dirs += numpy_math_libs["library_dirs"]
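# Illustrative sketch (hypothetical module name and source path, assuming
# numpy is installed; not part of the original build flow): the effect of
# update_extension above on a toy Extension.
import numpy as np
from setuptools import Extension
demo_ext = Extension("pkg._demo", ["pkg/_demo.pyx"])
update_extension(demo_ext, requires_math=True)
assert np.get_include() in demo_ext.include_dirs
assert "npymath" in demo_ext.libraries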
class DeferredBuildExt(build_ext):
"""build_ext command for use when numpy headers are needed."""
def build_extensions(self):
self._update_extensions()
build_ext.build_extensions(self)
def _update_extensions(self):
for extension in self.extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
cmdclass = {"clean": CleanCommand}
if not HAS_NUMPY:
cmdclass["build_ext"] = DeferredBuildExt
def check_source(source_name):
"""Chooses C or pyx source files, and raises if C is needed but missing"""
source_ext = ".pyx"
if not HAS_CYTHON:
source_name = source_name.replace(".pyx.in", ".c")
source_name = source_name.replace(".pyx", ".c")
source_ext = ".c"
if not os.path.exists(source_name):
msg = (
"C source not found. You must have Cython installed to "
"build if the C source files have not been generated."
)
raise IOError(msg)
return source_name, source_ext
def | (source_name):
"""Runs pyx.in files through tempita is needed"""
if source_name.endswith("pyx.in"):
with open(source_name, "r", encoding="utf-8") as templated:
pyx_template = templated.read()
pyx = Tempita.sub(pyx_template)
pyx_filename = source_name[:-3]
with open(pyx_filename, "w", encoding="utf | process_tempita | identifier_name |
setup.py | "true", '"true"')
CYTHON_TRACE_NOGIL = str(int(CYTHON_COVERAGE))
if CYTHON_COVERAGE:
print("Building with coverage for Cython code")
COMPILER_DIRECTIVES = {"linetrace": CYTHON_COVERAGE}
DEFINE_MACROS = [
("CYTHON_TRACE_NOGIL", CYTHON_TRACE_NOGIL),
("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"),
]
exts = dict(
_stl={"source": "statsmodels/tsa/stl/_stl.pyx"},
_exponential_smoothers={
"source": "statsmodels/tsa/holtwinters/_exponential_smoothers.pyx"
}, # noqa: E501
_ets_smooth={
"source": "statsmodels/tsa/exponential_smoothing/_ets_smooth.pyx"
}, # noqa: E501
_innovations={"source": "statsmodels/tsa/_innovations.pyx"},
_hamilton_filter={
"source": "statsmodels/tsa/regime_switching/_hamilton_filter.pyx.in"
}, # noqa: E501
_kim_smoother={
"source": "statsmodels/tsa/regime_switching/_kim_smoother.pyx.in"
}, # noqa: E501
_arma_innovations={
"source": "statsmodels/tsa/innovations/_arma_innovations.pyx.in"
}, # noqa: E501
linbin={"source": "statsmodels/nonparametric/linbin.pyx"},
_qn={"source": "statsmodels/robust/_qn.pyx"},
_smoothers_lowess={
"source": "statsmodels/nonparametric/_smoothers_lowess.pyx"
}, # noqa: E501
)
statespace_exts = [
"statsmodels/tsa/statespace/_initialization.pyx.in",
"statsmodels/tsa/statespace/_representation.pyx.in",
"statsmodels/tsa/statespace/_kalman_filter.pyx.in",
"statsmodels/tsa/statespace/_filters/_conventional.pyx.in",
"statsmodels/tsa/statespace/_filters/_inversions.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_kalman_smoother.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_alternative.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_classical.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_conventional.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_cfa_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_tools.pyx.in",
]
class CleanCommand(Command):
user_options = []
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run(self) -> None:
msg = """
python setup.py clean is not supported.
Use one of:
* `git clean -xdf` to clean all untracked files
* `git clean -Xdf` to clean untracked files ignored by .gitignore
"""
print(msg)
sys.exit(1)
def update_extension(extension, requires_math=True):
import numpy as np
numpy_includes = [np.get_include()]
extra_incl = pjoin(dirname(inspect.getfile(np.core)), "include")
numpy_includes += [extra_incl]
numpy_includes = list(set(numpy_includes))
numpy_math_libs = {
"include_dirs": [np.get_include()],
"library_dirs": [os.path.join(np.get_include(), '..', 'lib')],
"libraries": ["npymath"]
}
if not hasattr(extension, "include_dirs"):
return
extension.include_dirs = list(set(extension.include_dirs + numpy_includes))
if requires_math:
extension.include_dirs += numpy_math_libs["include_dirs"]
extension.libraries += numpy_math_libs["libraries"]
extension.library_dirs += numpy_math_libs["library_dirs"]
class DeferredBuildExt(build_ext):
"""build_ext command for use when numpy headers are needed."""
def build_extensions(self):
self._update_extensions()
build_ext.build_extensions(self)
def _update_extensions(self):
for extension in self.extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
cmdclass = {"clean": CleanCommand}
if not HAS_NUMPY:
cmdclass["build_ext"] = DeferredBuildExt
def check_source(source_name):
"""Chooses C or pyx source files, and raises if C is needed but missing"""
source_ext = ".pyx"
if not HAS_CYTHON:
source_name = source_name.replace(".pyx.in", ".c")
source_name = source_name.replace(".pyx", ".c")
source_ext = ".c"
if not os.path.exists(source_name):
msg = (
"C source not found. You must have Cython installed to "
"build if the C source files have not been generated."
)
raise IOError(msg)
return source_name, source_ext
def process_tempita(source_name):
"""Runs pyx.in files through tempita is needed"""
if source_name.endswith("pyx.in"):
with open(source_name, "r", encoding="utf-8") as templated:
pyx_template = templated.read()
pyx = Tempita.sub(pyx_template)
pyx_filename = source_name[:-3]
with open(pyx_filename, "w", encoding="utf-8") as pyx_file:
pyx_file.write(pyx)
file_stats = os.stat(source_name)
try:
os.utime(
pyx_filename,
ns=(file_stats.st_atime_ns, file_stats.st_mtime_ns),
)
except AttributeError:
os.utime(pyx_filename, (file_stats.st_atime, file_stats.st_mtime))
source_name = pyx_filename
return source_name
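# Illustrative sketch (hypothetical template string, not a real statsmodels
# file): process_tempita above renders ".pyx.in" templates with Cython's
# bundled Tempita. A stand-alone example of the substitution it performs:
from Cython import Tempita
demo_template = """\
{{py: TYPES = ["float64_t", "float32_t"]}}
{{for T in TYPES}}
cdef {{T}} add_{{T}}({{T}} a, {{T}} b):
    return a + b
{{endfor}}
"""
print(Tempita.sub(demo_template))  # emits one cdef add_* function per type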
EXT_REQUIRES_NUMPY_MATH_LIBS = []
extensions = []
for config in exts.values():
uses_blas = True
source, ext = check_source(config["source"])
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
include_dirs = config.get("include_dirs", [])
depends = config.get("depends", [])
libraries = config.get("libraries", [])
library_dirs = config.get("library_dirs", [])
uses_numpy_libraries = config.get("numpy_libraries", False)
if uses_blas or uses_numpy_libraries:
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=include_dirs,
depends=depends,
libraries=libraries,
library_dirs=library_dirs,
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
for source in statespace_exts:
source, ext = check_source(source)
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=["statsmodels/src"],
depends=[],
libraries=[],
library_dirs=[],
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
if HAS_NUMPY:
for extension in extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
if HAS_CYTHON:
extensions = cythonize(
extensions,
compiler_directives=COMPILER_DIRECTIVES,
language_level=3,
force=CYTHON_COVERAGE,
)
##############################################################################
# Construct package data
##############################################################################
package_data = defaultdict(list)
filetypes = ["*.csv", "*.txt", "*.dta"]
for root, _, filenames in os.walk(
pjoin(os.getcwd(), "statsmodels", "datasets")
): # noqa: E501
matches = []
for filetype in filetypes:
for filename in fnmatch.filter(filenames, filetype):
matches.append(filename)
if matches:
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for root, _, _ in os.walk(pjoin(os.getcwd(), "statsmodels")):
if root.endswith("results"):
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for path, filetypes in ADDITIONAL_PACKAGE_DATA.items():
package_data[path].extend(filetypes)
if os.path.exists("MANIFEST"):
os.unlink("MANIFEST")
class BinaryDistribution(Distribution):
def is_pure(self):
return False
setup(
name=DISTNAME,
maintainer=MAINTAINER,
ext_modules=extensions,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL, | project_urls=PROJECT_URLS, | random_line_split |
|
setup.py | test*.mat"],
STATESPACE_RESULTS + ".frbny_nowcast.Nowcasting.data.US": ["*.csv"],
}
##############################################################################
# Extension Building
##############################################################################
CYTHON_COVERAGE = os.environ.get("SM_CYTHON_COVERAGE", False)
CYTHON_COVERAGE = CYTHON_COVERAGE in ("1", "true", '"true"')
CYTHON_TRACE_NOGIL = str(int(CYTHON_COVERAGE))
if CYTHON_COVERAGE:
print("Building with coverage for Cython code")
COMPILER_DIRECTIVES = {"linetrace": CYTHON_COVERAGE}
DEFINE_MACROS = [
("CYTHON_TRACE_NOGIL", CYTHON_TRACE_NOGIL),
("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"),
]
exts = dict(
_stl={"source": "statsmodels/tsa/stl/_stl.pyx"},
_exponential_smoothers={
"source": "statsmodels/tsa/holtwinters/_exponential_smoothers.pyx"
}, # noqa: E501
_ets_smooth={
"source": "statsmodels/tsa/exponential_smoothing/_ets_smooth.pyx"
}, # noqa: E501
_innovations={"source": "statsmodels/tsa/_innovations.pyx"},
_hamilton_filter={
"source": "statsmodels/tsa/regime_switching/_hamilton_filter.pyx.in"
}, # noqa: E501
_kim_smoother={
"source": "statsmodels/tsa/regime_switching/_kim_smoother.pyx.in"
}, # noqa: E501
_arma_innovations={
"source": "statsmodels/tsa/innovations/_arma_innovations.pyx.in"
}, # noqa: E501
linbin={"source": "statsmodels/nonparametric/linbin.pyx"},
_qn={"source": "statsmodels/robust/_qn.pyx"},
_smoothers_lowess={
"source": "statsmodels/nonparametric/_smoothers_lowess.pyx"
}, # noqa: E501
)
statespace_exts = [
"statsmodels/tsa/statespace/_initialization.pyx.in",
"statsmodels/tsa/statespace/_representation.pyx.in",
"statsmodels/tsa/statespace/_kalman_filter.pyx.in",
"statsmodels/tsa/statespace/_filters/_conventional.pyx.in",
"statsmodels/tsa/statespace/_filters/_inversions.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_kalman_smoother.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_alternative.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_classical.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_conventional.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_cfa_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_tools.pyx.in",
]
class CleanCommand(Command):
user_options = []
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run(self) -> None:
msg = """
python setup.py clean is not supported.
Use one of:
* `git clean -xdf` to clean all untracked files
* `git clean -Xdf` to clean untracked files ignored by .gitignore
"""
print(msg)
sys.exit(1)
def update_extension(extension, requires_math=True):
import numpy as np
numpy_includes = [np.get_include()]
extra_incl = pjoin(dirname(inspect.getfile(np.core)), "include")
numpy_includes += [extra_incl]
numpy_includes = list(set(numpy_includes))
numpy_math_libs = {
"include_dirs": [np.get_include()],
"library_dirs": [os.path.join(np.get_include(), '..', 'lib')],
"libraries": ["npymath"]
}
if not hasattr(extension, "include_dirs"):
return
extension.include_dirs = list(set(extension.include_dirs + numpy_includes))
if requires_math:
extension.include_dirs += numpy_math_libs["include_dirs"]
extension.libraries += numpy_math_libs["libraries"]
extension.library_dirs += numpy_math_libs["library_dirs"]
class DeferredBuildExt(build_ext):
"""build_ext command for use when numpy headers are needed."""
def build_extensions(self):
self._update_extensions()
build_ext.build_extensions(self)
def _update_extensions(self):
for extension in self.extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
cmdclass = {"clean": CleanCommand}
if not HAS_NUMPY:
cmdclass["build_ext"] = DeferredBuildExt
def check_source(source_name):
"""Chooses C or pyx source files, and raises if C is needed but missing"""
source_ext = ".pyx"
if not HAS_CYTHON:
source_name = source_name.replace(".pyx.in", ".c")
source_name = source_name.replace(".pyx", ".c")
source_ext = ".c"
if not os.path.exists(source_name):
msg = (
"C source not found. You must have Cython installed to "
"build if the C source files have not been generated."
)
raise IOError(msg)
return source_name, source_ext
def process_tempita(source_name):
"""Runs pyx.in files through tempita is needed"""
if source_name.endswith("pyx.in"):
with open(source_name, "r", encoding="utf-8") as templated:
pyx_template = templated.read()
pyx = Tempita.sub(pyx_template)
pyx_filename = source_name[:-3]
with open(pyx_filename, "w", encoding="utf-8") as pyx_file:
pyx_file.write(pyx)
file_stats = os.stat(source_name)
try:
os.utime(
pyx_filename,
ns=(file_stats.st_atime_ns, file_stats.st_mtime_ns),
)
except AttributeError:
os.utime(pyx_filename, (file_stats.st_atime, file_stats.st_mtime))
source_name = pyx_filename
return source_name
EXT_REQUIRES_NUMPY_MATH_LIBS = []
extensions = []
for config in exts.values():
uses_blas = True
source, ext = check_source(config["source"])
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
include_dirs = config.get("include_dirs", [])
depends = config.get("depends", [])
libraries = config.get("libraries", [])
library_dirs = config.get("library_dirs", [])
uses_numpy_libraries = config.get("numpy_libraries", False)
if uses_blas or uses_numpy_libraries:
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=include_dirs,
depends=depends,
libraries=libraries,
library_dirs=library_dirs,
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
for source in statespace_exts:
source, ext = check_source(source)
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=["statsmodels/src"],
depends=[],
libraries=[],
library_dirs=[],
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
if HAS_NUMPY:
for extension in extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
if HAS_CYTHON:
extensions = cythonize(
extensions,
compiler_directives=COMPILER_DIRECTIVES,
language_level=3,
force=CYTHON_COVERAGE,
)
##############################################################################
# Construct package data
##############################################################################
package_data = defaultdict(list)
filetypes = ["*.csv", "*.txt", "*.dta"]
for root, _, filenames in os.walk(
pjoin(os.getcwd(), "statsmodels", "datasets")
): # noqa: E501
matches = []
for filetype in filetypes:
for filename in fnmatch.filter(filenames, filetype):
matches.append(filename)
if matches:
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for root, _, _ in os.walk(pjoin(os.getcwd(), "statsmodels")):
if root.endswith("results"):
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for path, filetypes in ADDITIONAL_PACKAGE_DATA.items():
package_data[path].extend(filetypes)
if os.path.exists("MANIFEST"):
os.unlink("MANIFEST")
class BinaryDistribution(Distribution):
def is_pure(self):
| return False | identifier_body |
|
setup.py | CYTHON_MIN_VER = "0.29.26" # released 2020
EXTRAS_REQUIRE = {
"build": ["cython>=" + CYTHON_MIN_VER],
"develop": ["cython>=" + CYTHON_MIN_VER] + DEVELOP_REQUIRES,
"docs": [
"sphinx",
"nbconvert",
"jupyter_client",
"ipykernel",
"matplotlib",
"nbformat",
"numpydoc",
"pandas-datareader",
],
}
###############################################################################
# Values that rarely change
###############################################################################
DISTNAME = "statsmodels"
DESCRIPTION = "Statistical computations and models for Python"
README = SETUP_DIR.joinpath("README.rst").read_text()
LONG_DESCRIPTION = README
MAINTAINER = "statsmodels Developers"
MAINTAINER_EMAIL = "[email protected]"
URL = "https://www.statsmodels.org/"
LICENSE = "BSD License"
DOWNLOAD_URL = ""
PROJECT_URLS = {
"Bug Tracker": "https://github.com/statsmodels/statsmodels/issues",
"Documentation": "https://www.statsmodels.org/stable/index.html",
"Source Code": "https://github.com/statsmodels/statsmodels",
}
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"Programming Language :: Cython",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"License :: OSI Approved :: BSD License",
"Topic :: Office/Business :: Financial",
"Topic :: Scientific/Engineering",
]
FILES_TO_INCLUDE_IN_PACKAGE = ["LICENSE.txt", "setup.cfg"]
FILES_COPIED_TO_PACKAGE = []
for filename in FILES_TO_INCLUDE_IN_PACKAGE:
if os.path.exists(filename):
|
STATESPACE_RESULTS = "statsmodels.tsa.statespace.tests.results"
ADDITIONAL_PACKAGE_DATA = {
"statsmodels": FILES_TO_INCLUDE_IN_PACKAGE,
"statsmodels.datasets.tests": ["*.zip"],
"statsmodels.iolib.tests.results": ["*.dta"],
"statsmodels.stats.tests.results": ["*.json"],
"statsmodels.tsa.stl.tests.results": ["*.csv"],
"statsmodels.tsa.vector_ar.tests.results": ["*.npz", "*.dat"],
"statsmodels.stats.tests": ["*.txt"],
"statsmodels.stats.libqsturng": ["*.r", "*.txt", "*.dat"],
"statsmodels.stats.libqsturng.tests": ["*.csv", "*.dat"],
"statsmodels.sandbox.regression.tests": ["*.dta", "*.csv"],
STATESPACE_RESULTS: ["*.pkl", "*.csv"],
STATESPACE_RESULTS + ".frbny_nowcast": ["test*.mat"],
STATESPACE_RESULTS + ".frbny_nowcast.Nowcasting.data.US": ["*.csv"],
}
##############################################################################
# Extension Building
##############################################################################
CYTHON_COVERAGE = os.environ.get("SM_CYTHON_COVERAGE", False)
CYTHON_COVERAGE = CYTHON_COVERAGE in ("1", "true", '"true"')
CYTHON_TRACE_NOGIL = str(int(CYTHON_COVERAGE))
if CYTHON_COVERAGE:
print("Building with coverage for Cython code")
COMPILER_DIRECTIVES = {"linetrace": CYTHON_COVERAGE}
DEFINE_MACROS = [
("CYTHON_TRACE_NOGIL", CYTHON_TRACE_NOGIL),
("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"),
]
exts = dict(
_stl={"source": "statsmodels/tsa/stl/_stl.pyx"},
_exponential_smoothers={
"source": "statsmodels/tsa/holtwinters/_exponential_smoothers.pyx"
}, # noqa: E501
_ets_smooth={
"source": "statsmodels/tsa/exponential_smoothing/_ets_smooth.pyx"
}, # noqa: E501
_innovations={"source": "statsmodels/tsa/_innovations.pyx"},
_hamilton_filter={
"source": "statsmodels/tsa/regime_switching/_hamilton_filter.pyx.in"
}, # noqa: E501
_kim_smoother={
"source": "statsmodels/tsa/regime_switching/_kim_smoother.pyx.in"
}, # noqa: E501
_arma_innovations={
"source": "statsmodels/tsa/innovations/_arma_innovations.pyx.in"
}, # noqa: E501
linbin={"source": "statsmodels/nonparametric/linbin.pyx"},
_qn={"source": "statsmodels/robust/_qn.pyx"},
_smoothers_lowess={
"source": "statsmodels/nonparametric/_smoothers_lowess.pyx"
}, # noqa: E501
)
statespace_exts = [
"statsmodels/tsa/statespace/_initialization.pyx.in",
"statsmodels/tsa/statespace/_representation.pyx.in",
"statsmodels/tsa/statespace/_kalman_filter.pyx.in",
"statsmodels/tsa/statespace/_filters/_conventional.pyx.in",
"statsmodels/tsa/statespace/_filters/_inversions.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_kalman_smoother.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_alternative.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_classical.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_conventional.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_cfa_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_tools.pyx.in",
]
class CleanCommand(Command):
user_options = []
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run(self) -> None:
msg = """
python setup.py clean is not supported.
Use one of:
* `git clean -xdf` to clean all untracked files
* `git clean -Xdf` to clean untracked files ignored by .gitignore
"""
print(msg)
sys.exit(1)
def update_extension(extension, requires_math=True):
import numpy as np
numpy_includes = [np.get_include()]
extra_incl = pjoin(dirname(inspect.getfile(np.core)), "include")
numpy_includes += [extra_incl]
numpy_includes = list(set(numpy_includes))
numpy_math_libs = {
"include_dirs": [np.get_include()],
"library_dirs": [os.path.join(np.get_include(), '..', 'lib')],
"libraries": ["npymath"]
}
if not hasattr(extension, "include_dirs"):
return
extension.include_dirs = list(set(extension.include_dirs + numpy_includes))
if requires_math:
extension.include_dirs += numpy_math_libs["include_dirs"]
extension.libraries += numpy_math_libs["libraries"]
extension.library_dirs += numpy_math_libs["library_dirs"]
class DeferredBuildExt(build_ext):
"""build_ext command for use when numpy headers are needed."""
def build_extensions(self):
self._update_extensions()
build_ext.build_extensions(self)
def _update_extensions(self):
for extension in self.extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
cmdclass = {"clean": CleanCommand}
if not HAS_NUMPY:
cmdclass["build_ext"] = DeferredBuildExt
def check_source(source_name):
"""Chooses C or pyx source files, and raises if C is needed but missing"""
source_ext = ".pyx"
if not HAS_CYTHON:
source_name = source_name.replace(".pyx.in", ".c")
source_name = source_name.replace(".pyx", ".c")
source_ext = ".c"
if not os.path.exists(source_name):
msg = (
"C source not found. You must have Cython installed to "
"build if the C source files have not been generated."
)
raise IOError(msg)
return source_name, source_ext
def process_tempita(source_name):
"""Runs pyx.in files through tempita is needed"""
if source_name.endswith("pyx.in"):
with open(source_name, "r", encoding="utf-8") as templated:
pyx_template = templated.read()
pyx = Tempita.sub(pyx_template)
pyx_filename = source_name[:-3]
with open(pyx_filename, "w", encoding="utf | dest = os.path.join("statsmodels", filename)
shutil.copy2(filename, dest)
FILES_COPIED_TO_PACKAGE.append(dest) | conditional_block |
cpu.go | even when
// test execution exceeds the maximum time allowed.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
// CPU frequency scaling and thermal throttling might influence our test results.
if restoreScaling, err = disableCPUFrequencyScaling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable CPU frequency scaling")
}
if restoreThrottling, err = disableThermalThrottling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable thermal throttling")
}
// Disarm running the cleanUp function now that we expect the caller to do it.
doCleanup = nil
return cleanUp, nil
}
// MeasureUsage measures the average utilization across all CPUs and the
// average SoC 'pkg' power consumption during the specified duration. Measuring
// power consumption is currently not supported on all platforms. A map is
// returned containing CPU usage (percentage in [0-100] range) and power
// consumption (Watts) if supported.
func MeasureUsage(ctx context.Context, duration time.Duration) (map[string]float64, error) {
var cpuUsage, powerConsumption float64
var cpuErr, powerErr error
var wg sync.WaitGroup
// Start measuring CPU usage asynchronously.
wg.Add(1)
go func() {
defer wg.Done()
cpuUsage, cpuErr = cpu.MeasureUsage(ctx, duration)
}()
// Start measuring power consumption asynchronously. Power consumption
// is currently only measured on Intel devices that support the
// dump_intel_rapl_consumption command.
if _, powerErr = os.Stat(raplExec); powerErr == nil {
wg.Add(1)
go func() {
defer wg.Done()
if powerConsumption, powerErr = MeasurePowerConsumption(ctx, duration); powerErr != nil {
testing.ContextLog(ctx, "Measuring power consumption failed: ", powerErr)
}
}()
}
wg.Wait()
measurements := make(map[string]float64)
if cpuErr == nil {
measurements["cpu"] = cpuUsage
}
if powerErr == nil {
measurements["power"] = powerConsumption
}
// Ignore powerErr as not all platforms support measuring power consumption.
return measurements, cpuErr
}
// MeasurePowerConsumption measures power consumption during the specified
// duration and returns the average power consumption (in Watts). The power
// consumption is acquired by reading the RAPL 'pkg' entry, which gives a
// measure of the total SoC power consumption.
func MeasurePowerConsumption(ctx context.Context, duration time.Duration) (float64, error) {
cmd := testexec.CommandContext(ctx, raplExec, "--interval_ms="+
strconv.FormatInt(int64(duration/time.Millisecond), 10))
powerConsumptionOutput, err := cmd.CombinedOutput()
if err != nil {
return 0.0, err
}
var powerConsumptionRegex = regexp.MustCompile(`(\d+\.\d+)`)
match := powerConsumptionRegex.FindAllString(string(powerConsumptionOutput), 1)
if len(match) != 1 {
return 0.0, errors.Errorf("failed to parse output of %s", raplExec)
}
powerConsumption, err := strconv.ParseFloat(match[0], 64)
if err != nil {
return 0.0, err
}
return powerConsumption, nil
}
// cpuConfigEntry holds a single CPU config entry. If ignoreErrors is true
// failure to apply the config will result in a warning, rather than an error.
// This is needed as on some platforms we might not have the right permissions
// to disable frequency scaling.
type cpuConfigEntry struct {
path string
value string
ignoreErrors bool
}
// disableCPUFrequencyScaling disables frequency scaling. All CPU cores will be
// set to always run at their maximum frequency. A function is returned so the
// caller can restore the original CPU frequency scaling configuration.
// Depending on the platform different mechanisms are present:
// - Some Intel-based platforms (e.g. Eve and Nocturne) ignore the values set
// in the scaling_governor, and instead use the intel_pstate application to
// control CPU frequency scaling.
// - Most platforms use the scaling_governor to control CPU frequency scaling.
// - Some platforms (e.g. Dru) use a different CPU frequency scaling governor.
func disableCPUFrequencyScaling(ctx context.Context) (func(ctx context.Context) error, error) {
configPatterns := []cpuConfigEntry{
// crbug.com/938729: BIOS settings might prevent us from overwriting intel_pstate/no_turbo.
{"/sys/devices/system/cpu/intel_pstate/no_turbo", "1", true},
// Fix the intel_pstate percentage to 100 if possible. We raise the
// maximum value before the minimum value as the min cannot exceed the
// max. To restore them, the order must be inverted. Note that we set
// and save the original values for these values because changing
// scaling_governor to "performance" can change these values as well.
{"/sys/devices/system/cpu/intel_pstate/max_perf_pct", "100", false},
{"/sys/devices/system/cpu/intel_pstate/min_perf_pct", "100", false},
// crbug.com/977925: Disabled hyperthreading cores are listed but
// writing config for these disabled cores results in 'invalid argument'.
// TODO(dstaessens): Skip disabled CPU cores when setting scaling_governor.
{"/sys/devices/system/cpu/cpu[0-9]*/cpufreq/scaling_governor", "performance", true},
{"/sys/class/devfreq/devfreq[0-9]*/governor", "performance", true},
}
var optimizedConfig []cpuConfigEntry
// Expands patterns in configPatterns and pack actual configs into
// optimizedConfig.
for _, config := range configPatterns {
paths, err := filepath.Glob(config.path)
if err != nil {
return nil, err
}
for _, path := range paths {
optimizedConfig = append(optimizedConfig, cpuConfigEntry{
path,
config.value,
config.ignoreErrors,
})
}
}
origConfig, err := applyConfig(ctx, optimizedConfig)
undo := func(ctx context.Context) error {
_, err := applyConfig(ctx, origConfig)
return err
}
if err != nil {
undo(ctx)
return nil, err
}
return undo, nil
}
// applyConfig applies the specified frequency scaling configuration. A slice of
// cpuConfigEntry needs to be provided and will be processed in order. A slice
// of the original cpuConfigEntry values that were successfully processed is
// returned in reverse order so the caller can restore the original config by
// passing the slice to this function as is. If ignoreErrors is true for a
// config entry we won't return an error upon failure, but will only show a
// warning. The provided context will only be used for logging, so the config
// will even be applied upon timeout.
func applyConfig(ctx context.Context, cpuConfig []cpuConfigEntry) ([]cpuConfigEntry, error) {
var origConfig []cpuConfigEntry
for _, config := range cpuConfig {
origValue, err := ioutil.ReadFile(config.path)
if err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to read %v: %v", config.path, err)
continue
}
if err = ioutil.WriteFile(config.path, []byte(config.value), 0644); err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to write to %v: %v", config.path, err)
continue
}
// Inserts a new entry at the front of origConfig.
e := cpuConfigEntry{config.path, string(origValue), false}
origConfig = append([]cpuConfigEntry{e}, origConfig...)
}
return origConfig, nil
}
// disableThermalThrottling disables thermal throttling, as it might interfere
// with test execution. A function is returned that restores the original
// settings, so the caller can re-enable thermal throttling after testing.
func disableThermalThrottling(ctx context.Context) (func(context.Context) error, error) {
job := getThermalThrottlingJob(ctx)
if job == "" {
return func(ctx context.Context) error { return nil }, nil
}
_, state, _, err := upstart.JobStatus(ctx, job)
if err != nil {
return nil, err
} else if state != upstartcommon.RunningState {
return func(ctx context.Context) error { return nil }, nil
}
if err := upstart.StopJob(ctx, job); err != nil {
return nil, err
}
undo := func(ctx context.Context) error { return upstart.EnsureJobRunning(ctx, job) }
return undo, nil
}
// getThermalThrottlingJob tries to determine the name of the thermal throttling
// job used by the current platform.
func | getThermalThrottlingJob | identifier_name |
|
cpu.go | context. This ensures
// thermal throttling and CPU frequency scaling get re-enabled, even when
// test execution exceeds the maximum time allowed.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
// CPU frequency scaling and thermal throttling might influence our test results.
if restoreScaling, err = disableCPUFrequencyScaling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable CPU frequency scaling")
}
if restoreThrottling, err = disableThermalThrottling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable thermal throttling")
}
// Disarm running the cleanUp function now that we expect the caller to do it.
doCleanup = nil
return cleanUp, nil
}
// MeasureUsage measures the average utilization across all CPUs and the
// average SoC 'pkg' power consumption during the specified duration. Measuring
// power consumption is currently not supported on all platforms. A map is
// returned containing CPU usage (percentage in [0-100] range) and power
// consumption (Watts) if supported.
func MeasureUsage(ctx context.Context, duration time.Duration) (map[string]float64, error) {
var cpuUsage, powerConsumption float64
var cpuErr, powerErr error
var wg sync.WaitGroup
// Start measuring CPU usage asynchronously.
wg.Add(1)
go func() {
defer wg.Done()
cpuUsage, cpuErr = cpu.MeasureUsage(ctx, duration)
}()
// Start measuring power consumption asynchronously. Power consumption
// is currently only measured on Intel devices that support the
// dump_intel_rapl_consumption command.
if _, powerErr = os.Stat(raplExec); powerErr == nil {
wg.Add(1)
go func() {
defer wg.Done()
if powerConsumption, powerErr = MeasurePowerConsumption(ctx, duration); powerErr != nil {
testing.ContextLog(ctx, "Measuring power consumption failed: ", powerErr)
}
}()
}
wg.Wait()
measurements := make(map[string]float64)
if cpuErr == nil {
measurements["cpu"] = cpuUsage
}
if powerErr == nil {
measurements["power"] = powerConsumption
}
// Ignore powerErr as not all platforms support measuring power consumption.
return measurements, cpuErr
}
// MeasurePowerConsumption measures power consumption during the specified
// duration and returns the average power consumption (in Watts). The power
// consumption is acquired by reading the RAPL 'pkg' entry, which gives a
// measure of the total SoC power consumption.
func MeasurePowerConsumption(ctx context.Context, duration time.Duration) (float64, error) {
cmd := testexec.CommandContext(ctx, raplExec, "--interval_ms="+
strconv.FormatInt(int64(duration/time.Millisecond), 10))
powerConsumptionOutput, err := cmd.CombinedOutput()
if err != nil {
return 0.0, err
}
var powerConsumptionRegex = regexp.MustCompile(`(\d+\.\d+)`)
match := powerConsumptionRegex.FindAllString(string(powerConsumptionOutput), 1)
if len(match) != 1 {
return 0.0, errors.Errorf("failed to parse output of %s", raplExec)
}
powerConsumption, err := strconv.ParseFloat(match[0], 64)
if err != nil {
return 0.0, err
}
return powerConsumption, nil
}
// cpuConfigEntry holds a single CPU config entry. If ignoreErrors is true
// failure to apply the config will result in a warning, rather than an error.
// This is needed as on some platforms we might not have the right permissions
// to disable frequency scaling.
type cpuConfigEntry struct {
path string
value string
ignoreErrors bool
}
// disableCPUFrequencyScaling disables frequency scaling. All CPU cores will be
// set to always run at their maximum frequency. A function is returned so the
// caller can restore the original CPU frequency scaling configuration.
// Depending on the platform different mechanisms are present:
// - Some Intel-based platforms (e.g. Eve and Nocturne) ignore the values set
// in the scaling_governor, and instead use the intel_pstate application to
// control CPU frequency scaling.
// - Most platforms use the scaling_governor to control CPU frequency scaling.
// - Some platforms (e.g. Dru) use a different CPU frequency scaling governor.
func disableCPUFrequencyScaling(ctx context.Context) (func(ctx context.Context) error, error) {
configPatterns := []cpuConfigEntry{
// crbug.com/938729: BIOS settings might prevent us from overwriting intel_pstate/no_turbo.
{"/sys/devices/system/cpu/intel_pstate/no_turbo", "1", true},
// Fix the intel_pstate percentage to 100 if possible. We raise the
// maximum value before the minimum value as the min cannot exceed the
// max. To restore them, the order must be inverted. Note that we set
// and save the original values for these values because changing
// scaling_governor to "performance" can change these values as well.
{"/sys/devices/system/cpu/intel_pstate/max_perf_pct", "100", false},
{"/sys/devices/system/cpu/intel_pstate/min_perf_pct", "100", false},
// crbug.com/977925: Disabled hyperthreading cores are listed but
// writing config for these disabled cores results in 'invalid argument'.
// TODO(dstaessens): Skip disabled CPU cores when setting scaling_governor.
{"/sys/devices/system/cpu/cpu[0-9]*/cpufreq/scaling_governor", "performance", true},
{"/sys/class/devfreq/devfreq[0-9]*/governor", "performance", true},
}
var optimizedConfig []cpuConfigEntry
// Expands patterns in configPatterns and pack actual configs into
// optimizedConfig.
for _, config := range configPatterns {
paths, err := filepath.Glob(config.path)
if err != nil {
return nil, err
}
for _, path := range paths {
optimizedConfig = append(optimizedConfig, cpuConfigEntry{
path,
config.value,
config.ignoreErrors,
})
}
}
origConfig, err := applyConfig(ctx, optimizedConfig)
undo := func(ctx context.Context) error {
_, err := applyConfig(ctx, origConfig)
return err
}
if err != nil {
undo(ctx)
return nil, err
}
return undo, nil
}
// applyConfig applies the specified frequency scaling configuration. A slice of
// cpuConfigEntry needs to be provided and will be processed in order. A slice
// of the original cpuConfigEntry values that were successfully processed is
// returned in reverse order so the caller can restore the original config by
// passing the slice to this function as is. If ignoreErrors is true for a
// config entry we won't return an error upon failure, but will only show a
// warning. The provided context will only be used for logging, so the config
// will even be applied upon timeout.
func applyConfig(ctx context.Context, cpuConfig []cpuConfigEntry) ([]cpuConfigEntry, error) {
var origConfig []cpuConfigEntry
for _, config := range cpuConfig {
origValue, err := ioutil.ReadFile(config.path)
if err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to read %v: %v", config.path, err)
continue
}
if err = ioutil.WriteFile(config.path, []byte(config.value), 0644); err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to write to %v: %v", config.path, err)
continue
}
// Inserts a new entry at the front of origConfig.
e := cpuConfigEntry{config.path, string(origValue), false}
origConfig = append([]cpuConfigEntry{e}, origConfig...)
}
return origConfig, nil
}
// disableThermalThrottling disables thermal throttling, as it might interfere
// with test execution. A function is returned that restores the original
// settings, so the caller can re-enable thermal throttling after testing.
func disableThermalThrottling(ctx context.Context) (func(context.Context) error, error) {
job := getThermalThrottlingJob(ctx)
if job == "" {
return func(ctx context.Context) error { return nil }, nil
}
_, state, _, err := upstart.JobStatus(ctx, job)
if err != nil {
return nil, err
} else if state != upstartcommon.RunningState {
return func(ctx context.Context) error { return nil }, nil
}
if err := upstart.StopJob(ctx, job); err != nil {
return nil, err
}
undo := func(ctx context.Context) error { return upstart.EnsureJobRunning(ctx, job) }
return undo, nil
}
// getThermalThrottlingJob tries to determine the name of the thermal throttling | random_line_split |
||
cpu.go | cleanupTime = 5 * time.Second // time reserved for cleanup after measuring.
)
for _, t := range ts {
// Start the process asynchronously by calling the provided startup function.
cmd, err := t.Start(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to run binary")
}
// Clean up the process upon exiting the function.
defer func() {
// If the exit option is 'WaitProcess' wait for the process to terminate.
if exitOption == WaitProcess {
if err := cmd.Wait(); err != nil {
retErr = err
testing.ContextLog(ctx, "Failed waiting for the command to exit: ", retErr)
}
return
}
// If the exit option is 'KillProcess' we will send a 'SIGKILL' signal
// to the process after collecting performance metrics.
if err := cmd.Kill(); err != nil {
retErr = err
testing.ContextLog(ctx, "Failed to kill process: ", retErr)
return
}
// After sending a 'SIGKILL' signal to the process we need to wait
// for the process to terminate. If Wait() doesn't return any error,
// we know the process already terminated before we explicitly killed
// it and the measured performance metrics are invalid.
err = cmd.Wait()
if err == nil {
retErr = errors.New("process did not run for entire measurement duration")
testing.ContextLog(ctx, retErr)
return
}
// Check whether the process was terminated with a 'SIGKILL' signal.
ws, ok := testexec.GetWaitStatus(err)
if !ok {
retErr = errors.Wrap(err, "failed to get wait status")
testing.ContextLog(ctx, retErr)
} else if !ws.Signaled() || ws.Signal() != unix.SIGKILL {
retErr = errors.Wrap(err, "process did not terminate with SIGKILL signal")
testing.ContextLog(ctx, retErr)
}
}()
}
// Use a shorter context to leave time for cleanup upon failure.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
if err := testing.Sleep(ctx, stabilizeTime); err != nil {
return nil, errors.Wrap(err, "failed waiting for CPU usage to stabilize")
}
testing.ContextLog(ctx, "Measuring CPU usage and power consumption for ", duration.Round(time.Second))
return MeasureUsage(ctx, duration)
}
// SetUpBenchmark performs setup needed for running benchmarks. It disables CPU
// frequency scaling and thermal throttling. A deferred call to the returned
// cleanUp function should be scheduled by the caller if err is non-nil.
func SetUpBenchmark(ctx context.Context) (cleanUp func(ctx context.Context), err error) {
const cleanupTime = 10 * time.Second // time reserved for cleanup on error.
var restoreScaling func(ctx context.Context) error
var restoreThrottling func(ctx context.Context) error
cleanUp = func(ctx context.Context) {
if restoreScaling != nil {
if err = restoreScaling(ctx); err != nil {
testing.ContextLog(ctx, "Failed to restore CPU frequency scaling to original values: ", err)
}
}
if restoreThrottling != nil {
if err = restoreThrottling(ctx); err != nil {
testing.ContextLog(ctx, "Failed to restore CPU thermal throttling to original values: ", err)
}
}
}
// Run the cleanUp function automatically if we encounter an error.
doCleanup := cleanUp
defer func() {
if doCleanup != nil {
doCleanup(ctx)
}
}()
// Run all non-cleanup operations with a shorter context. This ensures
// thermal throttling and CPU frequency scaling get re-enabled, even when
// test execution exceeds the maximum time allowed.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
// CPU frequency scaling and thermal throttling might influence our test results.
if restoreScaling, err = disableCPUFrequencyScaling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable CPU frequency scaling")
}
if restoreThrottling, err = disableThermalThrottling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable thermal throttling")
}
// Disarm running the cleanUp function now that we expect the caller to do it.
doCleanup = nil
return cleanUp, nil
}
// MeasureUsage measures the average utilization across all CPUs and the
// average SoC 'pkg' power consumption during the specified duration. Measuring
// power consumption is currently not supported on all platforms. A map is
// returned containing CPU usage (percentage in [0-100] range) and power
// consumption (Watts) if supported.
func MeasureUsage(ctx context.Context, duration time.Duration) (map[string]float64, error) {
var cpuUsage, powerConsumption float64
var cpuErr, powerErr error
var wg sync.WaitGroup
// Start measuring CPU usage asynchronously.
wg.Add(1)
go func() {
defer wg.Done()
cpuUsage, cpuErr = cpu.MeasureUsage(ctx, duration)
}()
// Start measuring power consumption asynchronously. Power consumption
// is currently only measured on Intel devices that support the
// dump_intel_rapl_consumption command.
if _, powerErr = os.Stat(raplExec); powerErr == nil {
wg.Add(1)
go func() {
defer wg.Done()
if powerConsumption, powerErr = MeasurePowerConsumption(ctx, duration); powerErr != nil {
testing.ContextLog(ctx, "Measuring power consumption failed: ", powerErr)
}
}()
}
wg.Wait()
measurements := make(map[string]float64)
if cpuErr == nil {
measurements["cpu"] = cpuUsage
}
if powerErr == nil {
measurements["power"] = powerConsumption
}
// Ignore powerErr as not all platforms support measuring power consumption.
return measurements, cpuErr
}
// MeasurePowerConsumption measures power consumption during the specified
// duration and returns the average power consumption (in Watts). The power
// consumption is acquired by reading the RAPL 'pkg' entry, which gives a
// measure of the total SoC power consumption.
func MeasurePowerConsumption(ctx context.Context, duration time.Duration) (float64, error) {
cmd := testexec.CommandContext(ctx, raplExec, "--interval_ms="+
strconv.FormatInt(int64(duration/time.Millisecond), 10))
powerConsumptionOutput, err := cmd.CombinedOutput()
if err != nil {
return 0.0, err
}
var powerConsumptionRegex = regexp.MustCompile(`(\d+\.\d+)`)
match := powerConsumptionRegex.FindAllString(string(powerConsumptionOutput), 1)
if len(match) != 1 |
powerConsumption, err := strconv.ParseFloat(match[0], 64)
if err != nil {
return 0.0, err
}
return powerConsumption, nil
}
// cpuConfigEntry holds a single CPU config entry. If ignoreErrors is true
// failure to apply the config will result in a warning, rather than an error.
// This is needed as on some platforms we might not have the right permissions
// to disable frequency scaling.
type cpuConfigEntry struct {
path string
value string
ignoreErrors bool
}
// disableCPUFrequencyScaling disables frequency scaling. All CPU cores will be
// set to always run at their maximum frequency. A function is returned so the
// caller can restore the original CPU frequency scaling configuration.
// Depending on the platform different mechanisms are present:
// - Some Intel-based platforms (e.g. Eve and Nocturne) ignore the values set
// in the scaling_governor, and instead use the intel_pstate application to
// control CPU frequency scaling.
// - Most platforms use the scaling_governor to control CPU frequency scaling.
// - Some platforms (e.g. Dru) use a different CPU frequency scaling governor.
func disableCPUFrequencyScaling(ctx context.Context) (func(ctx context.Context) error, error) {
configPatterns := []cpuConfigEntry{
// crbug.com/938729: BIOS settings might prevent us from overwriting intel_pstate/no_turbo.
{"/sys/devices/system/cpu/intel_pstate/no_turbo", "1", true},
// Fix the intel_pstate percentage to 100 if possible. We raise the
// maximum value before the minimum value as the min cannot exceed the
// max. To restore them, the order must be inverted. Note that we set
// and save the original values for these values because changing
// scaling_governor to "performance" can change these values as well.
{"/sys/devices/system/cpu/intel_pstate/max_perf_pct", "100", false},
{"/sys/devices/system/cpu/intel_pstate/min_perf_pct", "100", false},
// crbug.com/977925: Disabled hyperthreading cores are listed but
// writing config for these disabled cores results | {
return 0.0, errors.Errorf("failed to parse output of %s", raplExec)
} | conditional_block |
cpu.go | != nil {
return nil, errors.Wrap(err, "failed to disable thermal throttling")
}
// Disarm running the cleanUp function now that we expect the caller to do it.
doCleanup = nil
return cleanUp, nil
}
// MeasureUsage measures the average utilization across all CPUs and the
// average SoC 'pkg' power consumption during the specified duration. Measuring
// power consumption is currently not supported on all platforms. A map is
// returned containing CPU usage (percentage in [0-100] range) and power
// consumption (Watts) if supported.
func MeasureUsage(ctx context.Context, duration time.Duration) (map[string]float64, error) {
var cpuUsage, powerConsumption float64
var cpuErr, powerErr error
var wg sync.WaitGroup
// Start measuring CPU usage asynchronously.
wg.Add(1)
go func() {
defer wg.Done()
cpuUsage, cpuErr = cpu.MeasureUsage(ctx, duration)
}()
// Start measuring power consumption asynchronously. Power consumption
// is currently only measured on Intel devices that support the
// dump_intel_rapl_consumption command.
if _, powerErr = os.Stat(raplExec); powerErr == nil {
wg.Add(1)
go func() {
defer wg.Done()
if powerConsumption, powerErr = MeasurePowerConsumption(ctx, duration); powerErr != nil {
testing.ContextLog(ctx, "Measuring power consumption failed: ", powerErr)
}
}()
}
wg.Wait()
measurements := make(map[string]float64)
if cpuErr == nil {
measurements["cpu"] = cpuUsage
}
if powerErr == nil {
measurements["power"] = powerConsumption
}
// Ignore powerErr as not all platforms support measuring power consumption.
return measurements, cpuErr
}
// MeasurePowerConsumption measures power consumption during the specified
// duration and returns the average power consumption (in Watts). The power
// consumption is acquired by reading the RAPL 'pkg' entry, which gives a
// measure of the total SoC power consumption.
func MeasurePowerConsumption(ctx context.Context, duration time.Duration) (float64, error) {
cmd := testexec.CommandContext(ctx, raplExec, "--interval_ms="+
strconv.FormatInt(int64(duration/time.Millisecond), 10))
powerConsumptionOutput, err := cmd.CombinedOutput()
if err != nil {
return 0.0, err
}
var powerConsumptionRegex = regexp.MustCompile(`(\d+\.\d+)`)
match := powerConsumptionRegex.FindAllString(string(powerConsumptionOutput), 1)
if len(match) != 1 {
return 0.0, errors.Errorf("failed to parse output of %s", raplExec)
}
powerConsumption, err := strconv.ParseFloat(match[0], 64)
if err != nil {
return 0.0, err
}
return powerConsumption, nil
}
// cpuConfigEntry holds a single CPU config entry. If ignoreErrors is true
// failure to apply the config will result in a warning, rather than an error.
// This is needed as on some platforms we might not have the right permissions
// to disable frequency scaling.
type cpuConfigEntry struct {
path string
value string
ignoreErrors bool
}
// disableCPUFrequencyScaling disables frequency scaling. All CPU cores will be
// set to always run at their maximum frequency. A function is returned so the
// caller can restore the original CPU frequency scaling configuration.
// Depending on the platform different mechanisms are present:
// - Some Intel-based platforms (e.g. Eve and Nocturne) ignore the values set
// in the scaling_governor, and instead use the intel_pstate application to
// control CPU frequency scaling.
// - Most platforms use the scaling_governor to control CPU frequency scaling.
// - Some platforms (e.g. Dru) use a different CPU frequency scaling governor.
func disableCPUFrequencyScaling(ctx context.Context) (func(ctx context.Context) error, error) {
configPatterns := []cpuConfigEntry{
// crbug.com/938729: BIOS settings might prevent us from overwriting intel_pstate/no_turbo.
{"/sys/devices/system/cpu/intel_pstate/no_turbo", "1", true},
// Fix the intel_pstate percentage to 100 if possible. We raise the
// maximum value before the minimum value as the min cannot exceed the
// max. To restore them, the order must be inverted. Note that we set
// and save the original values for these values because changing
// scaling_governor to "performance" can change these values as well.
{"/sys/devices/system/cpu/intel_pstate/max_perf_pct", "100", false},
{"/sys/devices/system/cpu/intel_pstate/min_perf_pct", "100", false},
// crbug.com/977925: Disabled hyperthreading cores are listed but
// writing config for these disabled cores results in 'invalid argument'.
// TODO(dstaessens): Skip disabled CPU cores when setting scaling_governor.
{"/sys/devices/system/cpu/cpu[0-9]*/cpufreq/scaling_governor", "performance", true},
{"/sys/class/devfreq/devfreq[0-9]*/governor", "performance", true},
}
var optimizedConfig []cpuConfigEntry
// Expands patterns in configPatterns and pack actual configs into
// optimizedConfig.
for _, config := range configPatterns {
paths, err := filepath.Glob(config.path)
if err != nil {
return nil, err
}
for _, path := range paths {
optimizedConfig = append(optimizedConfig, cpuConfigEntry{
path,
config.value,
config.ignoreErrors,
})
}
}
origConfig, err := applyConfig(ctx, optimizedConfig)
undo := func(ctx context.Context) error {
_, err := applyConfig(ctx, origConfig)
return err
}
if err != nil {
undo(ctx)
return nil, err
}
return undo, nil
}
// applyConfig applies the specified frequency scaling configuration. A slice of
// cpuConfigEntry needs to be provided and will be processed in order. A slice
// of the original cpuConfigEntry values that were successfully processed is
// returned in reverse order so the caller can restore the original config by
// passing the slice to this function as is. If ignoreErrors is true for a
// config entry we won't return an error upon failure, but will only show a
// warning. The provided context will only be used for logging, so the config
// will even be applied upon timeout.
func applyConfig(ctx context.Context, cpuConfig []cpuConfigEntry) ([]cpuConfigEntry, error) {
var origConfig []cpuConfigEntry
for _, config := range cpuConfig {
origValue, err := ioutil.ReadFile(config.path)
if err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to read %v: %v", config.path, err)
continue
}
if err = ioutil.WriteFile(config.path, []byte(config.value), 0644); err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to write to %v: %v", config.path, err)
continue
}
// Inserts a new entry at the front of origConfig.
e := cpuConfigEntry{config.path, string(origValue), false}
origConfig = append([]cpuConfigEntry{e}, origConfig...)
}
return origConfig, nil
}
// disableThermalThrottling disables thermal throttling, as it might interfere
// with test execution. A function is returned that restores the original
// settings, so the caller can re-enable thermal throttling after testing.
func disableThermalThrottling(ctx context.Context) (func(context.Context) error, error) {
job := getThermalThrottlingJob(ctx)
if job == "" {
return func(ctx context.Context) error { return nil }, nil
}
_, state, _, err := upstart.JobStatus(ctx, job)
if err != nil {
return nil, err
} else if state != upstartcommon.RunningState {
return func(ctx context.Context) error { return nil }, nil
}
if err := upstart.StopJob(ctx, job); err != nil {
return nil, err
}
undo := func(ctx context.Context) error { return upstart.EnsureJobRunning(ctx, job) }
return undo, nil
}
// getThermalThrottlingJob tries to determine the name of the thermal throttling
// job used by the current platform.
func getThermalThrottlingJob(ctx context.Context) string | {
// List of possible thermal throttling jobs that should be disabled:
// - dptf for intel >= baytrail
// - temp_metrics for link
// - thermal for daisy, snow, pit,...
for _, job := range []string{"dptf", "temp_metrics", "thermal"} {
if upstart.JobExists(ctx, job) {
return job
}
}
return ""
} | identifier_body |
|
manage.py | Partial shade, ambient humidity
# Zone D: Full sun, ambient humidity
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def recreate_db():
"""
Recreates a local database. You probably should not use this on
production.
"""
db.session.execute('SET FOREIGN_KEY_CHECKS=0;')
db.session.execute('DROP TABLE IF EXISTS logs;')
db.session.execute('DROP TABLE IF EXISTS employees;')
db.session.execute('DROP TABLE IF EXISTS sales;')
db.session.execute('DROP TABLE IF EXISTS plants;')
db.session.execute('DROP TABLE IF EXISTS products;')
db.session.execute('DROP TABLE IF EXISTS suppliers;')
db.session.execute('DROP TABLE IF EXISTS orders;')
db.session.execute('DROP TABLE IF EXISTS contacts;')
db.session.execute('DROP TABLE IF EXISTS varieties;')
db.session.execute('DROP TABLE IF EXISTS species;')
db.session.execute('DROP TABLE IF EXISTS genera;')
db.session.execute('DROP TABLE IF EXISTS families;')
db.drop_all()
db.create_all()
db.session.commit()
fakePlant = Plant(living = True)
db.session.add(fakePlant)
db.session.commit()
db.session.delete(fakePlant)
db.session.execute('SET FOREIGN_KEY_CHECKS=1;')
db.session.commit()
@manager.command
def OrderMerchandise( numItems, orderDate, dayNum ):
# Simulation
# Generate fake order and add to inventory
newOrder = Orders()
StuffWeSell = [
('Shovel', 14.30, 24.99, 'B'),
('Peat moss - 5L', 4.75, 12.99, 'D'),
('Peat moss - 10L', 6.00, 19.99, 'D'),
('Perlite - 5L', 3.50, 10.99, 'D'),
('Perlite - 10L', 5.00, 16.99, 'D'),
('Hydroton - 10L', 7.31, 14.99, 'D'),
('Vermiculite - 5L', 3.75, 9.99, 'D'),
('Vermiculite - 10L', 5.75, 13.99, 'D'),
('"Premium" dirt - 5L', 0.50, 24.99, 'D'),
('Systemic granules', 7.50, 17.99, 'A'),
('Copper Fungicide', 11.45, 19.99, 'A'),
('Spray bottle', 0.75, 2.99, 'A'),
('Nursery pot - 3in', 0.25, 1.99, 'B'),
('Nursery pot - 6in', 0.35, 2.99, 'B'),
('Nursery pot - 9in', 0.45, 3.99, 'B')
]
for item in StuffWeSell:
newItem = Product(
name = item[0],
quantity = randrange(5,10,1),
price = item[2],
location = item[3],
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = item[1],
date = orderDate,
date_received = (orderDate + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = newOrder.id
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def OrderPlants( numPlants, orderDate, dayNum ):
# Simulation
# Generate fake plant order and add to inventory
| except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format( dayNum, invoice.id, invoice.item))
@manager.command
def updateLog(plantSKU, waterDate, dayNum):
water = WaterLog(
plant = plantSKU,
water = 1,
date = waterDate
)
f = randrange(0,2,1)
if f == 1:
water.feed = 1
water.notes = "General purpose fertilizer; half-strength"
else:
water.feed = 0
db.session.add(water)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: LogEntry[{}] Plant[{}]".format(dayNum, water.id, water.plant))
@manager.command
def checkStock(simDay, dayNum):
stock = Product.query.all()
reOrder = Orders()
for i in stock:
if i.quantity == 0:
newItem = Product(
name = i.name,
quantity = randrange(5,10,1),
price = i.price,
location = i.location,
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = newItem.price / 3,
date = simDay,
date_received = (simDay + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = reOrder.id
db.session.delete(i)
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def waterPlants(simDay, dayNum):
# Water simulated plants
# Group plants by watering requirements
simPlants = db.session.query(Plant).filter(Plant.quantity > 0).all()
for i in simPlants:
lastLog = db.session.query(WaterLog)\
.filter(WaterLog.plant == i.sku)\
.order_by(desc('date')).first()
if i.location == 'A':
if (simDay - lastLog.date) == timedelta(days=5):
updateLog(i.sku, simDay, dayNum)
elif i.location == 'B':
if (simDay - lastLog.date) == timedelta(days=7):
updateLog(i.sku, simDay, dayNum)
else:
if (simDay - lastLog.date) >= timedelta(days=10):
updateLog(i.sku, simDay, dayNum)
@manager.command
def add_fake_data():
# Generate fake customers
Contact.generate_fake(count=50)
# All simulated customers are returning customers
simContacts = db.session.query(Contact.id).all()
# Simulate 60 days of slow, daily business
simDay = date.today() - timedelta(days=60)
dayNum = 0
# Initial inventory
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
OrderMerchandise(5, (simDay - timedelta(days=2)), dayNum)
# Begin simulation
while (date.today() - simDay) > timedelta(days=0):
waterPlants(simDay, dayNum)
checkStock(simDay, dayNum)
if (dayNum % 7) == 0:
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
# Between 2 and 5 customers per day, chosen at random
# (It's ok, everything's overpriced)
# Customers come in an buy a handful of random items
# Sales are generated and added to DB
# Inventory updates
for i in range(1, randrange(2,5,1)):
transactionID = Sales().id
numItems = randrange(1,6,1)
shopper = choice(simContacts)
merch = []
#plantsAvailable = Plant.query.all()
itemsAvailable = Item.query.all()
for item in itemsAvailable:
merch.append(item)
alreadyPicked = []
for j in range(1, numItems):
merchChoice = choice(merch)
if | newOrder = Orders()
plants = Plant.generate_fake( numPlants )
for i in plants:
invoice = Orders(
supplier = 'TGKmf',
date = orderDate,
date_received = orderDate + timedelta(days=2),
item = i.sku,
price = i.price/randrange(2, 5, 1),
qty = i.quantity
)
invoice.id = newOrder.id
db.session.add(invoice)
# Water plants when they arrive
updateLog(i.sku, orderDate, dayNum)
try:
db.session.commit() | identifier_body |
manage.py | .session.execute('DROP TABLE IF EXISTS genera;')
db.session.execute('DROP TABLE IF EXISTS families;')
db.drop_all()
db.create_all()
db.session.commit()
fakePlant = Plant(living = True)
db.session.add(fakePlant)
db.session.commit()
db.session.delete(fakePlant)
db.session.execute('SET FOREIGN_KEY_CHECKS=1;')
db.session.commit()
@manager.command
def OrderMerchandise( numItems, orderDate, dayNum ):
# Simulation
# Generate fake order and add to inventory
newOrder = Orders()
StuffWeSell = [
('Shovel', 14.30, 24.99, 'B'),
('Peat moss - 5L', 4.75, 12.99, 'D'),
('Peat moss - 10L', 6.00, 19.99, 'D'),
('Perlite - 5L', 3.50, 10.99, 'D'),
('Perlite - 10L', 5.00, 16.99, 'D'),
('Hydroton - 10L', 7.31, 14.99, 'D'),
('Vermiculite - 5L', 3.75, 9.99, 'D'),
('Vermiculite - 10L', 5.75, 13.99, 'D'),
('"Premium" dirt - 5L', 0.50, 24.99, 'D'),
('Systemic granules', 7.50, 17.99, 'A'),
('Copper Fungicide', 11.45, 19.99, 'A'),
('Spray bottle', 0.75, 2.99, 'A'),
('Nursery pot - 3in', 0.25, 1.99, 'B'),
('Nursery pot - 6in', 0.35, 2.99, 'B'),
('Nursery pot - 9in', 0.45, 3.99, 'B')
]
for item in StuffWeSell:
newItem = Product(
name = item[0],
quantity = randrange(5,10,1),
price = item[2],
location = item[3],
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = item[1],
date = orderDate,
date_received = (orderDate + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = newOrder.id
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def OrderPlants( numPlants, orderDate, dayNum ):
# Simulation
# Generate fake plant order and add to inventory
newOrder = Orders()
plants = Plant.generate_fake( numPlants )
for i in plants:
invoice = Orders(
supplier = 'TGKmf',
date = orderDate,
date_received = orderDate + timedelta(days=2),
item = i.sku,
price = i.price/randrange(2, 5, 1),
qty = i.quantity
)
invoice.id = newOrder.id
db.session.add(invoice)
# Water plants when they arrive
updateLog(i.sku, orderDate, dayNum)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format( dayNum, invoice.id, invoice.item))
@manager.command
def updateLog(plantSKU, waterDate, dayNum):
water = WaterLog(
plant = plantSKU,
water = 1,
date = waterDate
)
f = randrange(0,2,1)
if f == 1:
water.feed = 1
water.notes = "General purpose fertilizer; half-strength"
else:
water.feed = 0
db.session.add(water)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: LogEntry[{}] Plant[{}]".format(dayNum, water.id, water.plant))
@manager.command
def checkStock(simDay, dayNum):
stock = Product.query.all()
reOrder = Orders()
for i in stock:
if i.quantity == 0:
newItem = Product(
name = i.name,
quantity = randrange(5,10,1),
price = i.price,
location = i.location,
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = newItem.price / 3,
date = simDay,
date_received = (simDay + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = reOrder.id
db.session.delete(i)
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def waterPlants(simDay, dayNum):
# Water simulated plants
# Group plants by watering requirements
simPlants = db.session.query(Plant).filter(Plant.quantity > 0).all()
for i in simPlants:
lastLog = db.session.query(WaterLog)\
.filter(WaterLog.plant == i.sku)\
.order_by(desc('date')).first()
if i.location == 'A':
if (simDay - lastLog.date) == timedelta(days=5):
updateLog(i.sku, simDay, dayNum)
elif i.location == 'B':
if (simDay - lastLog.date) == timedelta(days=7):
updateLog(i.sku, simDay, dayNum)
else:
if (simDay - lastLog.date) >= timedelta(days=10):
updateLog(i.sku, simDay, dayNum)
@manager.command
def add_fake_data():
# Generate fake customers
Contact.generate_fake(count=50)
# All simulated customers are returning customers
simContacts = db.session.query(Contact.id).all()
# Simulate 60 days of slow, daily business
simDay = date.today() - timedelta(days=60)
dayNum = 0
# Initial inventory
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
OrderMerchandise(5, (simDay - timedelta(days=2)), dayNum)
# Begin simulation
while (date.today() - simDay) > timedelta(days=0):
waterPlants(simDay, dayNum)
checkStock(simDay, dayNum)
if (dayNum % 7) == 0:
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
# Between 2 and 5 customers per day, chosen at random
# (It's ok, everything's overpriced)
# Customers come in an buy a handful of random items
# Sales are generated and added to DB
# Inventory updates
for i in range(1, randrange(2,5,1)):
transactionID = Sales().id
numItems = randrange(1,6,1)
shopper = choice(simContacts)
merch = []
#plantsAvailable = Plant.query.all()
itemsAvailable = Item.query.all()
for item in itemsAvailable:
merch.append(item)
alreadyPicked = []
for j in range(1, numItems):
merchChoice = choice(merch)
if merchChoice.sku in alreadyPicked:
pass
elif merchChoice.quantity > 0:
multiplier = randrange(1,5,1)
cart = Sales(
id = transactionID,
date = simDay,
customer = shopper,
item = merchChoice.sku,
salePrice = merchChoice.price
)
# Check if we have enough of that item
if multiplier <= merchChoice.quantity:
cart.qty = multiplier
else:
cart.qty = 1
# Create entry in Sales
db.session.add(cart)
# Update quantities in Inventory DB
merchChoice.updateQty( -cart.qty )
# Don't pick the same item twice
alreadyPicked.append(cart.item)
dprint("Day {}: Customer[{}] purchased Item[{}]".format(dayNum, shopper, cart.item))
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
# Advance simulation one day | simDay += timedelta(days=1)
dayNum += 1 | random_line_split |
|
manage.py | 12.99, 'D'),
('Peat moss - 10L', 6.00, 19.99, 'D'),
('Perlite - 5L', 3.50, 10.99, 'D'),
('Perlite - 10L', 5.00, 16.99, 'D'),
('Hydroton - 10L', 7.31, 14.99, 'D'),
('Vermiculite - 5L', 3.75, 9.99, 'D'),
('Vermiculite - 10L', 5.75, 13.99, 'D'),
('"Premium" dirt - 5L', 0.50, 24.99, 'D'),
('Systemic granules', 7.50, 17.99, 'A'),
('Copper Fungicide', 11.45, 19.99, 'A'),
('Spray bottle', 0.75, 2.99, 'A'),
('Nursery pot - 3in', 0.25, 1.99, 'B'),
('Nursery pot - 6in', 0.35, 2.99, 'B'),
('Nursery pot - 9in', 0.45, 3.99, 'B')
]
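# Each StuffWeSell entry is (name, supplier cost, retail price, storage zone); the loop below uses item[1] as the order price and item[2] as the shelf price.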
for item in StuffWeSell:
newItem = Product(
name = item[0],
quantity = randrange(5,10,1),
price = item[2],
location = item[3],
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = item[1],
date = orderDate,
date_received = (orderDate + timedelta(days=2)),
supplier = 'TAGda'
)
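# Group every line item under the single purchase-order id created above (assumes Orders() assigns its id on construction).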
invoice.id = newOrder.id
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def OrderPlants( numPlants, orderDate, dayNum ):
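"""Simulate receiving a wholesale plant order: generate fake plants, add an invoice line for each, and water them on arrival."""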
# Simulation
# Generate fake plant order and add to inventory
newOrder = Orders()
plants = Plant.generate_fake( numPlants )
for i in plants:
invoice = Orders(
supplier = 'TGKmf',
date = orderDate,
date_received = orderDate + timedelta(days=2),
item = i.sku,
price = i.price/randrange(2, 5, 1),
qty = i.quantity
)
invoice.id = newOrder.id
db.session.add(invoice)
# Water plants when they arrive
updateLog(i.sku, orderDate, dayNum)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format( dayNum, invoice.id, invoice.item))
@manager.command
def updateLog(plantSKU, waterDate, dayNum):
water = WaterLog(
plant = plantSKU,
water = 1,
date = waterDate
)
f = randrange(0,2,1)
if f == 1:
water.feed = 1
water.notes = "General purpose fertilizer; half-strength"
else:
water.feed = 0
db.session.add(water)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: LogEntry[{}] Plant[{}]".format(dayNum, water.id, water.plant))
@manager.command
def checkStock(simDay, dayNum):
stock = Product.query.all()
reOrder = Orders()
for i in stock:
if i.quantity == 0:
newItem = Product(
name = i.name,
quantity = randrange(5,10,1),
price = i.price,
location = i.location,
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = newItem.price / 3,
date = simDay,
date_received = (simDay + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = reOrder.id
db.session.delete(i)
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def waterPlants(simDay, dayNum):
# Water simulated plants
# Group plants by watering requirements
simPlants = db.session.query(Plant).filter(Plant.quantity > 0).all()
for i in simPlants:
lastLog = db.session.query(WaterLog)\
.filter(WaterLog.plant == i.sku)\
.order_by(desc('date')).first()
if i.location == 'A':
if (simDay - lastLog.date) == timedelta(days=5):
updateLog(i.sku, simDay, dayNum)
elif i.location == 'B':
if (simDay - lastLog.date) == timedelta(days=7):
updateLog(i.sku, simDay, dayNum)
else:
if (simDay - lastLog.date) >= timedelta(days=10):
updateLog(i.sku, simDay, dayNum)
@manager.command
def add_fake_data():
# Generate fake customers
Contact.generate_fake(count=50)
# All simulated customers are returning customers
simContacts = db.session.query(Contact.id).all()
# Simulate 60 days of slow, daily business
simDay = date.today() - timedelta(days=60)
dayNum = 0
# Initial inventory
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
OrderMerchandise(5, (simDay - timedelta(days=2)), dayNum)
# Begin simulation
while (date.today() - simDay) > timedelta(days=0):
waterPlants(simDay, dayNum)
checkStock(simDay, dayNum)
if (dayNum % 7) == 0:
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
# Between 2 and 5 customers per day, chosen at random
# (It's ok, everything's overpriced)
# Customers come in and buy a handful of random items
# Sales are generated and added to DB
# Inventory updates
for i in range(1, randrange(2,5,1)):
transactionID = Sales().id
numItems = randrange(1,6,1)
shopper = choice(simContacts)
merch = []
#plantsAvailable = Plant.query.all()
itemsAvailable = Item.query.all()
for item in itemsAvailable:
merch.append(item)
alreadyPicked = []
for j in range(1, numItems):
merchChoice = choice(merch)
if merchChoice.sku in alreadyPicked:
pass
elif merchChoice.quantity > 0:
multiplier = randrange(1,5,1)
cart = Sales(
id = transactionID,
date = simDay,
customer = shopper,
item = merchChoice.sku,
salePrice = merchChoice.price
)
# Check if we have enough of that item
if multiplier <= merchChoice.quantity:
cart.qty = multiplier
else:
cart.qty = 1
# Create entry in Sales
db.session.add(cart)
# Update quantities in Inventory DB
merchChoice.updateQty( -cart.qty )
# Don't pick the same item twice
alreadyPicked.append(cart.item)
dprint("Day {}: Customer[{}] purchased Item[{}]".format(dayNum, shopper, cart.item))
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
# Advance simulation one day
simDay += timedelta(days=1)
dayNum += 1
@manager.command
def setup_dev():
"""Runs the set-up needed for local development."""
setup_general()
@manager.command
def setup_prod():
"""Runs the set-up needed for production."""
setup_general()
def setup_general():
"""Runs the set-up needed for both local development and production.
Also sets up first admin user."""
Role.insert_roles()
admin_query = Role.query.filter_by(name='Administrator')
if admin_query.first() is not None:
if Employee.query.filter_by(email=Config.ADMIN_EMAIL).first() is None:
| user = Employee(first_name='Admin',
last_name='Account',
password=Config.ADMIN_PASSWORD,
email=Config.ADMIN_EMAIL)
db.session.add(user)
db.session.commit()
print('Added administrator {}'.format(user.full_name())) | conditional_block |
|
manage.py | Partial shade, ambient humidity
# Zone D: Full sun, ambient humidity
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def recreate_db():
"""
Recreates a local database. You probably should not use this on
production.
"""
db.session.execute('SET FOREIGN_KEY_CHECKS=0;')
db.session.execute('DROP TABLE IF EXISTS logs;')
db.session.execute('DROP TABLE IF EXISTS employees;')
db.session.execute('DROP TABLE IF EXISTS sales;')
db.session.execute('DROP TABLE IF EXISTS plants;')
db.session.execute('DROP TABLE IF EXISTS products;')
db.session.execute('DROP TABLE IF EXISTS suppliers;')
db.session.execute('DROP TABLE IF EXISTS orders;')
db.session.execute('DROP TABLE IF EXISTS contacts;')
db.session.execute('DROP TABLE IF EXISTS varieties;')
db.session.execute('DROP TABLE IF EXISTS species;')
db.session.execute('DROP TABLE IF EXISTS genera;')
db.session.execute('DROP TABLE IF EXISTS families;')
db.drop_all()
db.create_all()
db.session.commit()
fakePlant = Plant(living = True)
db.session.add(fakePlant)
db.session.commit()
db.session.delete(fakePlant)
db.session.execute('SET FOREIGN_KEY_CHECKS=1;')
db.session.commit()
@manager.command
def | ( numItems, orderDate, dayNum ):
# Simulation
# Generate fake order and add to inventory
newOrder = Orders()
StuffWeSell = [
('Shovel', 14.30, 24.99, 'B'),
('Peat moss - 5L', 4.75, 12.99, 'D'),
('Peat moss - 10L', 6.00, 19.99, 'D'),
('Perlite - 5L', 3.50, 10.99, 'D'),
('Perlite - 10L', 5.00, 16.99, 'D'),
('Hydroton - 10L', 7.31, 14.99, 'D'),
('Vermiculite - 5L', 3.75, 9.99, 'D'),
('Vermiculite - 10L', 5.75, 13.99, 'D'),
('"Premium" dirt - 5L', 0.50, 24.99, 'D'),
('Systemic granules', 7.50, 17.99, 'A'),
('Copper Fungicide', 11.45, 19.99, 'A'),
('Spray bottle', 0.75, 2.99, 'A'),
('Nursery pot - 3in', 0.25, 1.99, 'B'),
('Nursery pot - 6in', 0.35, 2.99, 'B'),
('Nursery pot - 9in', 0.45, 3.99, 'B')
]
for item in StuffWeSell:
newItem = Product(
name = item[0],
quantity = randrange(5,10,1),
price = item[2],
location = item[3],
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = item[1],
date = orderDate,
date_received = (orderDate + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = newOrder.id
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def OrderPlants( numPlants, orderDate, dayNum ):
# Simulation
# Generate fake plant order and add to inventory
newOrder = Orders()
plants = Plant.generate_fake( numPlants )
for i in plants:
invoice = Orders(
supplier = 'TGKmf',
date = orderDate,
date_received = orderDate + timedelta(days=2),
item = i.sku,
price = i.price/randrange(2, 5, 1),
qty = i.quantity
)
invoice.id = newOrder.id
db.session.add(invoice)
# Water plants when they arrive
updateLog(i.sku, orderDate, dayNum)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format( dayNum, invoice.id, invoice.item))
@manager.command
def updateLog(plantSKU, waterDate, dayNum):
water = WaterLog(
plant = plantSKU,
water = 1,
date = waterDate
)
f = randrange(0,2,1)
if f == 1:
water.feed = 1
water.notes = "General purpose fertilizer; half-strength"
else:
water.feed = 0
db.session.add(water)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: LogEntry[{}] Plant[{}]".format(dayNum, water.id, water.plant))
@manager.command
def checkStock(simDay, dayNum):
stock = Product.query.all()
reOrder = Orders()
for i in stock:
if i.quantity == 0:
newItem = Product(
name = i.name,
quantity = randrange(5,10,1),
price = i.price,
location = i.location,
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = newItem.price / 3,
date = simDay,
date_received = (simDay + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = reOrder.id
db.session.delete(i)
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def waterPlants(simDay, dayNum):
# Water simulated plants
# Group plants by watering requirements
simPlants = db.session.query(Plant).filter(Plant.quantity > 0).all()
for i in simPlants:
lastLog = db.session.query(WaterLog)\
.filter(WaterLog.plant == i.sku)\
.order_by(desc('date')).first()
if i.location == 'A':
if (simDay - lastLog.date) == timedelta(days=5):
updateLog(i.sku, simDay, dayNum)
elif i.location == 'B':
if (simDay - lastLog.date) == timedelta(days=7):
updateLog(i.sku, simDay, dayNum)
else:
if (simDay - lastLog.date) >= timedelta(days=10):
updateLog(i.sku, simDay, dayNum)
@manager.command
def add_fake_data():
# Generate fake customers
Contact.generate_fake(count=50)
# All simulated customers are returning customers
simContacts = db.session.query(Contact.id).all()
# Simulate 60 days of slow, daily business
simDay = date.today() - timedelta(days=60)
dayNum = 0
# Initial inventory
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
OrderMerchandise(5, (simDay - timedelta(days=2)), dayNum)
# Begin simulation
while (date.today() - simDay) > timedelta(days=0):
waterPlants(simDay, dayNum)
checkStock(simDay, dayNum)
if (dayNum % 7) == 0:
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
# Between 2 and 5 customers per day, chosen at random
# (It's ok, everything's overpriced)
# Customers come in and buy a handful of random items
# Sales are generated and added to DB
# Inventory updates
for i in range(1, randrange(2,5,1)):
transactionID = Sales().id
numItems = randrange(1,6,1)
shopper = choice(simContacts)
merch = []
#plantsAvailable = Plant.query.all()
itemsAvailable = Item.query.all()
for item in itemsAvailable:
merch.append(item)
alreadyPicked = []
for j in range(1, numItems):
merchChoice = choice(merch)
| OrderMerchandise | identifier_name |
lambda.rs | X_COMMUNITY_KEYGEN_POLICY_ID").unwrap();
static ref SMTP_SERVER: String = env::var("SMTP_SERVER").unwrap();
static ref SMTP_USERNAME: String = env::var("SMTP_USERNAME").unwrap();
static ref SMTP_PASSWORD: String = env::var("SMTP_PASSWORD").unwrap();
}
fn router(req: Request, c: Context) -> Result<Response<Body>, HandlerError> {
debug!("router request={:?}", req);
debug!("path={:?}", req.uri().path());
debug!("query={:?}", req.query_string_parameters());
let client = reqwest::Client::new();
match req.uri().path() {
"/fastspring-keygen-integration-service/keygen/create" => match *req.method() {
http::Method::POST => handle_keygen_create(req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/webhooks" => match *req.method() {
http::Method::POST => handle_webhook(&client, req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/patreon" => match *req.method() {
http::Method::POST => handle_patreon_webhook(&client, req, c),
_ => not_allowed(req, c),
},
_ => not_found(req, c),
}
}
fn license_key(code: &str) -> Option<&str> {
code.split('.').nth(1)
}
fn handle_patreon_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError>
{
if !patreon::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let trigger = req.headers().get("X-Patreon-Event")
.ok_or("invalid format (X-Patreon-Event)")?
.to_str().ok().ok_or("invalid format (X-Patreon-Event)")?;
debug!("X-Patreon-Event: {}", trigger);
let body = util::body_to_json(req.body())?;
if trigger == "pledges:create" {
patreon_handle_pledge_create(client, &body)?; | Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Patreon pledge create trigger
fn patreon_handle_pledge_create(
client: &reqwest::Client,
body: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_create {:?}", body);
let user_id = body["data"]["relationships"]["patron"]["data"]["id"].as_str().ok_or("invalid format (.data.relationships.patron.data.id)")?;
let mut user_email = None;
let mut user_first_name = None;
for included in body["included"].as_array().ok_or("invalid format (.included)")?.iter() {
if included["id"].as_str().ok_or("invalid format (.included.#.id)")? == user_id {
user_email = Some(included["attributes"]["email"].as_str().ok_or("invalid format (.included.#.attributes.email)")?);
user_first_name = included["attributes"]["first_name"].as_str();
}
}
let user_email = user_email.ok_or("could not find patron email")?;
debug!("patron email: {}", user_email);
let license=
keygen::generate_license(
client,
"PATREON",
MNPRX_COMMUNITY_KEYGEN_POLICY_ID.as_ref(),
None,
Some(user_id),
false)?;
let user_name = body["data"]["relationships"]["patron"]["data"]["id"].as_str().unwrap_or("");
let email_body = format!(r##"Hi,
Thank you for becoming our patron!
You can activate your Flair Community license with the following key:
{}
For more information on how to install and activate your license, please refer to the documentation: https://docs.artineering.io/flair/setup/
If you encounter any issues, please feel free to reach out to us through Discord; we are here to help.
Have fun using Flair and make sure to share your results with the community.
Cheers,
Your team at Artineering."##, license);
// send the license to the patron
let email = Message::builder()
.from("Artineering <[email protected]>".parse().unwrap())
.reply_to("Artineering <[email protected]>".parse().unwrap())
.to(user_email.parse().unwrap())
.bcc("[email protected]".parse().unwrap())
.subject("[Flair] Your Community license key")
.body(email_body)
.unwrap();
let creds = Credentials::new(SMTP_USERNAME.clone(), SMTP_PASSWORD.clone());
let mailer = SmtpTransport::relay(SMTP_SERVER.as_ref())
.unwrap()
.credentials(creds)
.build();
match mailer.send(&email) {
Ok(_) => info!("Email sent successfully"),
Err(e) => panic!("Could not send email: {:?}", e),
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Patreon pledge delete trigger
fn patreon_handle_pledge_delete(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_delete {:?}", data);
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
fn handle_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError> {
if !fastspring::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let events_json = util::body_to_json(req.body())?;
let events_json = events_json["events"].as_array().ok_or("invalid format")?;
// TODO do not reply OK every time: check each event
for e in events_json {
let ty = e["type"].as_str().ok_or("invalid format")?;
let data = &e["data"];
match ty {
"subscription.deactivated" => {
handle_subscription_deactivated(client, data)?;
}
_ => {
warn!("unhandled webhook: {}", ty);
}
};
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Handles deactivation of subscriptions.
///
/// This will suspend all licenses associated with the order.
fn handle_subscription_deactivated(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError> {
debug!("handle_subscription_deactivated {:?}", data);
let subscription_id = data["id"].as_str().ok_or("invalid format (.id)")?;
info!("subscription deactivated: {}", subscription_id);
let orders = fastspring::get_subscription_entries(client, subscription_id)?;
// find the original order
// according to the API, this is the entry whose ".reference" field does not include
// a "B" (for "billing") at the end. All the others are subscription billing orders.
let original_order = orders.as_array().ok_or("invalid format (orders)")?.iter().find(|&order| {
let order = &order["order"];
if order["reference"].is_null() { return false; }
if let Some(s) = order["reference"].as_str() {
!s.ends_with('B')
} else {
false
}
});
let original_order = original_order.ok_or("could not find original order")?;
let order_items = original_order["order"]["items"]
.as_array()
.ok_or("invalid format (.order.items)")?;
// Collect all licenses to revoke
let mut licenses_to_revoke = Vec::new();
for item in order_items.iter() {
//let product = &item["product"];
for (_k, v) in item["fulfillments"]
.as_object()
.ok_or("invalid format (.fulfillments)")?
.iter()
{
if let Some(licenses) = v.as_array() {
for l in licenses {
let code = if let Some(s) = l["license"].as_str() {
s
} else {
continue;
};
licenses_to_revoke.push(String::from(code));
}
}
}
}
// revoke all licenses
for lic in licenses_to_revoke.iter() {
let key = license_key(lic).ok_or("invalid license key")?;
keygen::revoke_license(key)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Handles license creation requests (coming from FastSpring).
fn handle_keygen_create(req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
if !fastspring::verify_license_gen | } else if trigger == "pledges:delete" {
patreon_handle_pledge_delete(client, &body)?;
}
| random_line_split |
lambda.rs | _COMMUNITY_KEYGEN_POLICY_ID").unwrap();
static ref SMTP_SERVER: String = env::var("SMTP_SERVER").unwrap();
static ref SMTP_USERNAME: String = env::var("SMTP_USERNAME").unwrap();
static ref SMTP_PASSWORD: String = env::var("SMTP_PASSWORD").unwrap();
}
fn router(req: Request, c: Context) -> Result<Response<Body>, HandlerError> {
debug!("router request={:?}", req);
debug!("path={:?}", req.uri().path());
debug!("query={:?}", req.query_string_parameters());
let client = reqwest::Client::new();
match req.uri().path() {
"/fastspring-keygen-integration-service/keygen/create" => match *req.method() {
http::Method::POST => handle_keygen_create(req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/webhooks" => match *req.method() {
http::Method::POST => handle_webhook(&client, req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/patreon" => match *req.method() {
http::Method::POST => handle_patreon_webhook(&client, req, c),
_ => not_allowed(req, c),
},
_ => not_found(req, c),
}
}
fn license_key(code: &str) -> Option<&str> |
fn handle_patreon_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError>
{
if !patreon::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let trigger = req.headers().get("X-Patreon-Event")
.ok_or("invalid format (X-Patreon-Event)")?
.to_str().ok().ok_or("invalid format (X-Patreon-Event)")?;
debug!("X-Patreon-Event: {}", trigger);
let body = util::body_to_json(req.body())?;
if trigger == "pledges:create" {
patreon_handle_pledge_create(client, &body)?;
} else if trigger == "pledges:delete" {
patreon_handle_pledge_delete(client, &body)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Patreon pledge create trigger
fn patreon_handle_pledge_create(
client: &reqwest::Client,
body: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_create {:?}", body);
let user_id = body["data"]["relationships"]["patron"]["data"]["id"].as_str().ok_or("invalid format (.data.relationships.patron.data.id)")?;
let mut user_email = None;
let mut user_first_name = None;
for included in body["included"].as_array().ok_or("invalid format (.included)")?.iter() {
if included["id"].as_str().ok_or("invalid format (.included.#.id)")? == user_id {
user_email = Some(included["attributes"]["email"].as_str().ok_or("invalid format (.included.#.attributes.email)")?);
user_first_name = included["attributes"]["first_name"].as_str();
}
}
let user_email = user_email.ok_or("could not find patron email")?;
debug!("patron email: {}", user_email);
let license=
keygen::generate_license(
client,
"PATREON",
MNPRX_COMMUNITY_KEYGEN_POLICY_ID.as_ref(),
None,
Some(user_id),
false)?;
let user_name = body["data"]["relationships"]["patron"]["data"]["id"].as_str().unwrap_or("");
let email_body = format!(r##"Hi,
Thank you for becoming our patron!
You can activate your Flair Community license with the following key:
{}
For more information on how to install and activate your license, please refer to the documentation: https://docs.artineering.io/flair/setup/
If you encounter any issues, please feel free to reach out to us through Discord; we are here to help.
Have fun using Flair and make sure to share your results with the community.
Cheers,
Your team at Artineering."##, license);
// send the license to the patron
let email = Message::builder()
.from("Artineering <[email protected]>".parse().unwrap())
.reply_to("Artineering <[email protected]>".parse().unwrap())
.to(user_email.parse().unwrap())
.bcc("[email protected]".parse().unwrap())
.subject("[Flair] Your Community license key")
.body(email_body)
.unwrap();
let creds = Credentials::new(SMTP_USERNAME.clone(), SMTP_PASSWORD.clone());
let mailer = SmtpTransport::relay(SMTP_SERVER.as_ref())
.unwrap()
.credentials(creds)
.build();
match mailer.send(&email) {
Ok(_) => info!("Email sent successfully"),
Err(e) => panic!("Could not send email: {:?}", e),
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Patreon pledge delete trigger
fn patreon_handle_pledge_delete(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_delete {:?}", data);
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
fn handle_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError> {
if !fastspring::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let events_json = util::body_to_json(req.body())?;
let events_json = events_json["events"].as_array().ok_or("invalid format")?;
// TODO do not reply OK every time: check each event
for e in events_json {
let ty = e["type"].as_str().ok_or("invalid format")?;
let data = &e["data"];
match ty {
"subscription.deactivated" => {
handle_subscription_deactivated(client, data)?;
}
_ => {
warn!("unhandled webhook: {}", ty);
}
};
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Handles deactivation of subscriptions.
///
/// This will suspend all licenses associated with the order.
fn handle_subscription_deactivated(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError> {
debug!("handle_subscription_deactivated {:?}", data);
let subscription_id = data["id"].as_str().ok_or("invalid format (.id)")?;
info!("subscription deactivated: {}", subscription_id);
let orders = fastspring::get_subscription_entries(client, subscription_id)?;
// find the original order
// according to the API, this is the entry whose ".reference" field does not include
// a "B" (for "billing") at the end. All the others are subscription billing orders.
let original_order = orders.as_array().ok_or("invalid format (orders)")?.iter().find(|&order| {
let order = &order["order"];
if order["reference"].is_null() { return false; }
if let Some(s) = order["reference"].as_str() {
!s.ends_with('B')
} else {
false
}
});
let original_order = original_order.ok_or("could not find original order")?;
let order_items = original_order["order"]["items"]
.as_array()
.ok_or("invalid format (.order.items)")?;
// Collect all licenses to revoke
let mut licenses_to_revoke = Vec::new();
for item in order_items.iter() {
//let product = &item["product"];
for (_k, v) in item["fulfillments"]
.as_object()
.ok_or("invalid format (.fulfillments)")?
.iter()
{
if let Some(licenses) = v.as_array() {
for l in licenses {
let code = if let Some(s) = l["license"].as_str() {
s
} else {
continue;
};
licenses_to_revoke.push(String::from(code));
}
}
}
}
// revoke all licenses
for lic in licenses_to_revoke.iter() {
let key = license_key(lic).ok_or("invalid license key")?;
keygen::revoke_license(key)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Handles license creation requests (coming from FastSpring).
fn handle_keygen_create(req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
if !fastspring::verify_license_gen | {
code.split('.').nth(1)
} | identifier_body |
lambda.rs | X_COMMUNITY_KEYGEN_POLICY_ID").unwrap();
static ref SMTP_SERVER: String = env::var("SMTP_SERVER").unwrap();
static ref SMTP_USERNAME: String = env::var("SMTP_USERNAME").unwrap();
static ref SMTP_PASSWORD: String = env::var("SMTP_PASSWORD").unwrap();
}
fn | (req: Request, c: Context) -> Result<Response<Body>, HandlerError> {
debug!("router request={:?}", req);
debug!("path={:?}", req.uri().path());
debug!("query={:?}", req.query_string_parameters());
let client = reqwest::Client::new();
match req.uri().path() {
"/fastspring-keygen-integration-service/keygen/create" => match *req.method() {
http::Method::POST => handle_keygen_create(req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/webhooks" => match *req.method() {
http::Method::POST => handle_webhook(&client, req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/patreon" => match *req.method() {
http::Method::POST => handle_patreon_webhook(&client, req, c),
_ => not_allowed(req, c),
},
_ => not_found(req, c),
}
}
fn license_key(code: &str) -> Option<&str> {
code.split('.').nth(1)
}
fn handle_patreon_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError>
{
if !patreon::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let trigger = req.headers().get("X-Patreon-Event")
.ok_or("invalid format (X-Patreon-Event)")?
.to_str().ok().ok_or("invalid format (X-Patreon-Event)")?;
debug!("X-Patreon-Event: {}", trigger);
let body = util::body_to_json(req.body())?;
if trigger == "pledges:create" {
patreon_handle_pledge_create(client, &body)?;
} else if trigger == "pledges:delete" {
patreon_handle_pledge_delete(client, &body)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Patreon pledge create trigger
fn patreon_handle_pledge_create(
client: &reqwest::Client,
body: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_create {:?}", body);
let user_id = body["data"]["relationships"]["patron"]["data"]["id"].as_str().ok_or("invalid format (.data.relationships.patron.data.id)")?;
let mut user_email = None;
let mut user_first_name = None;
for included in body["included"].as_array().ok_or("invalid format (.included)")?.iter() {
if included["id"].as_str().ok_or("invalid format (.included.#.id)")? == user_id {
user_email = Some(included["attributes"]["email"].as_str().ok_or("invalid format (.included.#.attributes.email)")?);
user_first_name = included["attributes"]["first_name"].as_str();
}
}
let user_email = user_email.ok_or("could not find patron email")?;
debug!("patron email: {}", user_email);
let license=
keygen::generate_license(
client,
"PATREON",
MNPRX_COMMUNITY_KEYGEN_POLICY_ID.as_ref(),
None,
Some(user_id),
false)?;
let user_name = body["data"]["relationships"]["patron"]["data"]["id"].as_str().unwrap_or("");
let email_body = format!(r##"Hi,
Thank you for becoming our patron!
You can activate your Flair Community license with the following key:
{}
For more information on how to install and activate your license, please refer to the documentation: https://docs.artineering.io/flair/setup/
If you encounter any issues, please feel free to reach out to us through Discord; we are here to help.
Have fun using Flair and make sure to share your results with the community.
Cheers,
Your team at Artineering."##, license);
// send the license to the patron
let email = Message::builder()
.from("Artineering <[email protected]>".parse().unwrap())
.reply_to("Artineering <[email protected]>".parse().unwrap())
.to(user_email.parse().unwrap())
.bcc("[email protected]".parse().unwrap())
.subject("[Flair] Your Community license key")
.body(email_body)
.unwrap();
let creds = Credentials::new(SMTP_USERNAME.clone(), SMTP_PASSWORD.clone());
let mailer = SmtpTransport::relay(SMTP_SERVER.as_ref())
.unwrap()
.credentials(creds)
.build();
match mailer.send(&email) {
Ok(_) => info!("Email sent successfully"),
Err(e) => panic!("Could not send email: {:?}", e),
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Patreon pledge delete trigger
fn patreon_handle_pledge_delete(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_delete {:?}", data);
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
fn handle_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError> {
if !fastspring::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let events_json = util::body_to_json(req.body())?;
let events_json = events_json["events"].as_array().ok_or("invalid format")?;
// TODO do not reply OK every time: check each event
for e in events_json {
let ty = e["type"].as_str().ok_or("invalid format")?;
let data = &e["data"];
match ty {
"subscription.deactivated" => {
handle_subscription_deactivated(client, data)?;
}
_ => {
warn!("unhandled webhook: {}", ty);
}
};
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Handles deactivation of subscriptions.
///
/// This will suspend all licenses associated with the order.
fn handle_subscription_deactivated(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError> {
debug!("handle_subscription_deactivated {:?}", data);
let subscription_id = data["id"].as_str().ok_or("invalid format (.id)")?;
info!("subscription deactivated: {}", subscription_id);
let orders = fastspring::get_subscription_entries(client, subscription_id)?;
// find the original order
// according to the API, this is the entry whose ".reference" field does not include
// a "B" (for "billing") at the end. All the others are subscription billing orders.
let original_order = orders.as_array().ok_or("invalid format (orders)")?.iter().find(|&order| {
let order = &order["order"];
if order["reference"].is_null() { return false; }
if let Some(s) = order["reference"].as_str() {
!s.ends_with('B')
} else {
false
}
});
let original_order = original_order.ok_or("could not find original order")?;
let order_items = original_order["order"]["items"]
.as_array()
.ok_or("invalid format (.order.items)")?;
// Collect all licenses to revoke
let mut licenses_to_revoke = Vec::new();
for item in order_items.iter() {
//let product = &item["product"];
for (_k, v) in item["fulfillments"]
.as_object()
.ok_or("invalid format (.fulfillments)")?
.iter()
{
if let Some(licenses) = v.as_array() {
for l in licenses {
let code = if let Some(s) = l["license"].as_str() {
s
} else {
continue;
};
licenses_to_revoke.push(String::from(code));
}
}
}
}
// revoke all licenses
for lic in licenses_to_revoke.iter() {
let key = license_key(lic).ok_or("invalid license key")?;
keygen::revoke_license(key)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Handles license creation requests (coming from FastSpring).
fn handle_keygen_create(req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
if !fastspring::verify_license_gen | router | identifier_name |