| file_name | prefix | suffix | middle | fim_type |
|---|---|---|---|---|
| large_string (lengths 4-140) | large_string (lengths 0-12.1k) | large_string (lengths 0-12k) | large_string (lengths 0-7.51k) | large_string (4 class values) |
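Each row pairs a `prefix` and `suffix` from a source file with the held-out `middle` span, and `fim_type` records how that span was chosen (random line split, identifier name, identifier body, or conditional block). A minimal sketch of how one row might be assembled into a single fill-in-the-middle training string is shown below; the sentinel tokens are placeholders, since the card does not specify which tokenizer or model format is used.

```python
def build_fim_example(prefix: str, middle: str, suffix: str) -> str:
    # PSM (prefix-suffix-middle) ordering; substitute your model's real
    # sentinel tokens -- the names below are only illustrative.
    return f"<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>{middle}"

example = build_fim_example(
    prefix="func (inst *IndependentInstance) StepID() int ",
    middle="{\n\treturn inst.stepID\n}",
    suffix="\nfunc (inst *IndependentInstance) DoStep() bool {",
)
```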
instances.go
		return errors.New("embedded instance is not from this independent instance")
}
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
inst.startInstance(embedded)
return nil
}
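// Start sets the instance start attributes and begins execution of the root flow instance.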
func (inst *IndependentInstance) Start(startAttrs map[string]*data.Attribute) bool {
inst.attrs = startAttrs
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
return inst.startInstance(inst.Instance)
}
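// ApplyPatch applies the given patch to the flow instance if no patch has been applied yet.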
func (inst *IndependentInstance) ApplyPatch(patch *support.Patch) {
if inst.patch == nil {
inst.patch = patch
inst.patch.Init()
}
}
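// ApplyInterceptor attaches the given interceptor to the flow instance if none is attached yet.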
func (inst *IndependentInstance) ApplyInterceptor(interceptor *support.Interceptor) {
if inst.interceptor == nil {
inst.interceptor = interceptor
inst.interceptor.Init()
}
}
// GetChanges returns the Change Tracker object
func (inst *IndependentInstance) GetChanges() *InstanceChangeTracker {
return inst.ChangeTracker
}
// ResetChanges resets any changes that were being tracked
func (inst *IndependentInstance) ResetChanges() {
if inst.ChangeTracker != nil {
inst.ChangeTracker.ResetChanges()
}
//todo: can we reuse this to avoid gc
inst.ChangeTracker = NewInstanceChangeTracker()
}
// StepID returns the current step ID of the Flow Instance
func (inst *IndependentInstance) StepID() int {
return inst.stepID
}
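// DoStep performs a single execution step of the Flow Instance and reports whether more work items remain.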
func (inst *IndependentInstance) DoStep() bool {
hasNext := false
inst.ResetChanges()
inst.stepID++
if inst.status == model.FlowStatusActive {
// get item to be worked on
item, ok := inst.workItemQueue.Pop()
if ok {
logger.Debug("Retrieved item from Flow Instance work queue")
workItem := item.(*WorkItem)
// get the corresponding behavior
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := workItem.taskInst.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
// track the fact that the work item was removed from the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtDel, ID: workItem.ID, WorkItem: workItem})
inst.execTask(behavior, workItem.taskInst)
hasNext = true
} else {
logger.Debug("Flow Instance work queue empty")
}
}
return hasNext
}
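// scheduleEval creates a work item for the given task instance and pushes it onto the work item queue.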
func (inst *IndependentInstance) scheduleEval(taskInst *TaskInst) {
inst.wiCounter++
workItem := NewWorkItem(inst.wiCounter, taskInst)
logger.Debugf("Scheduling task '%s'", taskInst.task.ID())
inst.workItemQueue.Push(workItem)
// track the fact that the work item was added to the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtAdd, ID: workItem.ID, WorkItem: workItem})
}
// execTask executes the specified Work Item of the Flow Instance
func (inst *IndependentInstance) execTask(behavior model.TaskBehavior, taskInst *TaskInst) {
defer func() {
if r := recover(); r != nil {
err := fmt.Errorf("Unhandled Error executing task '%s' : %v", taskInst.task.Name(), r)
logger.Error(err)
// todo: useful for debugging
logger.Errorf("StackTrace: %s", debug.Stack())
if !taskInst.flowInst.isHandlingError {
taskInst.appendErrorData(NewActivityEvalError(taskInst.task.Name(), "unhandled", err.Error()))
inst.HandleGlobalError(taskInst.flowInst, err)
}
// else what should we do?
}
}()
var err error
var evalResult model.EvalResult
if taskInst.status == model.TaskStatusWaiting {
evalResult, err = behavior.PostEval(taskInst)
} else {
evalResult, err = behavior.Eval(taskInst)
}
if err != nil {
taskInst.returnError = err
inst.handleTaskError(behavior, taskInst, err)
return
}
switch evalResult {
case model.EVAL_DONE:
taskInst.SetStatus(model.TaskStatusDone)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_SKIP:
taskInst.SetStatus(model.TaskStatusSkipped)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_WAIT:
taskInst.SetStatus(model.TaskStatusWaiting)
case model.EVAL_FAIL:
taskInst.SetStatus(model.TaskStatusFailed)
case model.EVAL_REPEAT:
taskInst.SetStatus(model.TaskStatusReady)
//task needs to iterate or retry
inst.scheduleEval(taskInst)
}
}
// handleTaskDone handles the completion of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskDone(taskBehavior model.TaskBehavior, taskInst *TaskInst) {
notifyFlow := false
var taskEntries []*model.TaskEntry
var err error
if taskInst.Status() == model.TaskStatusSkipped {
notifyFlow, taskEntries = taskBehavior.Skip(taskInst)
} else {
notifyFlow, taskEntries, err = taskBehavior.Done(taskInst)
}
containerInst := taskInst.flowInst
if err != nil {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
return
}
flowDone := false
task := taskInst.Task()
if notifyFlow {
flowBehavior := inst.flowModel.GetFlowBehavior()
flowDone = flowBehavior.TaskDone(containerInst)
}
if flowDone || containerInst.forceCompletion {
//flow completed or return was called explicitly, so let's complete the flow
flowBehavior := inst.flowModel.GetFlowBehavior()
flowBehavior.Done(containerInst)
flowDone = true
containerInst.SetStatus(model.FlowStatusCompleted)
if containerInst != inst.Instance {
//not top level flow so we have to schedule next step
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
//if the flow failed, set the error
for _, value := range containerInst.returnData {
host.SetOutput(value.Name(), value.Value())
}
inst.scheduleEval(host)
}
//if containerInst.isHandlingError {
// //was the error handler, so directly under instance
// host,ok := containerInst.host.(*EmbeddedInstance)
// if ok {
// host.SetStatus(model.FlowStatusCompleted)
// host.returnData = containerInst.returnData
// host.returnError = containerInst.returnError
// }
// //todo if not a task inst, what should we do?
//} else {
// // spawned from task instance
//
// //todo if not a task inst, what should we do?
//}
// flow has completed so remove it
delete(inst.subFlows, containerInst.subFlowId)
}
} else {
// not done, so enter tasks specified by the Done behavior call
inst.enterTasks(containerInst, taskEntries)
}
// task is done, so we can release it
containerInst.releaseTask(task)
}
// handleTaskError handles an error raised during execution of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskError(taskBehavior model.TaskBehavior, taskInst *TaskInst, err error) {
handled, taskEntries := taskBehavior.Error(taskInst, err)
containerInst := taskInst.flowInst
if !handled {
if containerInst.isHandlingError {
//fail
inst.SetStatus(model.FlowStatusFailed)
} else {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
}
return
}
if len(taskEntries) != 0 {
inst.enterTasks(containerInst, taskEntries)
}
containerInst.releaseTask(taskInst.Task())
}
// HandleGlobalError handles instance errors
func (inst *IndependentInstance) HandleGlobalError(containerInst *Instance, err error) {
if containerInst.isHandlingError {
//todo: log error information
containerInst.SetStatus(model.FlowStatusFailed)
return
}
containerInst.isHandlingError = true
flowBehavior := inst.flowModel.GetFlowBehavior()
//not currently handling error, so check if it has an error handler
if containerInst.flowDef.GetErrorHandler() != nil {
// todo: should we clear out the existing workitem queue for items from containerInst?
//clear existing instances
inst.taskInsts = make(map[string]*TaskInst)
taskEntries := flowBehavior.StartErrorHandler(containerInst)
inst.enterTasks(containerInst, taskEntries)
} else {
containerInst.SetStatus(model.FlowStatusFailed)
if containerInst != inst.Instance {
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := host.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
inst.handleTaskError(behavior, host, err)
				//fail the task
				//inst.scheduleEval(host)
			}
		} else {
			inst.returnError = err
		}
	}
}
instances.go
	embeddedInst.taskInsts = make(map[string]*TaskInst)
embeddedInst.linkInsts = make(map[int]*LinkInst)
embeddedInst.flowURI = flowURI
if inst.subFlows == nil {
inst.subFlows = make(map[int]*Instance)
}
inst.subFlows[embeddedInst.subFlowId] = embeddedInst
inst.ChangeTracker.SubFlowChange(taskInst.flowInst.subFlowId, CtAdd, embeddedInst.subFlowId, "")
return embeddedInst
}
func (inst *IndependentInstance) startEmbedded(embedded *Instance, startAttrs map[string]*data.Attribute) error {
embedded.attrs = startAttrs
if embedded.master != inst {
		return errors.New("embedded instance is not from this independent instance")
}
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
inst.startInstance(embedded)
return nil
}
func (inst *IndependentInstance) Start(startAttrs map[string]*data.Attribute) bool {
inst.attrs = startAttrs
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
return inst.startInstance(inst.Instance)
}
func (inst *IndependentInstance) ApplyPatch(patch *support.Patch) {
if inst.patch == nil {
inst.patch = patch
inst.patch.Init()
}
}
func (inst *IndependentInstance) ApplyInterceptor(interceptor *support.Interceptor) {
if inst.interceptor == nil {
inst.interceptor = interceptor
inst.interceptor.Init()
}
}
// GetChanges returns the Change Tracker object
func (inst *IndependentInstance) GetChanges() *InstanceChangeTracker {
return inst.ChangeTracker
}
// ResetChanges resets any changes that were being tracked
func (inst *IndependentInstance) ResetChanges() {
if inst.ChangeTracker != nil {
inst.ChangeTracker.ResetChanges()
}
//todo: can we reuse this to avoid gc
inst.ChangeTracker = NewInstanceChangeTracker()
}
// StepID returns the current step ID of the Flow Instance
func (inst *IndependentInstance) StepID() int {
	return inst.stepID
}
func (inst *IndependentInstance) DoStep() bool {
hasNext := false
inst.ResetChanges()
inst.stepID++
if inst.status == model.FlowStatusActive {
// get item to be worked on
item, ok := inst.workItemQueue.Pop()
if ok {
logger.Debug("Retrieved item from Flow Instance work queue")
workItem := item.(*WorkItem)
// get the corresponding behavior
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := workItem.taskInst.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
// track the fact that the work item was removed from the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtDel, ID: workItem.ID, WorkItem: workItem})
inst.execTask(behavior, workItem.taskInst)
hasNext = true
} else {
logger.Debug("Flow Instance work queue empty")
}
}
return hasNext
}
func (inst *IndependentInstance) scheduleEval(taskInst *TaskInst) {
inst.wiCounter++
workItem := NewWorkItem(inst.wiCounter, taskInst)
logger.Debugf("Scheduling task '%s'", taskInst.task.ID())
inst.workItemQueue.Push(workItem)
// track the fact that the work item was added to the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtAdd, ID: workItem.ID, WorkItem: workItem})
}
// execTask executes the specified Work Item of the Flow Instance
func (inst *IndependentInstance) execTask(behavior model.TaskBehavior, taskInst *TaskInst) {
defer func() {
if r := recover(); r != nil {
err := fmt.Errorf("Unhandled Error executing task '%s' : %v", taskInst.task.Name(), r)
logger.Error(err)
// todo: useful for debugging
logger.Errorf("StackTrace: %s", debug.Stack())
if !taskInst.flowInst.isHandlingError {
taskInst.appendErrorData(NewActivityEvalError(taskInst.task.Name(), "unhandled", err.Error()))
inst.HandleGlobalError(taskInst.flowInst, err)
}
// else what should we do?
}
}()
var err error
var evalResult model.EvalResult
if taskInst.status == model.TaskStatusWaiting {
evalResult, err = behavior.PostEval(taskInst)
} else {
evalResult, err = behavior.Eval(taskInst)
}
if err != nil {
taskInst.returnError = err
inst.handleTaskError(behavior, taskInst, err)
return
}
switch evalResult {
case model.EVAL_DONE:
taskInst.SetStatus(model.TaskStatusDone)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_SKIP:
taskInst.SetStatus(model.TaskStatusSkipped)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_WAIT:
taskInst.SetStatus(model.TaskStatusWaiting)
case model.EVAL_FAIL:
taskInst.SetStatus(model.TaskStatusFailed)
case model.EVAL_REPEAT:
taskInst.SetStatus(model.TaskStatusReady)
//task needs to iterate or retry
inst.scheduleEval(taskInst)
}
}
// handleTaskDone handles the completion of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskDone(taskBehavior model.TaskBehavior, taskInst *TaskInst) {
notifyFlow := false
var taskEntries []*model.TaskEntry
var err error
if taskInst.Status() == model.TaskStatusSkipped {
notifyFlow, taskEntries = taskBehavior.Skip(taskInst)
} else {
notifyFlow, taskEntries, err = taskBehavior.Done(taskInst)
}
containerInst := taskInst.flowInst
if err != nil {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
return
}
flowDone := false
task := taskInst.Task()
if notifyFlow {
flowBehavior := inst.flowModel.GetFlowBehavior()
flowDone = flowBehavior.TaskDone(containerInst)
}
if flowDone || containerInst.forceCompletion {
//flow completed or return was called explicitly, so let's complete the flow
flowBehavior := inst.flowModel.GetFlowBehavior()
flowBehavior.Done(containerInst)
flowDone = true
containerInst.SetStatus(model.FlowStatusCompleted)
if containerInst != inst.Instance {
//not top level flow so we have to schedule next step
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
//if the flow failed, set the error
for _, value := range containerInst.returnData {
host.SetOutput(value.Name(), value.Value())
}
inst.scheduleEval(host)
}
//if containerInst.isHandlingError {
// //was the error handler, so directly under instance
// host,ok := containerInst.host.(*EmbeddedInstance)
// if ok {
// host.SetStatus(model.FlowStatusCompleted)
// host.returnData = containerInst.returnData
// host.returnError = containerInst.returnError
// }
// //todo if not a task inst, what should we do?
//} else {
// // spawned from task instance
//
// //todo if not a task inst, what should we do?
//}
// flow has completed so remove it
delete(inst.subFlows, containerInst.subFlowId)
}
} else {
// not done, so enter tasks specified by the Done behavior call
inst.enterTasks(containerInst, taskEntries)
}
// task is done, so we can release it
containerInst.releaseTask(task)
}
// handleTaskError handles an error raised during execution of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskError(taskBehavior model.TaskBehavior, taskInst *TaskInst, err error) {
handled, taskEntries := taskBehavior.Error(taskInst, err)
containerInst := taskInst.flowInst
if !handled {
if containerInst.isHandlingError {
//fail
inst.SetStatus(model.FlowStatusFailed)
} else {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
}
return
}
if len(taskEntries) != 0 {
inst.enterTasks(containerInst, taskEntries)
}
containerInst.releaseTask(taskInst.Task())
}
// HandleGlobalError handles instance errors
func (inst *IndependentInstance) HandleGlobalError(containerInst *Instance, err error) {
if containerInst.isHandlingError {
//todo: log error information
containerInst.SetStatus(model.FlowStatusFailed)
return
}
containerInst.isHandlingError = true
flowBehavior := inst.flowModel.GetFlowBehavior()
//not currently handling error, so check if it has an error handler
if containerInst.flowDef.GetErrorHandler() != nil {
// todo: should we clear out the existing workitem queue for items from containerInst?
//clear existing instances
inst.taskInsts = make(map[string]*TaskInst)
		taskEntries := flowBehavior.StartErrorHandler(containerInst)
		inst.enterTasks(containerInst, taskEntries)
instances.go
	embeddedInst.taskInsts = make(map[string]*TaskInst)
embeddedInst.linkInsts = make(map[int]*LinkInst)
embeddedInst.flowURI = flowURI
if inst.subFlows == nil {
inst.subFlows = make(map[int]*Instance)
}
inst.subFlows[embeddedInst.subFlowId] = embeddedInst
inst.ChangeTracker.SubFlowChange(taskInst.flowInst.subFlowId, CtAdd, embeddedInst.subFlowId, "")
return embeddedInst
}
func (inst *IndependentInstance) startEmbedded(embedded *Instance, startAttrs map[string]*data.Attribute) error {
embedded.attrs = startAttrs
if embedded.master != inst {
		return errors.New("embedded instance is not from this independent instance")
}
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
inst.startInstance(embedded)
return nil
}
func (inst *IndependentInstance) Start(startAttrs map[string]*data.Attribute) bool {
inst.attrs = startAttrs
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
return inst.startInstance(inst.Instance)
}
func (inst *IndependentInstance) ApplyPatch(patch *support.Patch) {
if inst.patch == nil {
inst.patch = patch
inst.patch.Init()
}
}
func (inst *IndependentInstance) ApplyInterceptor(interceptor *support.Interceptor) {
if inst.interceptor == nil {
inst.interceptor = interceptor
inst.interceptor.Init()
}
}
// GetChanges returns the Change Tracker object
func (inst *IndependentInstance) GetChanges() *InstanceChangeTracker {
return inst.ChangeTracker
}
// ResetChanges resets any changes that were being tracked
func (inst *IndependentInstance) ResetChanges() {
if inst.ChangeTracker != nil {
inst.ChangeTracker.ResetChanges()
}
//todo: can we reuse this to avoid gc
inst.ChangeTracker = NewInstanceChangeTracker()
}
// StepID returns the current step ID of the Flow Instance
func (inst *IndependentInstance) StepID() int {
return inst.stepID
}
func (inst *IndependentInstance) DoStep() bool {
hasNext := false
inst.ResetChanges()
inst.stepID++
if inst.status == model.FlowStatusActive {
// get item to be worked on
item, ok := inst.workItemQueue.Pop()
if ok {
logger.Debug("Retrieved item from Flow Instance work queue")
workItem := item.(*WorkItem)
// get the corresponding behavior
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := workItem.taskInst.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
// track the fact that the work item was removed from the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtDel, ID: workItem.ID, WorkItem: workItem})
inst.execTask(behavior, workItem.taskInst)
hasNext = true
} else {
logger.Debug("Flow Instance work queue empty")
}
}
return hasNext
}
func (inst *IndependentInstance) scheduleEval(taskInst *TaskInst) {
inst.wiCounter++
workItem := NewWorkItem(inst.wiCounter, taskInst)
logger.Debugf("Scheduling task '%s'", taskInst.task.ID())
inst.workItemQueue.Push(workItem)
// track the fact that the work item was added to the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtAdd, ID: workItem.ID, WorkItem: workItem})
}
// execTask executes the specified Work Item of the Flow Instance
func (inst *IndependentInstance) execTask(behavior model.TaskBehavior, taskInst *TaskInst) {
defer func() {
if r := recover(); r != nil {
err := fmt.Errorf("Unhandled Error executing task '%s' : %v", taskInst.task.Name(), r)
logger.Error(err)
// todo: useful for debugging
logger.Errorf("StackTrace: %s", debug.Stack())
if !taskInst.flowInst.isHandlingError {
taskInst.appendErrorData(NewActivityEvalError(taskInst.task.Name(), "unhandled", err.Error()))
inst.HandleGlobalError(taskInst.flowInst, err)
}
// else what should we do?
}
}()
var err error
var evalResult model.EvalResult
if taskInst.status == model.TaskStatusWaiting {
evalResult, err = behavior.PostEval(taskInst)
} else {
evalResult, err = behavior.Eval(taskInst)
}
if err != nil {
taskInst.returnError = err
inst.handleTaskError(behavior, taskInst, err)
return
}
switch evalResult {
case model.EVAL_DONE:
taskInst.SetStatus(model.TaskStatusDone)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_SKIP:
taskInst.SetStatus(model.TaskStatusSkipped)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_WAIT:
taskInst.SetStatus(model.TaskStatusWaiting)
case model.EVAL_FAIL:
taskInst.SetStatus(model.TaskStatusFailed)
case model.EVAL_REPEAT:
taskInst.SetStatus(model.TaskStatusReady)
//task needs to iterate or retry
inst.scheduleEval(taskInst)
}
}
// handleTaskDone handles the completion of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskDone(taskBehavior model.TaskBehavior, taskInst *TaskInst) {
notifyFlow := false
var taskEntries []*model.TaskEntry
var err error
if taskInst.Status() == model.TaskStatusSkipped {
notifyFlow, taskEntries = taskBehavior.Skip(taskInst)
} else {
notifyFlow, taskEntries, err = taskBehavior.Done(taskInst)
}
containerInst := taskInst.flowInst
if err != nil {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
return
}
flowDone := false
task := taskInst.Task()
if notifyFlow {
flowBehavior := inst.flowModel.GetFlowBehavior()
flowDone = flowBehavior.TaskDone(containerInst)
}
if flowDone || containerInst.forceCompletion {
//flow completed or return was called explicitly, so let's complete the flow
flowBehavior := inst.flowModel.GetFlowBehavior()
flowBehavior.Done(containerInst)
flowDone = true
containerInst.SetStatus(model.FlowStatusCompleted)
if containerInst != inst.Instance {
//not top level flow so we have to schedule next step
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
//if the flow failed, set the error
				for _, value := range containerInst.returnData {
					host.SetOutput(value.Name(), value.Value())
				}
				inst.scheduleEval(host)
}
//if containerInst.isHandlingError {
// //was the error handler, so directly under instance
// host,ok := containerInst.host.(*EmbeddedInstance)
// if ok {
// host.SetStatus(model.FlowStatusCompleted)
// host.returnData = containerInst.returnData
// host.returnError = containerInst.returnError
// }
// //todo if not a task inst, what should we do?
//} else {
// // spawned from task instance
//
// //todo if not a task inst, what should we do?
//}
// flow has completed so remove it
delete(inst.subFlows, containerInst.subFlowId)
}
} else {
// not done, so enter tasks specified by the Done behavior call
inst.enterTasks(containerInst, taskEntries)
}
// task is done, so we can release it
containerInst.releaseTask(task)
}
// handleTaskError handles an error raised during execution of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskError(taskBehavior model.TaskBehavior, taskInst *TaskInst, err error) {
handled, taskEntries := taskBehavior.Error(taskInst, err)
containerInst := taskInst.flowInst
if !handled {
if containerInst.isHandlingError {
//fail
inst.SetStatus(model.FlowStatusFailed)
} else {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
}
return
}
if len(taskEntries) != 0 {
inst.enterTasks(containerInst, taskEntries)
}
containerInst.releaseTask(taskInst.Task())
}
// HandleGlobalError handles instance errors
func (inst *IndependentInstance) HandleGlobalError(containerInst *Instance, err error) {
if containerInst.isHandlingError {
//todo: log error information
containerInst.SetStatus(model.FlowStatusFailed)
return
}
containerInst.isHandlingError = true
flowBehavior := inst.flowModel.GetFlowBehavior()
//not currently handling error, so check if it has an error handler
if containerInst.flowDef.GetErrorHandler() != nil {
// todo: should we clear out the existing workitem queue for items from containerInst?
//clear existing instances
inst.taskInsts = make(map[string]*TaskInst)
		taskEntries := flowBehavior.StartErrorHandler(containerInst)
		inst.enterTasks(containerInst, taskEntries)
instances.go
			behavior = inst.flowModel.GetTaskBehavior(typeID)
}
// track the fact that the work item was removed from the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtDel, ID: workItem.ID, WorkItem: workItem})
inst.execTask(behavior, workItem.taskInst)
hasNext = true
} else {
logger.Debug("Flow Instance work queue empty")
}
}
return hasNext
}
func (inst *IndependentInstance) scheduleEval(taskInst *TaskInst) {
inst.wiCounter++
workItem := NewWorkItem(inst.wiCounter, taskInst)
logger.Debugf("Scheduling task '%s'", taskInst.task.ID())
inst.workItemQueue.Push(workItem)
// track the fact that the work item was added to the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtAdd, ID: workItem.ID, WorkItem: workItem})
}
// execTask executes the specified Work Item of the Flow Instance
func (inst *IndependentInstance) execTask(behavior model.TaskBehavior, taskInst *TaskInst) {
defer func() {
if r := recover(); r != nil {
err := fmt.Errorf("Unhandled Error executing task '%s' : %v", taskInst.task.Name(), r)
logger.Error(err)
// todo: useful for debugging
logger.Errorf("StackTrace: %s", debug.Stack())
if !taskInst.flowInst.isHandlingError {
taskInst.appendErrorData(NewActivityEvalError(taskInst.task.Name(), "unhandled", err.Error()))
inst.HandleGlobalError(taskInst.flowInst, err)
}
// else what should we do?
}
}()
var err error
var evalResult model.EvalResult
if taskInst.status == model.TaskStatusWaiting {
evalResult, err = behavior.PostEval(taskInst)
} else {
evalResult, err = behavior.Eval(taskInst)
}
if err != nil {
taskInst.returnError = err
inst.handleTaskError(behavior, taskInst, err)
return
}
switch evalResult {
case model.EVAL_DONE:
taskInst.SetStatus(model.TaskStatusDone)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_SKIP:
taskInst.SetStatus(model.TaskStatusSkipped)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_WAIT:
taskInst.SetStatus(model.TaskStatusWaiting)
case model.EVAL_FAIL:
taskInst.SetStatus(model.TaskStatusFailed)
case model.EVAL_REPEAT:
taskInst.SetStatus(model.TaskStatusReady)
//task needs to iterate or retry
inst.scheduleEval(taskInst)
}
}
// handleTaskDone handles the completion of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskDone(taskBehavior model.TaskBehavior, taskInst *TaskInst) {
notifyFlow := false
var taskEntries []*model.TaskEntry
var err error
if taskInst.Status() == model.TaskStatusSkipped {
notifyFlow, taskEntries = taskBehavior.Skip(taskInst)
} else {
notifyFlow, taskEntries, err = taskBehavior.Done(taskInst)
}
containerInst := taskInst.flowInst
if err != nil {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
return
}
flowDone := false
task := taskInst.Task()
if notifyFlow {
flowBehavior := inst.flowModel.GetFlowBehavior()
flowDone = flowBehavior.TaskDone(containerInst)
}
if flowDone || containerInst.forceCompletion {
//flow completed or return was called explicitly, so let's complete the flow
flowBehavior := inst.flowModel.GetFlowBehavior()
flowBehavior.Done(containerInst)
flowDone = true
containerInst.SetStatus(model.FlowStatusCompleted)
if containerInst != inst.Instance {
//not top level flow so we have to schedule next step
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
//if the flow failed, set the error
for _, value := range containerInst.returnData {
host.SetOutput(value.Name(), value.Value())
}
inst.scheduleEval(host)
}
//if containerInst.isHandlingError {
// //was the error handler, so directly under instance
// host,ok := containerInst.host.(*EmbeddedInstance)
// if ok {
// host.SetStatus(model.FlowStatusCompleted)
// host.returnData = containerInst.returnData
// host.returnError = containerInst.returnError
// }
// //todo if not a task inst, what should we do?
//} else {
// // spawned from task instance
//
// //todo if not a task inst, what should we do?
//}
// flow has completed so remove it
delete(inst.subFlows, containerInst.subFlowId)
}
} else {
// not done, so enter tasks specified by the Done behavior call
inst.enterTasks(containerInst, taskEntries)
}
// task is done, so we can release it
containerInst.releaseTask(task)
}
// handleTaskError handles an error raised during execution of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskError(taskBehavior model.TaskBehavior, taskInst *TaskInst, err error) {
handled, taskEntries := taskBehavior.Error(taskInst, err)
containerInst := taskInst.flowInst
if !handled {
if containerInst.isHandlingError {
//fail
inst.SetStatus(model.FlowStatusFailed)
} else {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
}
return
}
if len(taskEntries) != 0 {
inst.enterTasks(containerInst, taskEntries)
}
containerInst.releaseTask(taskInst.Task())
}
// HandleGlobalError handles instance errors
func (inst *IndependentInstance) HandleGlobalError(containerInst *Instance, err error) {
if containerInst.isHandlingError {
//todo: log error information
containerInst.SetStatus(model.FlowStatusFailed)
return
}
containerInst.isHandlingError = true
flowBehavior := inst.flowModel.GetFlowBehavior()
//not currently handling error, so check if it has an error handler
if containerInst.flowDef.GetErrorHandler() != nil {
// todo: should we clear out the existing workitem queue for items from containerInst?
//clear existing instances
inst.taskInsts = make(map[string]*TaskInst)
taskEntries := flowBehavior.StartErrorHandler(containerInst)
inst.enterTasks(containerInst, taskEntries)
} else {
containerInst.SetStatus(model.FlowStatusFailed)
if containerInst != inst.Instance {
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := host.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
inst.handleTaskError(behavior, host, err)
//fail the task
//inst.scheduleEval(host)
}
} else {
inst.returnError = err
}
}
}
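// startInstance marks the given instance as active and enters its start tasks as determined by the flow behavior.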
func (inst *IndependentInstance) startInstance(toStart *Instance) bool {
toStart.SetStatus(model.FlowStatusActive)
//if pi.Attrs == nil {
// pi.Attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// pi.Attrs[attr.Name()] = attr
//}
//logger.Infof("FlowInstance Flow: %v", pi.FlowModel)
//need input mappings
flowBehavior := inst.flowModel.GetFlowBehavior()
ok, taskEntries := flowBehavior.Start(toStart)
if ok {
inst.enterTasks(toStart, taskEntries)
}
return ok
}
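// enterTasks enters the given task entries on the active instance, scheduling evaluation for each entered task.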
func (inst *IndependentInstance) enterTasks(activeInst *Instance, taskEntries []*model.TaskEntry) {
for _, taskEntry := range taskEntries {
//logger.Debugf("EnterTask - TaskEntry: %v", taskEntry)
taskToEnterBehavior := inst.flowModel.GetTaskBehavior(taskEntry.Task.TypeID())
enterTaskData, _ := activeInst.FindOrCreateTaskData(taskEntry.Task)
enterResult := taskToEnterBehavior.Enter(enterTaskData)
if enterResult == model.ENTER_EVAL {
inst.scheduleEval(enterTaskData)
} else if enterResult == model.ENTER_SKIP {
//todo optimize skip, just keep skipping and don't schedule eval
inst.scheduleEval(enterTaskData)
}
}
}
//////////////////////////////////////////////////////////////////
// WorkItem describes an item of work (event for a Task) that should be executed on Step
type WorkItem struct {
ID int `json:"id"`
taskInst *TaskInst `json:"-"`
TaskID string `json:"taskID"`
SubFlowID int `json:"subFlowId"`
}
// NewWorkItem constructs a new WorkItem for the specified TaskInst
func NewWorkItem(id int, taskInst *TaskInst) *WorkItem {
var workItem WorkItem
workItem.ID = id
workItem.taskInst = taskInst
workItem.TaskID = taskInst.task.ID()
workItem.SubFlowID = taskInst.flowInst.subFlowId
return &workItem
}
func | NewActivityEvalError | identifier_name |
|
ffi.rs
        let node_ptrs = nodes.iter().map(|x| x.as_ptr()).collect::<Vec<_>>();
SvmProblem {
nodes: nodes,
node_ptrs: node_ptrs,
y: y.data().iter().map(|&x| x as f64).collect::<Vec<_>>(),
}
}
/// Returns the unsafe object that can be passed into `libsvm`.
fn build_problem(&self) -> LibsvmProblem {
LibsvmProblem {
l: self.nodes.len() as i32,
y: self.y.as_ptr(),
svm_node: self.node_ptrs.as_ptr(),
}
}
}
/// `libsvm` representation of training parameters.
#[repr(C)]
struct LibsvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
degree: i32,
gamma: f64,
coef0: f64,
cache_size: f64,
eps: f64,
C: f64,
nr_weight: i32,
weight_label: *const i32,
weight: *const f64,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
/// Safe representation of `LibsvmParameter`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
pub degree: i32,
pub gamma: f64,
pub coef0: f64,
pub cache_size: f64,
eps: f64,
pub C: f64,
nr_weight: i32,
weight_label: Vec<i32>,
weight: Vec<f64>,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
impl SvmParameter {
pub fn new(
svm_type: SvmType,
kernel_type: KernelType,
num_classes: usize,
dim: usize,
) -> SvmParameter {
SvmParameter {
svm_type: svm_type,
kernel_type: kernel_type,
degree: 3,
gamma: 1.0 / dim as f64,
C: 1.0,
coef0: 0.0,
cache_size: 100.0,
eps: 0.1,
nr_weight: num_classes as i32,
weight: vec![1.0; num_classes],
weight_label: (0..num_classes).map(|x| x as i32).collect::<Vec<_>>(),
nu: 0.5,
p: 0.1,
shrinking: 1,
probability: 0,
}
}
/// Returns the parameter object to be passed into
/// `libsvm` functions.
fn build_libsvm_parameter(&self) -> LibsvmParameter {
LibsvmParameter {
svm_type: self.svm_type.clone(),
kernel_type: self.kernel_type.clone(),
degree: self.degree,
gamma: self.gamma,
C: self.C,
coef0: self.coef0,
cache_size: self.cache_size,
eps: self.eps,
nr_weight: self.nr_weight,
weight: self.weight.as_ptr(),
weight_label: self.weight_label.as_ptr(),
nu: self.nu,
p: self.p,
shrinking: self.shrinking,
probability: self.probability,
}
}
}
/// The model object returned from and accepted by
/// `libsvm` functions.
#[repr(C)]
struct LibsvmModel {
svm_parameter: LibsvmParameter,
nr_class: i32,
l: i32,
SV: *const *const LibsvmNode,
sv_coef: *const *const f64,
rho: *const f64,
probA: *const f64,
probB: *const f64,
sv_indices: *const i32,
label: *const i32,
nSV: *const i32,
free_sv: i32,
}
/// Safe representation of `LibsvmModel`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmModel {
svm_parameter: SvmParameter,
nr_class: i32,
l: i32,
SV: Vec<Vec<LibsvmNode>>,
sv_coef: Vec<Vec<f64>>,
rho: Vec<f64>,
probA: Vec<f64>,
probB: Vec<f64>,
sv_indices: Vec<i32>,
label: Vec<i32>,
nSV: Vec<i32>,
free_sv: i32,
}
impl SvmModel {
fn new(param: SvmParameter, model_ptr: *const LibsvmModel) -> SvmModel {
unsafe {
SvmModel {
svm_parameter: param,
nr_class: (*model_ptr).nr_class,
l: (*model_ptr).l,
SV: SvmModel::get_SV(model_ptr),
sv_coef: SvmModel::get_sv_coef(model_ptr),
rho: SvmModel::get_rho(model_ptr),
probA: vec![0.0],
probB: vec![0.0],
sv_indices: vec![0],
label: SvmModel::get_label(model_ptr),
nSV: SvmModel::get_nSV(model_ptr),
free_sv: 0,
}
}
}
fn get_libsvm_model(
&self,
SV_ptrs: &mut Vec<*const LibsvmNode>,
sv_coef_ptrs: &mut Vec<*const f64>,
) -> LibsvmModel {
SV_ptrs.clear();
sv_coef_ptrs.clear();
for x in &self.SV {
SV_ptrs.push(x.as_ptr());
}
for x in &self.sv_coef {
sv_coef_ptrs.push(x.as_ptr());
}
LibsvmModel {
svm_parameter: self.svm_parameter.build_libsvm_parameter(),
nr_class: self.nr_class,
l: self.l,
SV: SV_ptrs.as_ptr(),
sv_coef: sv_coef_ptrs.as_ptr(),
rho: self.rho.as_ptr(),
probA: self.probA.as_ptr(),
probB: self.probB.as_ptr(),
sv_indices: self.sv_indices.as_ptr(),
label: self.label.as_ptr(),
nSV: self.nSV.as_ptr(),
free_sv: self.free_sv,
}
}
unsafe fn get_nSV(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).nSV, nr_class).to_owned()
}
unsafe fn get_label(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).label, nr_class).to_owned()
}
unsafe fn get_SV(model_ptr: *const LibsvmModel) -> Vec<Vec<LibsvmNode>> {
let l = (*model_ptr).l;
let mut sv_rows = Vec::with_capacity(l as usize);
let sv_ptr = (*model_ptr).SV;
for row in 0..l {
let mut sv_row = Vec::new();
let sv_row_ptr = *sv_ptr.offset(row as isize);
let mut i = 0;
loop {
let node = (*sv_row_ptr.offset(i as isize)).clone();
sv_row.push(node.clone());
if node.index == -1 {
break;
}
i += 1;
}
sv_rows.push(sv_row);
}
sv_rows
}
unsafe fn get_rho(model_ptr: *const LibsvmModel) -> Vec<f64> {
let mut nr_class = (*model_ptr).nr_class as usize;
nr_class = nr_class * (nr_class - 1) / 2;
slice::from_raw_parts((*model_ptr).rho, nr_class).to_owned()
}
unsafe fn get_sv_coef(model_ptr: *const LibsvmModel) -> Vec<Vec<f64>> {
let nr_class = (*model_ptr).nr_class as usize;
let l = (*model_ptr).l as usize;
slice::from_raw_parts((*model_ptr).sv_coef, nr_class - 1)
.iter()
.map(|&x| slice::from_raw_parts(x, l).to_owned())
.collect::<Vec<_>>()
}
}
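// Raw FFI declarations for the `libsvm` C interface used by the safe wrappers above.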
extern "C" {
fn svm_train(prob: *const LibsvmProblem, param: *const LibsvmParameter) -> *const LibsvmModel;
fn svm_predict_values(
svm_model: *mut LibsvmModel,
svm_nodes: *const LibsvmNode,
out: *const f64,
) -> f64;
fn svm_free_and_destroy_model(svm_model: *const *const LibsvmModel);
fn svm_check_parameter(
problem: *const LibsvmProblem,
param: *const LibsvmParameter,
) -> *const c_char;
}
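/// Checks the training parameters with `libsvm`'s own validation, returning its error message on failure.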
fn check(problem: *const LibsvmProblem, param: *const LibsvmParameter) -> Result<(), String> {
unsafe {
let message = svm_check_parameter(problem, param);
if message.is_null() {
Ok(())
        } else {
            Err(CStr::from_ptr(message).to_str().unwrap().to_owned())
        }
    }
}
ffi.rs
    svm_node: *const *const LibsvmNode,
}
/// Safe version of `LibsvmProblem`.
pub struct SvmProblem {
nodes: Vec<Vec<LibsvmNode>>,
node_ptrs: Vec<*const LibsvmNode>,
y: Vec<f64>,
}
/// Convert a row of the X matrix to its Libsvm representation.
fn row_to_nodes<T: NonzeroIterable>(row: T) -> Vec<LibsvmNode> {
let mut nodes = Vec::new();
for (index, value) in row.iter_nonzero() {
nodes.push(LibsvmNode::new(index as i32, value as f64));
}
// Sentinel value for end of row
nodes.push(LibsvmNode::new(-1, 0.0));
nodes
}
impl SvmProblem {
/// Create a new `SvmProblem` from training data.
pub fn new<'a, T>(X: &'a T, y: &Array) -> SvmProblem
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let mut nodes = Vec::with_capacity(X.rows());
for row in X.iter_rows() {
let row_nodes = row_to_nodes(row);
nodes.push(row_nodes)
}
let node_ptrs = nodes.iter().map(|x| x.as_ptr()).collect::<Vec<_>>();
SvmProblem {
nodes: nodes,
node_ptrs: node_ptrs,
y: y.data().iter().map(|&x| x as f64).collect::<Vec<_>>(),
}
}
/// Returns the unsafe object that can be passed into `libsvm`.
fn build_problem(&self) -> LibsvmProblem {
LibsvmProblem {
l: self.nodes.len() as i32,
y: self.y.as_ptr(),
svm_node: self.node_ptrs.as_ptr(),
}
}
}
/// `libsvm` representation of training parameters.
#[repr(C)]
struct LibsvmParameter {
    svm_type: SvmType,
    kernel_type: KernelType,
    degree: i32,
    gamma: f64,
    coef0: f64,
eps: f64,
C: f64,
nr_weight: i32,
weight_label: *const i32,
weight: *const f64,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
/// Safe representation of `LibsvmParameter`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
pub degree: i32,
pub gamma: f64,
pub coef0: f64,
pub cache_size: f64,
eps: f64,
pub C: f64,
nr_weight: i32,
weight_label: Vec<i32>,
weight: Vec<f64>,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
impl SvmParameter {
pub fn new(
svm_type: SvmType,
kernel_type: KernelType,
num_classes: usize,
dim: usize,
) -> SvmParameter {
SvmParameter {
svm_type: svm_type,
kernel_type: kernel_type,
degree: 3,
gamma: 1.0 / dim as f64,
C: 1.0,
coef0: 0.0,
cache_size: 100.0,
eps: 0.1,
nr_weight: num_classes as i32,
weight: vec![1.0; num_classes],
weight_label: (0..num_classes).map(|x| x as i32).collect::<Vec<_>>(),
nu: 0.5,
p: 0.1,
shrinking: 1,
probability: 0,
}
}
/// Returns the parameter object to be passed into
/// `libsvm` functions.
fn build_libsvm_parameter(&self) -> LibsvmParameter {
LibsvmParameter {
svm_type: self.svm_type.clone(),
kernel_type: self.kernel_type.clone(),
degree: self.degree,
gamma: self.gamma,
C: self.C,
coef0: self.coef0,
cache_size: self.cache_size,
eps: self.eps,
nr_weight: self.nr_weight,
weight: self.weight.as_ptr(),
weight_label: self.weight_label.as_ptr(),
nu: self.nu,
p: self.p,
shrinking: self.shrinking,
probability: self.probability,
}
}
}
/// The model object returned from and accepted by
/// `libsvm` functions.
#[repr(C)]
struct LibsvmModel {
svm_parameter: LibsvmParameter,
nr_class: i32,
l: i32,
SV: *const *const LibsvmNode,
sv_coef: *const *const f64,
rho: *const f64,
probA: *const f64,
probB: *const f64,
sv_indices: *const i32,
label: *const i32,
nSV: *const i32,
free_sv: i32,
}
/// Safe representation of `LibsvmModel`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmModel {
svm_parameter: SvmParameter,
nr_class: i32,
l: i32,
SV: Vec<Vec<LibsvmNode>>,
sv_coef: Vec<Vec<f64>>,
rho: Vec<f64>,
probA: Vec<f64>,
probB: Vec<f64>,
sv_indices: Vec<i32>,
label: Vec<i32>,
nSV: Vec<i32>,
free_sv: i32,
}
impl SvmModel {
fn new(param: SvmParameter, model_ptr: *const LibsvmModel) -> SvmModel {
unsafe {
SvmModel {
svm_parameter: param,
nr_class: (*model_ptr).nr_class,
l: (*model_ptr).l,
SV: SvmModel::get_SV(model_ptr),
sv_coef: SvmModel::get_sv_coef(model_ptr),
rho: SvmModel::get_rho(model_ptr),
probA: vec![0.0],
probB: vec![0.0],
sv_indices: vec![0],
label: SvmModel::get_label(model_ptr),
nSV: SvmModel::get_nSV(model_ptr),
free_sv: 0,
}
}
}
fn get_libsvm_model(
&self,
SV_ptrs: &mut Vec<*const LibsvmNode>,
sv_coef_ptrs: &mut Vec<*const f64>,
) -> LibsvmModel {
SV_ptrs.clear();
sv_coef_ptrs.clear();
for x in &self.SV {
SV_ptrs.push(x.as_ptr());
}
for x in &self.sv_coef {
sv_coef_ptrs.push(x.as_ptr());
}
LibsvmModel {
svm_parameter: self.svm_parameter.build_libsvm_parameter(),
nr_class: self.nr_class,
l: self.l,
SV: SV_ptrs.as_ptr(),
sv_coef: sv_coef_ptrs.as_ptr(),
rho: self.rho.as_ptr(),
probA: self.probA.as_ptr(),
probB: self.probB.as_ptr(),
sv_indices: self.sv_indices.as_ptr(),
label: self.label.as_ptr(),
nSV: self.nSV.as_ptr(),
free_sv: self.free_sv,
}
}
unsafe fn get_nSV(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).nSV, nr_class).to_owned()
}
unsafe fn get_label(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).label, nr_class).to_owned()
}
unsafe fn get_SV(model_ptr: *const LibsvmModel) -> Vec<Vec<LibsvmNode>> {
let l = (*model_ptr).l;
let mut sv_rows = Vec::with_capacity(l as usize);
let sv_ptr = (*model_ptr).SV;
for row in 0..l {
let mut sv_row = Vec::new();
let sv_row_ptr = *sv_ptr.offset(row as isize);
let mut i = 0;
loop {
let node = (*sv_row_ptr.offset(i as isize)).clone();
sv_row.push(node.clone());
if node.index == -1 {
break;
}
i += 1;
}
sv_rows.push(sv_row);
}
sv_rows
}
unsafe fn get_rho(model_ptr: *const LibsvmModel) -> Vec<f64> {
let mut nr_class = (*model_ptr).nr_class as usize;
nr_class = nr_class * (nr_class - 1) / 2;
slice::from_raw_parts((*model_ptr).rho, nr_class).to_owned()
}
    unsafe fn get_sv_coef(model_ptr: *const LibsvmModel) -> Vec<Vec<f64>> {
        let nr_class = (*model_ptr).nr_class as usize;
        let l = (*model_ptr).l as usize;
        slice::from_raw_parts((*model_ptr).sv_coef, nr_class - 1)
            .iter()
            .map(|&x| slice::from_raw_parts(x, l).to_owned())
            .collect::<Vec<_>>()
    }
}
ffi.rs
    svm_node: *const *const LibsvmNode,
}
/// Safe version of `LibsvmProblem`.
pub struct SvmProblem {
nodes: Vec<Vec<LibsvmNode>>,
node_ptrs: Vec<*const LibsvmNode>,
y: Vec<f64>,
}
/// Convert a row of the X matrix to its Libsvm representation.
fn row_to_nodes<T: NonzeroIterable>(row: T) -> Vec<LibsvmNode> {
let mut nodes = Vec::new();
for (index, value) in row.iter_nonzero() {
nodes.push(LibsvmNode::new(index as i32, value as f64));
}
// Sentinel value for end of row
nodes.push(LibsvmNode::new(-1, 0.0));
nodes
}
impl SvmProblem {
/// Create a new `SvmProblem` from training data.
pub fn new<'a, T>(X: &'a T, y: &Array) -> SvmProblem
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let mut nodes = Vec::with_capacity(X.rows());
for row in X.iter_rows() {
let row_nodes = row_to_nodes(row);
nodes.push(row_nodes)
}
let node_ptrs = nodes.iter().map(|x| x.as_ptr()).collect::<Vec<_>>();
SvmProblem {
nodes: nodes,
node_ptrs: node_ptrs,
y: y.data().iter().map(|&x| x as f64).collect::<Vec<_>>(),
}
}
/// Returns the unsafe object that can be passed into `libsvm`.
fn build_problem(&self) -> LibsvmProblem {
LibsvmProblem {
l: self.nodes.len() as i32,
y: self.y.as_ptr(),
svm_node: self.node_ptrs.as_ptr(),
}
}
}
/// `libsvm` representation of training parameters.
#[repr(C)]
struct LibsvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
degree: i32,
gamma: f64,
coef0: f64,
cache_size: f64,
eps: f64,
C: f64,
nr_weight: i32,
weight_label: *const i32,
weight: *const f64,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
/// Safe representation of `LibsvmParameter`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
pub degree: i32,
pub gamma: f64,
pub coef0: f64,
pub cache_size: f64,
eps: f64,
pub C: f64,
nr_weight: i32,
weight_label: Vec<i32>,
weight: Vec<f64>,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
impl SvmParameter {
pub fn new(
svm_type: SvmType,
kernel_type: KernelType,
num_classes: usize,
dim: usize,
) -> SvmParameter {
SvmParameter {
svm_type: svm_type,
kernel_type: kernel_type,
degree: 3,
gamma: 1.0 / dim as f64,
C: 1.0,
coef0: 0.0,
cache_size: 100.0,
eps: 0.1,
nr_weight: num_classes as i32,
weight: vec![1.0; num_classes],
weight_label: (0..num_classes).map(|x| x as i32).collect::<Vec<_>>(),
nu: 0.5,
p: 0.1,
shrinking: 1,
probability: 0,
}
}
/// Returns the parameter object to be passed into
/// `libsvm` functions.
fn build_libsvm_parameter(&self) -> LibsvmParameter {
LibsvmParameter {
svm_type: self.svm_type.clone(),
kernel_type: self.kernel_type.clone(),
degree: self.degree,
gamma: self.gamma,
C: self.C,
coef0: self.coef0,
cache_size: self.cache_size,
eps: self.eps,
nr_weight: self.nr_weight,
weight: self.weight.as_ptr(),
weight_label: self.weight_label.as_ptr(),
nu: self.nu,
p: self.p,
shrinking: self.shrinking,
probability: self.probability,
}
}
}
/// The model object returned from and accepted by
/// `libsvm` functions.
#[repr(C)]
struct LibsvmModel {
svm_parameter: LibsvmParameter,
nr_class: i32,
l: i32,
SV: *const *const LibsvmNode,
sv_coef: *const *const f64,
rho: *const f64,
probA: *const f64,
probB: *const f64,
sv_indices: *const i32,
label: *const i32,
nSV: *const i32,
free_sv: i32,
}
/// Safe representation of `LibsvmModel`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmModel {
svm_parameter: SvmParameter,
nr_class: i32,
l: i32,
SV: Vec<Vec<LibsvmNode>>,
sv_coef: Vec<Vec<f64>>,
rho: Vec<f64>,
probA: Vec<f64>,
probB: Vec<f64>,
sv_indices: Vec<i32>,
label: Vec<i32>,
nSV: Vec<i32>,
free_sv: i32,
}
impl SvmModel {
fn new(param: SvmParameter, model_ptr: *const LibsvmModel) -> SvmModel {
unsafe {
SvmModel {
svm_parameter: param,
nr_class: (*model_ptr).nr_class,
l: (*model_ptr).l,
SV: SvmModel::get_SV(model_ptr),
sv_coef: SvmModel::get_sv_coef(model_ptr),
rho: SvmModel::get_rho(model_ptr),
probA: vec![0.0],
probB: vec![0.0],
sv_indices: vec![0],
label: SvmModel::get_label(model_ptr),
nSV: SvmModel::get_nSV(model_ptr),
free_sv: 0,
}
}
}
fn get_libsvm_model(
&self,
SV_ptrs: &mut Vec<*const LibsvmNode>,
sv_coef_ptrs: &mut Vec<*const f64>,
) -> LibsvmModel {
SV_ptrs.clear();
sv_coef_ptrs.clear();
for x in &self.SV {
SV_ptrs.push(x.as_ptr());
}
for x in &self.sv_coef {
sv_coef_ptrs.push(x.as_ptr());
}
LibsvmModel {
svm_parameter: self.svm_parameter.build_libsvm_parameter(),
nr_class: self.nr_class,
l: self.l,
SV: SV_ptrs.as_ptr(),
sv_coef: sv_coef_ptrs.as_ptr(),
rho: self.rho.as_ptr(),
probA: self.probA.as_ptr(),
probB: self.probB.as_ptr(),
sv_indices: self.sv_indices.as_ptr(),
label: self.label.as_ptr(),
nSV: self.nSV.as_ptr(),
free_sv: self.free_sv,
}
}
unsafe fn get_nSV(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).nSV, nr_class).to_owned()
}
unsafe fn get_label(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).label, nr_class).to_owned()
}
unsafe fn get_SV(model_ptr: *const LibsvmModel) -> Vec<Vec<LibsvmNode>> {
let l = (*model_ptr).l;
let mut sv_rows = Vec::with_capacity(l as usize);
let sv_ptr = (*model_ptr).SV;
for row in 0..l {
let mut sv_row = Vec::new();
let sv_row_ptr = *sv_ptr.offset(row as isize);
let mut i = 0;
loop {
let node = (*sv_row_ptr.offset(i as isize)).clone();
sv_row.push(node.clone());
if node.index == -1 {
break;
}
i += 1;
}
sv_rows.push(sv_row);
}
sv_rows
}
unsafe fn get_rho(model_ptr: *const LibsvmModel) -> Vec<f64> {
let mut nr_class = (*model_ptr).nr_class as usize;
nr_class = nr_class * (nr_class - 1) / 2;
slice::from_raw_parts((*model_ptr).rho, nr_class).to_owned()
}
    unsafe fn get_sv_coef(model_ptr: *const LibsvmModel) -> Vec<Vec<f64>> {
        let nr_class = (*model_ptr).nr_class as usize;
        let l = (*model_ptr).l as usize;
        slice::from_raw_parts((*model_ptr).sv_coef, nr_class - 1)
            .iter()
            .map(|&x| slice::from_raw_parts(x, l).to_owned())
            .collect::<Vec<_>>()
    }
}
ffi.rs
    svm_node: *const *const LibsvmNode,
}
/// Safe version of `LibsvmProblem`.
pub struct SvmProblem {
nodes: Vec<Vec<LibsvmNode>>,
node_ptrs: Vec<*const LibsvmNode>,
y: Vec<f64>,
}
/// Convert a row of the X matrix to its Libsvm representation.
fn row_to_nodes<T: NonzeroIterable>(row: T) -> Vec<LibsvmNode> {
let mut nodes = Vec::new();
for (index, value) in row.iter_nonzero() {
nodes.push(LibsvmNode::new(index as i32, value as f64));
}
// Sentinel value for end of row
nodes.push(LibsvmNode::new(-1, 0.0));
nodes
}
impl SvmProblem {
/// Create a new `SvmProblem` from training data.
pub fn new<'a, T>(X: &'a T, y: &Array) -> SvmProblem
where
T: IndexableMatrix,
&'a T: RowIterable,
    {
        let mut nodes = Vec::with_capacity(X.rows());
        for row in X.iter_rows() {
            let row_nodes = row_to_nodes(row);
            nodes.push(row_nodes)
        }
        let node_ptrs = nodes.iter().map(|x| x.as_ptr()).collect::<Vec<_>>();
        SvmProblem {
            nodes: nodes,
            node_ptrs: node_ptrs,
            y: y.data().iter().map(|&x| x as f64).collect::<Vec<_>>(),
        }
    }
/// Returns the unsafe object that can be passed into `libsvm`.
fn build_problem(&self) -> LibsvmProblem {
LibsvmProblem {
l: self.nodes.len() as i32,
y: self.y.as_ptr(),
svm_node: self.node_ptrs.as_ptr(),
}
}
}
/// `libsvm` representation of training parameters.
#[repr(C)]
struct LibsvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
degree: i32,
gamma: f64,
coef0: f64,
cache_size: f64,
eps: f64,
C: f64,
nr_weight: i32,
weight_label: *const i32,
weight: *const f64,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
/// Safe representation of `LibsvmParameter`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
pub degree: i32,
pub gamma: f64,
pub coef0: f64,
pub cache_size: f64,
eps: f64,
pub C: f64,
nr_weight: i32,
weight_label: Vec<i32>,
weight: Vec<f64>,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
impl SvmParameter {
pub fn new(
svm_type: SvmType,
kernel_type: KernelType,
num_classes: usize,
dim: usize,
) -> SvmParameter {
SvmParameter {
svm_type: svm_type,
kernel_type: kernel_type,
degree: 3,
gamma: 1.0 / dim as f64,
C: 1.0,
coef0: 0.0,
cache_size: 100.0,
eps: 0.1,
nr_weight: num_classes as i32,
weight: vec![1.0; num_classes],
weight_label: (0..num_classes).map(|x| x as i32).collect::<Vec<_>>(),
nu: 0.5,
p: 0.1,
shrinking: 1,
probability: 0,
}
}
/// Returns the parameter object to be passed into
/// `libsvm` functions.
fn build_libsvm_parameter(&self) -> LibsvmParameter {
LibsvmParameter {
svm_type: self.svm_type.clone(),
kernel_type: self.kernel_type.clone(),
degree: self.degree,
gamma: self.gamma,
C: self.C,
coef0: self.coef0,
cache_size: self.cache_size,
eps: self.eps,
nr_weight: self.nr_weight,
weight: self.weight.as_ptr(),
weight_label: self.weight_label.as_ptr(),
nu: self.nu,
p: self.p,
shrinking: self.shrinking,
probability: self.probability,
}
}
}
/// The model object returned from and accepted by
/// `libsvm` functions.
#[repr(C)]
struct LibsvmModel {
svm_parameter: LibsvmParameter,
nr_class: i32,
l: i32,
SV: *const *const LibsvmNode,
sv_coef: *const *const f64,
rho: *const f64,
probA: *const f64,
probB: *const f64,
sv_indices: *const i32,
label: *const i32,
nSV: *const i32,
free_sv: i32,
}
/// Safe representation of `LibsvmModel`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmModel {
svm_parameter: SvmParameter,
nr_class: i32,
l: i32,
SV: Vec<Vec<LibsvmNode>>,
sv_coef: Vec<Vec<f64>>,
rho: Vec<f64>,
probA: Vec<f64>,
probB: Vec<f64>,
sv_indices: Vec<i32>,
label: Vec<i32>,
nSV: Vec<i32>,
free_sv: i32,
}
impl SvmModel {
fn new(param: SvmParameter, model_ptr: *const LibsvmModel) -> SvmModel {
unsafe {
SvmModel {
svm_parameter: param,
nr_class: (*model_ptr).nr_class,
l: (*model_ptr).l,
SV: SvmModel::get_SV(model_ptr),
sv_coef: SvmModel::get_sv_coef(model_ptr),
rho: SvmModel::get_rho(model_ptr),
probA: vec![0.0],
probB: vec![0.0],
sv_indices: vec![0],
label: SvmModel::get_label(model_ptr),
nSV: SvmModel::get_nSV(model_ptr),
free_sv: 0,
}
}
}
fn get_libsvm_model(
&self,
SV_ptrs: &mut Vec<*const LibsvmNode>,
sv_coef_ptrs: &mut Vec<*const f64>,
) -> LibsvmModel {
SV_ptrs.clear();
sv_coef_ptrs.clear();
for x in &self.SV {
SV_ptrs.push(x.as_ptr());
}
for x in &self.sv_coef {
sv_coef_ptrs.push(x.as_ptr());
}
LibsvmModel {
svm_parameter: self.svm_parameter.build_libsvm_parameter(),
nr_class: self.nr_class,
l: self.l,
SV: SV_ptrs.as_ptr(),
sv_coef: sv_coef_ptrs.as_ptr(),
rho: self.rho.as_ptr(),
probA: self.probA.as_ptr(),
probB: self.probB.as_ptr(),
sv_indices: self.sv_indices.as_ptr(),
label: self.label.as_ptr(),
nSV: self.nSV.as_ptr(),
free_sv: self.free_sv,
}
}
unsafe fn get_nSV(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).nSV, nr_class).to_owned()
}
unsafe fn get_label(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).label, nr_class).to_owned()
}
unsafe fn get_SV(model_ptr: *const LibsvmModel) -> Vec<Vec<LibsvmNode>> {
let l = (*model_ptr).l;
let mut sv_rows = Vec::with_capacity(l as usize);
let sv_ptr = (*model_ptr).SV;
for row in 0..l {
let mut sv_row = Vec::new();
let sv_row_ptr = *sv_ptr.offset(row as isize);
let mut i = 0;
loop {
let node = (*sv_row_ptr.offset(i as isize)).clone();
sv_row.push(node.clone());
if node.index == -1 {
break;
}
i += 1;
}
sv_rows.push(sv_row);
}
sv_rows
}
unsafe fn get_rho(model_ptr: *const LibsvmModel) -> Vec<f64> {
let mut nr_class = (*model_ptr).nr_class as usize;
nr_class = nr_class * (nr_class - 1) / 2;
slice::from_raw_parts((*model_ptr).rho, nr_class).to_owned()
}
    unsafe fn get_sv_coef(model_ptr: *const LibsvmModel) -> Vec<Vec<f64>> {
        let nr_class = (*model_ptr).nr_class as usize;
        let l = (*model_ptr).l as usize;
        slice::from_raw_parts((*model_ptr).sv_coef, nr_class - 1)
            .iter()
            .map(|&x| slice::from_raw_parts(x, l).to_owned())
            .collect::<Vec<_>>()
    }
}
auxiliary_plots.py
        of country of input df
    pc = the column number of the column that we want to plot
Returns:
---------
    The return is a formatted plot
"""
# define the columns of input
# cc = data.columns[checkcol]
#pc = data.columns[plotcol]
plt.rcParams['font.size'] = 18
    # generate standard geopandas dataframe
world_df = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));
    #check indices of the input dataframe and modify standard geopandas df
world_df = world_df[world_df["iso_a3"].isin(data[cc])];
#world_2df.["OFa_all_con"] = np.nan;
#world_2df.sort_values(by="iso_a3").head()
for i in world_df.index:
for j in data.index:
if world_df.loc[i,"iso_a3"] == data.loc[j, cc]:
try:
world_df.loc[i,pc] = data.loc[j, pc];
except:
print("\nError! Invalid Input. Example for input: OFa_all_con")
return
fig, ax = plt.subplots(1,1, figsize=(22,12))
ax.axis('off')
if pc == "OFa_all_con":
fig.suptitle('Chinese Development Finance (financial amount)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Chinese Development Finance in $10 bln (2000-2014)",
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
else:
fig.suptitle('Chinese Development Finance (probability)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Probability of receiving Chinese Development Finance (2000-2014)",###ADDDDJUST!!!!!
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
###
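# Bar plots of the share of projects and the total financial amount per flow class.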
def flow_class_plot(data):
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots(1,2, figsize = (14,6))
plt.subplots_adjust(wspace=0.5)
plotting = data.flow_class.value_counts(1)
plt.subplot(121)
ax = sns.barplot(x=plotting.index, y=plotting.values)
ax.set_ylabel("share")
ax.set_xlabel("Project type")
ax.set_title("Share of project type");
plotting2 = data.groupby("flow_class").usd_defl.sum()
plt.subplot(122)
ax = sns.barplot(x=plotting2.index, y=(plotting2.values/1e6))
ax.set_ylabel("Amount in million USD")
ax.set_xlabel("Project type")
ax.set_title("Financial amount per project type");
plt.plot()
df = pd.DataFrame([["ODA", plotting[0], round(plotting2[0]/1e6,2)],
["OOF", plotting[1], round(plotting2[1]/1e6,2)],
["Vague",plotting[2], round(plotting2[2]/1e6,2)]], columns = ["flow_class", "Share", "Amount in mio 2010 USD"])
#print(f"Number of projects:\nODA-like:{plotting[0]*100}%, \nOOF-like:{plotting[1]*100}%, \nVague OF:{plotting[2]*100}%")
# print(f"ODA-like:{plotting2[0]/1e6:.2f}, \nOOF-like:{plotting2[1]/1e6:.2f}, \nVague OF:{plotting2[2]/1e6:.2f}")
#print((plotting2[0]/1e6)/(plotting2.values.sum()/1e6))
return df
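###
def example_flow_class_usage(csv_path="projects.csv"):
    # Illustrative usage sketch, not part of the original script: shows how
    # flow_class_plot() is meant to be called. It only assumes a dataframe with
    # the "flow_class" and "usd_defl" columns used above and the module's
    # existing pandas import; the CSV path is a placeholder.
    projects = pd.read_csv(csv_path)
    summary_df = flow_class_plot(projects)
    print(summary_df)
    return summary_df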
###
def year_plot(data):
sns.set_theme(style="whitegrid")
year = np.unique(data.year)
total_projects_year = data.year.value_counts().sort_index()
ax = sns.barplot(x=year, y=total_projects_year, color= "blue")
ax.set_ylabel("number of projects")
ax.set_xticklabels(["'00", "'01", "'02","'03","'04","'05","'06","'07","'08","'09","'10","'11","'12","'13","'14"])
ax.set_title("number of projects per year");
###
def sectoral_plot(data):
sns.set_theme(style="whitegrid")
sectoral_analysis_df = data.crs_sector_name.value_counts(1).sort_index().to_frame("project_share")
sectoral_analysis_df["in_USD"] = data.groupby("crs_sector_name").usd_defl.sum()
sectoral_analysis_df = sectoral_analysis_df.sort_values(by="in_USD", ascending=False)
# plotting
f, axs = plt.subplots(2,1,figsize=(14,18))
plt.subplot(211)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.in_USD, color = "darkblue")
ax.set_title("Value per sector");
plt.subplot(212)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.project_share, color = "lightblue")
ax.set_title("Sare of projects per sector");
# Share of health, education and governance
share_HEG = ((sectoral_analysis_df.loc["Health", "in_USD"] + sectoral_analysis_df.loc["Education", "in_USD"]
+ sectoral_analysis_df.loc["Government and Civil Society", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
# Share of energy, transport, industry
share_ETI = ((sectoral_analysis_df.loc["Energy Generation and Supply", "in_USD"]
+ sectoral_analysis_df.loc["Industry, Mining, Construction", "in_USD"]
+ sectoral_analysis_df.loc["Transport and Storage", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
print(f"All projects of the health-, education, and governance sector account for {share_HEG*100:.2f}% of the total financial value,\nwhereas the energy-, transportation, and industry/mining sector accounts for {share_ETI*100:.2f}%")
###
def financeform_plot(data):
|
###
def quali_descriptive_plots(data, liste):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(3, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.violinplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], inner = "quartiles");
axes[0,1].set_title(liste[0])
ax = sns.violinplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], inner = "quartiles")
axes[1,0].set_title(liste[1])
sns.violinplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], inner = "quartiles");
axes[1,1].set_title(liste[1])
ax = sns.violinplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], inner = "quartiles");
axes[2,0].set_title(liste[2])
sns.violinplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], inner = "quartiles");
axes[2 | sns.set_theme(style="whitegrid")
f, axs = plt.subplots(2,1, figsize = (12,10))
plt.subplots_adjust(wspace=0.25)
plt.subplot(211)
ODA_like = data[data.flow_class == "ODA-like"].flow.value_counts()
ax = sns.barplot(y=ODA_like.index, x=ODA_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of ODA-like projects");
plt.subplot(212)
OOFv_like = data[data.flow_class == ("OOF-like" or "Vague (Official Finance)")].flow.value_counts()
ax = sns.barplot(y=OOFv_like.index, x=OOFv_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of OOFV-like projects");
plt.tight_layout(pad=2.5); | identifier_body |
auxiliary_plots.py | country of input df
pc = the columnnumber of the column that we want to plot
Returns:
---------
The return is a formated plot
"""
# define the columns of input
# cc = data.columns[checkcol]
#pc = data.columns[plotcol]
plt.rcParams['font.size'] = 18
# generate standart geopandas dataframe
world_df = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));
#check indicies of the input dataframe and modify standart geopandas df
world_df = world_df[world_df["iso_a3"].isin(data[cc])];
#world_2df.["OFa_all_con"] = np.nan;
#world_2df.sort_values(by="iso_a3").head()
for i in world_df.index:
for j in data.index:
if world_df.loc[i,"iso_a3"] == data.loc[j, cc]:
try:
world_df.loc[i,pc] = data.loc[j, pc];
except:
print("\nError! Invalid Input. Example for input: OFa_all_con")
return
fig, ax = plt.subplots(1,1, figsize=(22,12))
ax.axis('off')
if pc == "OFa_all_con":
fig.suptitle('Chinese Development Finance (financial amount)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Chinese Development Finance in $10 bln (2000-2014)",
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
else:
|
###
def flow_class_plot(data):
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots(1,2, figsize = (14,6))
plt.subplots_adjust(wspace=0.5)
plotting = data.flow_class.value_counts(1)
plt.subplot(121)
ax = sns.barplot(x=plotting.index, y=plotting.values)
ax.set_ylabel("share")
ax.set_xlabel("Project type")
ax.set_title("Share of project type");
plotting2 = data.groupby("flow_class").usd_defl.sum()
plt.subplot(122)
ax = sns.barplot(x=plotting2.index, y=(plotting2.values/1e6))
ax.set_ylabel("Amount in million USD")
ax.set_xlabel("Project type")
ax.set_title("Financial amount per project type");
plt.plot()
df = pd.DataFrame([["ODA", plotting[0], round(plotting2[0]/1e6,2)],
["OOF", plotting[1], round(plotting2[1]/1e6,2)],
["Vague",plotting[2], round(plotting2[2]/1e6,2)]], columns = ["flow_class", "Share", "Amount in mio 2010 USD"])
#print(f"Number of projects:\nODA-like:{plotting[0]*100}%, \nOOF-like:{plotting[1]*100}%, \nVague OF:{plotting[2]*100}%")
# print(f"ODA-like:{plotting2[0]/1e6:.2f}, \nOOF-like:{plotting2[1]/1e6:.2f}, \nVague OF:{plotting2[2]/1e6:.2f}")
#print((plotting2[0]/1e6)/(plotting2.values.sum()/1e6))
return df
###
def year_plot(data):
sns.set_theme(style="whitegrid")
year = np.unique(data.year)
total_projects_year = data.year.value_counts().sort_index()
ax = sns.barplot(x=year, y=total_projects_year, color= "blue")
ax.set_ylabel("number of projects")
ax.set_xticklabels(["'00", "'01", "'02","'03","'04","'05","'06","'07","'08","'09","'10","'11","'12","'13","'14"])
ax.set_title("number of projects per year");
###
def sectoral_plot(data):
sns.set_theme(style="whitegrid")
sectoral_analysis_df = data.crs_sector_name.value_counts(1).sort_index().to_frame("project_share")
sectoral_analysis_df["in_USD"] = data.groupby("crs_sector_name").usd_defl.sum()
sectoral_analysis_df = sectoral_analysis_df.sort_values(by="in_USD", ascending=False)
# plotting
f, axs = plt.subplots(2,1,figsize=(14,18))
plt.subplot(211)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.in_USD, color = "darkblue")
ax.set_title("Value per sector");
plt.subplot(212)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.project_share, color = "lightblue")
ax.set_title("Sare of projects per sector");
# Share of health, education and governance
share_HEG = ((sectoral_analysis_df.loc["Health", "in_USD"] + sectoral_analysis_df.loc["Education", "in_USD"]
+ sectoral_analysis_df.loc["Government and Civil Society", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
# Share of energy, transport, industry
share_ETI = ((sectoral_analysis_df.loc["Energy Generation and Supply", "in_USD"]
+ sectoral_analysis_df.loc["Industry, Mining, Construction", "in_USD"]
+ sectoral_analysis_df.loc["Transport and Storage", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
print(f"All projects of the health-, education, and governance sector account for {share_HEG*100:.2f}% of the total financial value,\nwhereas the energy-, transportation, and industry/mining sector accounts for {share_ETI*100:.2f}%")
###
def financeform_plot(data):
sns.set_theme(style="whitegrid")
f, axs = plt.subplots(2,1, figsize = (12,10))
plt.subplots_adjust(wspace=0.25)
plt.subplot(211)
ODA_like = data[data.flow_class == "ODA-like"].flow.value_counts()
ax = sns.barplot(y=ODA_like.index, x=ODA_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of ODA-like projects");
plt.subplot(212)
OOFv_like = data[data.flow_class == ("OOF-like" or "Vague (Official Finance)")].flow.value_counts()
ax = sns.barplot(y=OOFv_like.index, x=OOFv_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of OOFV-like projects");
plt.tight_layout(pad=2.5);
###
def quali_descriptive_plots(data, liste):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(3, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.violinplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], inner = "quartiles");
axes[0,1].set_title(liste[0])
ax = sns.violinplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], inner = "quartiles")
axes[1,0].set_title(liste[1])
sns.violinplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], inner = "quartiles");
axes[1,1].set_title(liste[1])
ax = sns.violinplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], inner = "quartiles");
axes[2,0].set_title(liste[2])
sns.violinplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], inner = "quartiles");
axes[2 | fig.suptitle('Chinese Development Finance (probability)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Probability of receiving Chinese Development Finance (2000-2014)",###ADDDDJUST!!!!!
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"}); | conditional_block |
auxiliary_plots.py | 2f}, \nOOF-like:{plotting2[1]/1e6:.2f}, \nVague OF:{plotting2[2]/1e6:.2f}")
#print((plotting2[0]/1e6)/(plotting2.values.sum()/1e6))
return df
###
def year_plot(data):
sns.set_theme(style="whitegrid")
year = np.unique(data.year)
total_projects_year = data.year.value_counts().sort_index()
ax = sns.barplot(x=year, y=total_projects_year, color= "blue")
ax.set_ylabel("number of projects")
ax.set_xticklabels(["'00", "'01", "'02","'03","'04","'05","'06","'07","'08","'09","'10","'11","'12","'13","'14"])
ax.set_title("number of projects per year");
###
def sectoral_plot(data):
sns.set_theme(style="whitegrid")
sectoral_analysis_df = data.crs_sector_name.value_counts(1).sort_index().to_frame("project_share")
sectoral_analysis_df["in_USD"] = data.groupby("crs_sector_name").usd_defl.sum()
sectoral_analysis_df = sectoral_analysis_df.sort_values(by="in_USD", ascending=False)
# plotting
f, axs = plt.subplots(2,1,figsize=(14,18))
plt.subplot(211)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.in_USD, color = "darkblue")
ax.set_title("Value per sector");
plt.subplot(212)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.project_share, color = "lightblue")
ax.set_title("Sare of projects per sector");
# Share of health, education and governance
share_HEG = ((sectoral_analysis_df.loc["Health", "in_USD"] + sectoral_analysis_df.loc["Education", "in_USD"]
+ sectoral_analysis_df.loc["Government and Civil Society", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
# Share of energy, transport, industry
share_ETI = ((sectoral_analysis_df.loc["Energy Generation and Supply", "in_USD"]
+ sectoral_analysis_df.loc["Industry, Mining, Construction", "in_USD"]
+ sectoral_analysis_df.loc["Transport and Storage", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
print(f"All projects of the health-, education, and governance sector account for {share_HEG*100:.2f}% of the total financial value,\nwhereas the energy-, transportation, and industry/mining sector accounts for {share_ETI*100:.2f}%")
###
def financeform_plot(data):
sns.set_theme(style="whitegrid")
f, axs = plt.subplots(2,1, figsize = (12,10))
plt.subplots_adjust(wspace=0.25)
plt.subplot(211)
ODA_like = data[data.flow_class == "ODA-like"].flow.value_counts()
ax = sns.barplot(y=ODA_like.index, x=ODA_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of ODA-like projects");
plt.subplot(212)
OOFv_like = data[data.flow_class == ("OOF-like" or "Vague (Official Finance)")].flow.value_counts()
ax = sns.barplot(y=OOFv_like.index, x=OOFv_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of OOFV-like projects");
plt.tight_layout(pad=2.5);
###
def quali_descriptive_plots(data, liste):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(3, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.violinplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], inner = "quartiles");
axes[0,1].set_title(liste[0])
ax = sns.violinplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], inner = "quartiles")
axes[1,0].set_title(liste[1])
sns.violinplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], inner = "quartiles");
axes[1,1].set_title(liste[1])
ax = sns.violinplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], inner = "quartiles");
axes[2,0].set_title(liste[2])
sns.violinplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], inner = "quartiles");
axes[2,1].set_title(liste[2])
ax = sns.violinplot(x=liste[2], y="OFa_all_con", data=data,ax=axes[2,1], inner = "quartiles");
plt.tight_layout(pad=2.5);
###
def quanti_descriptive_plots(data, liste, hue):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(4, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.scatterplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], hue = hue)
axes[0,1].set_title(liste[0])
ax = sns.scatterplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], hue = hue)
axes[1,0].set_title(liste[1])
sns.scatterplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], hue = hue);
axes[1,1].set_title(liste[1])
ax = sns.scatterplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], hue = hue);
axes[2,0].set_title(liste[2])
sns.scatterplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], hue = hue);
axes[2,1].set_title(liste[2])
ax = sns.scatterplot(x=liste[2], y="OFa_all_con", data=data,ax=axes[2,1], hue = hue);
axes[3,0].set_title(liste[3])
sns.scatterplot(x=liste[3], y="OFn_all", data=data,ax=axes[3,0], hue = hue);
axes[3,1].set_title(liste[3])
ax = sns.scatterplot(x=liste[3], y="OFa_all_con", data=data,ax=axes[3,1], hue = hue);
plt.tight_layout(pad=2.5);
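###
def example_quanti_plots(data):
    # Illustrative usage sketch, not part of the original script:
    # quanti_descriptive_plots() expects a list of exactly four numeric column
    # names plus a hue column. The names below are placeholders and must be
    # replaced by columns that actually exist in `data`.
    placeholder_cols = ["population", "gdp_pc", "exports_to_china", "disasters"]
    quanti_descriptive_plots(data, placeholder_cols, hue="year")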
###
def plot_parallel_trends(results_df, data4):
# Since I code the x ticks by hard, I get a warning that I will supress here
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
input_data = pd.read_stata("data/test_instruments2.dta")
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(2,2, figsize=(15, 15))
plt.subplot(221)
fig.suptitle("Parallel trends assumption", fontsize = 20)
#fig.suptitle("Parallel trends assumption: Material and Reserves", fontsize = 20)
for plots in ["ln_steel", "ln_iron", "ln_timber", "ln_glass", "ln_aluminum"]:
ax = sns.lineplot(x = "year", y = plots, data = input_data, label = plots)
ax.set_ylabel("(log) production volume of respective input)")
ax.set_title("Panel A: Chinas (logged) production of input materials over time");
plt.legend(fontsize = 15)
plt.subplot(222)
ax3 = sns.lineplot(x="year",y= "lower_probGrowthpc", data = results_df, label = "below median prob", )
ax3 = sns.lineplot(x ="year",y= "upper_probGrowthpc", data = results_df, label = "above median prob") | ax3.set_xticklabels(["2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014"]); | random_line_split |
|
auxiliary_plots.py | (data, cc, pc):
""" Function to plot a custom colored worldmap with help of a standart GeoPandas dataframe. I used the iso3 number of the countries
in order to clearly identify the countries and assign the choosen value (financial amount or project count) to the
specific country
For plotting, we have to input a dataframe that contains the iso3 code of countries (cc). Furthermore you need
to specify the column of the input data that you want to display on the worldmap (pc)
Args:
-------
data = pd.DataFrame which contains the column of interest
cc = the name of the column holding the ISO-3 country codes in the input df
pc = the name of the column that we want to plot
Returns:
---------
The return is a formatted plot
"""
# define the columns of input
# cc = data.columns[checkcol]
#pc = data.columns[plotcol]
plt.rcParams['font.size'] = 18
# generate standart geopandas dataframe
world_df = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));
#check indicies of the input dataframe and modify standart geopandas df
world_df = world_df[world_df["iso_a3"].isin(data[cc])];
#world_2df.["OFa_all_con"] = np.nan;
#world_2df.sort_values(by="iso_a3").head()
for i in world_df.index:
for j in data.index:
if world_df.loc[i,"iso_a3"] == data.loc[j, cc]:
try:
world_df.loc[i,pc] = data.loc[j, pc];
except:
print("\nError! Invalid Input. Example for input: OFa_all_con")
return
fig, ax = plt.subplots(1,1, figsize=(22,12))
ax.axis('off')
if pc == "OFa_all_con":
fig.suptitle('Chinese Development Finance (financial amount)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Chinese Development Finance in $10 bln (2000-2014)",
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
else:
fig.suptitle('Chinese Development Finance (probability)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Probability of receiving Chinese Development Finance (2000-2014)",###ADDDDJUST!!!!!
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
###
def flow_class_plot(data):
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots(1,2, figsize = (14,6))
plt.subplots_adjust(wspace=0.5)
plotting = data.flow_class.value_counts(1)
plt.subplot(121)
ax = sns.barplot(x=plotting.index, y=plotting.values)
ax.set_ylabel("share")
ax.set_xlabel("Project type")
ax.set_title("Share of project type");
plotting2 = data.groupby("flow_class").usd_defl.sum()
plt.subplot(122)
ax = sns.barplot(x=plotting2.index, y=(plotting2.values/1e6))
ax.set_ylabel("Amount in million USD")
ax.set_xlabel("Project type")
ax.set_title("Financial amount per project type");
plt.plot()
df = pd.DataFrame([["ODA", plotting[0], round(plotting2[0]/1e6,2)],
["OOF", plotting[1], round(plotting2[1]/1e6,2)],
["Vague",plotting[2], round(plotting2[2]/1e6,2)]], columns = ["flow_class", "Share", "Amount in mio 2010 USD"])
#print(f"Number of projects:\nODA-like:{plotting[0]*100}%, \nOOF-like:{plotting[1]*100}%, \nVague OF:{plotting[2]*100}%")
# print(f"ODA-like:{plotting2[0]/1e6:.2f}, \nOOF-like:{plotting2[1]/1e6:.2f}, \nVague OF:{plotting2[2]/1e6:.2f}")
#print((plotting2[0]/1e6)/(plotting2.values.sum()/1e6))
return df
###
def year_plot(data):
sns.set_theme(style="whitegrid")
year = np.unique(data.year)
total_projects_year = data.year.value_counts().sort_index()
ax = sns.barplot(x=year, y=total_projects_year, color= "blue")
ax.set_ylabel("number of projects")
ax.set_xticklabels(["'00", "'01", "'02","'03","'04","'05","'06","'07","'08","'09","'10","'11","'12","'13","'14"])
ax.set_title("number of projects per year");
###
def sectoral_plot(data):
sns.set_theme(style="whitegrid")
sectoral_analysis_df = data.crs_sector_name.value_counts(1).sort_index().to_frame("project_share")
sectoral_analysis_df["in_USD"] = data.groupby("crs_sector_name").usd_defl.sum()
sectoral_analysis_df = sectoral_analysis_df.sort_values(by="in_USD", ascending=False)
# plotting
f, axs = plt.subplots(2,1,figsize=(14,18))
plt.subplot(211)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.in_USD, color = "darkblue")
ax.set_title("Value per sector");
plt.subplot(212)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.project_share, color = "lightblue")
ax.set_title("Sare of projects per sector");
# Share of health, education and governance
share_HEG = ((sectoral_analysis_df.loc["Health", "in_USD"] + sectoral_analysis_df.loc["Education", "in_USD"]
+ sectoral_analysis_df.loc["Government and Civil Society", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
# Share of energy, transport, industry
share_ETI = ((sectoral_analysis_df.loc["Energy Generation and Supply", "in_USD"]
+ sectoral_analysis_df.loc["Industry, Mining, Construction", "in_USD"]
+ sectoral_analysis_df.loc["Transport and Storage", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
print(f"All projects of the health-, education, and governance sector account for {share_HEG*100:.2f}% of the total financial value,\nwhereas the energy-, transportation, and industry/mining sector accounts for {share_ETI*100:.2f}%")
###
def financeform_plot(data):
sns.set_theme(style="whitegrid")
f, axs = plt.subplots(2,1, figsize = (12,10))
plt.subplots_adjust(wspace=0.25)
plt.subplot(211)
ODA_like = data[data.flow_class == "ODA-like"].flow.value_counts()
ax = sns.barplot(y=ODA_like.index, x=ODA_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of ODA-like projects");
plt.subplot(212)
OOFv_like = data[data.flow_class.isin(["OOF-like", "Vague (Official Finance)"])].flow.value_counts()
ax = sns.barplot(y=OOFv_like.index, x=OOFv_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of OOFV-like projects");
plt.tight_layout(pad=2.5);
###
def quali_descriptive_plots(data, liste):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(3, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.violinplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], inner = "quartiles");
axes[0,1].set_title(liste[0])
ax = sns.violinplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], inner = "quartiles")
axes[1,0].set_title(liste[1])
sns.violin | worldplot_2 | identifier_name |
|
binge-watch.mock.ts | Ophüls",
description:
"A reluctant Hobbit, Bilbo Baggins, sets out to the Lonely Mountain with a spirited group of dwarves to reclaim their mountain home, and the gold within it from the dragon Smaug.",
image:
"https://m.media-amazon.com/images/M/MV5BMTcwNTE4MTUxMl5BMl5BanBnXkFtZTcwMDIyODM4OA@@._V1_SY1000_CR0,0,674,1000_AL_.jpg"
},
{
name: "Fantastic Beasts and Where to Find Them",
duration: 132,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"The adventures of writer Newt Scamander in New York's secret community of witches and wizards seventy years before Harry Potter reads his book in school.",
image:
"https://m.media-amazon.com/images/M/MV5BMjMxOTM1OTI4MV5BMl5BanBnXkFtZTgwODE5OTYxMDI@._V1_SY1000_CR0,0,674,1000_AL_.jpg"
},
{
name: "Harry Potter and the Deathly Hallows: Part 1",
duration: 146,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"As Harry (Daniel Radcliffe), Ron (Rupert Grint), and Hermione (Emma Watson) race against time and evil to destroy the Horcruxes, they uncover the existence of the three most powerful objects in the wizarding world: the Deathly Hallows.",
image:
"https://m.media-amazon.com/images/M/MV5BMTQ2OTE1Mjk0N15BMl5BanBnXkFtZTcwODE3MDAwNA@@._V1_SY1000_CR0,0,675,1000_AL_.jpg"
},
{
name: "The Chronicles of Narnia: The Lion, the Witch and the Wardrobe",
duration: 143,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"Four kids travel through a wardrobe to the land of Narnia and learn of their destiny to free it with the guidance of a mystical lion.",
image:
"https://m.media-amazon.com/images/M/MV5BMTc0NTUwMTU5OV5BMl5BanBnXkFtZTcwNjAwNzQzMw@@._V1_SY1000_CR0,0,676,1000_AL_.jpg"
},
{
name: "Casablanca",
duration: 102,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"A cynical American expatriate struggles to decide whether or not he should help his former lover and her fugitive husband escape French Morocco.",
image:
"https://m.media-amazon.com/images/M/MV5BY2IzZGY2YmEtYzljNS00NTM5LTgwMzUtMzM1NjQ4NGI0OTk0XkEyXkFqcGdeQXVyNDYyMDk5MTU@._V1_.jpg"
},
{
name: "Life Is Beautiful",
duration: 116,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"When an open-minded Jewish librarian and his son become victims of the Holocaust, he uses a perfect mixture of will, humor, and imagination to protect his son from the dangers around their camp.",
image:
"https://m.media-amazon.com/images/M/MV5BYmJmM2Q4NmMtYThmNC00ZjRlLWEyZmItZTIwOTBlZDQ3NTQ1XkEyXkFqcGdeQXVyMTQxNzMzNDI@._V1_SY1000_SX670_AL_.jpg"
},
{
name: "The Breakfast Club",
duration: 97,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"Five high school students meet in Saturday detention and discover how they have a lot more in common than they thought.",
image:
"https://m.media-amazon.com/images/M/MV5BOTM5N2ZmZTMtNjlmOS00YzlkLTk3YjEtNTU1ZmQ5OTdhODZhXkEyXkFqcGdeQXVyMTQxNzMzNDI@._V1_SY1000_CR0,0,639,1000_AL_.jpg"
},
{
name: "Green Book",
duration: 130,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"A working-class Italian-American bouncer becomes the driver of an African-American classical pianist on a tour of venues through the 1960s American South.",
image:
"https://m.media-amazon.com/images/M/MV5BYzIzYmJlYTYtNGNiYy00N2EwLTk4ZjItMGYyZTJiOTVkM2RlXkEyXkFqcGdeQXVyODY1NDk1NjE@._V1_SY1000_CR0,0,666,1000_AL_.jpg"
}
];
export const TV_SERIES: TvSeries[] = [
{
id: 1,
name: "Friends",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 10,
noSeasons: 10,
director: "Marcel Ophüls",
description:
"Follows the personal and professional lives of six twenty to thirty-something-year-old friends living in Manhattan.",
image:
"https://m.media-amazon.com/images/M/MV5BNDVkYjU0MzctMWRmZi00NTkxLTgwZWEtOWVhYjZlYjllYmU4XkEyXkFqcGdeQXVyNTA4NzY1MzY@._V1_.jpg"
},
{
id: 2,
name: "The Big Bang Theory",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 24,
noSeasons: 12,
director: "Marcel Ophüls",
description:
"A woman who moves into an apartment across the hall from two brilliant but socially awkward physicists shows them how little they know about life outside of the laboratory.",
image:
"https://m.media-amazon.com/images/M/MV5BY2FmZTY5YTktOWRlYy00NmIyLWE0ZmQtZDg2YjlmMzczZDZiXkEyXkFqcGdeQXVyNjg4NzAyOTA@._V1_SY1000_CR0,0,666,1000_AL_.jpg"
},
{
id: 3,
name: "How I Met Your Mother",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 22,
noSeasons: 9,
director: "Marcel Ophüls",
description:
"A father recounts to his children, through a series of flashbacks, the journey he and his four best friends took leading up to him meeting their mother.",
image:
"https://m.media-amazon.com/images/M/MV5BZWJjMDEzZjUtYWE1Yy00M2ZiLThlMmItODljNTAzODFiMzc2XkEyXkFqcGdeQXVyNTA4NzY1MzY@._V1_SY1000_CR0,0,666,1000_AL_.jpg"
},
{
id: 4,
name: "Family Guy",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 23,
| noSeasons: 18,
director: "Marcel Ophüls",
description:
"In a wacky Rhode Island town, a dysfunctional family strive to cope with everyday life as they are thrown from one crazy scenario to another.",
image:
| random_line_split |
|
lib.rs | "아나타".into());
let mut foo = "あなた당신あなた".into();
item.apply(&mut foo);
assert_eq!(foo, "아나타당신아나타");
}
#[test]
#[should_panic]
fn dict_item_empty_key_test() {
let _item = EzDictItem::new("".into(), "123".into());
}
#[test]
fn dict_item_empty_value_test() {
let item = EzDictItem::new("123".into(), "".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "def");
}
#[test]
fn dict_item_eq_kv_test() {
let item = EzDictItem::new("123".into(), "123".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "123def");
}
#[derive(Serialize, Deserialize, Default)]
struct EzDict {
#[serde(default)]
sort: bool,
#[serde(alias = "BeforeDic")]
#[serde(with = "dict_items")]
#[serde(default)]
before_dict: Vec<EzDictItem>,
#[serde(alias = "AfterDic")]
#[serde(with = "dict_items")]
#[serde(default)]
after_dict: Vec<EzDictItem>,
}
impl EzDict {
pub fn sort_before_dict(&mut self) {
if !self.sort {
return | if !self.sort {
return;
}
self.after_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort(&mut self) {
self.sort_after_dict();
self.sort_before_dict();
}
}
mod dict_items {
use super::EzDictItem;
use serde::de::{MapAccess, Visitor};
use serde::ser::SerializeMap;
use serde::{Deserializer, Serializer};
use std::fmt;
pub fn serialize<S: Serializer>(items: &Vec<EzDictItem>, s: S) -> Result<S::Ok, S::Error> {
let mut map = s.serialize_map(Some(items.len()))?;
for item in items {
map.serialize_entry(item.key(), item.value())?;
}
map.end()
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<EzDictItem>, D::Error> {
struct ItemVisitor;
impl<'de> Visitor<'de> for ItemVisitor {
type Value = Vec<EzDictItem>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("key and value")
}
fn visit_map<M: MapAccess<'de>>(self, mut access: M) -> Result<Self::Value, M::Error> {
let mut ret = Vec::with_capacity(access.size_hint().unwrap_or(10));
while let Some((key, value)) = access.next_entry()? {
ret.push(EzDictItem::new(key, value));
}
Ok(ret)
}
}
d.deserialize_map(ItemVisitor)
}
}
pub struct EzContext {
lib: Container<EzTransLib<'static>>,
cache: FxHashMap<String, String>,
dict: EzDict,
encode_buffer: Vec<u8>,
string_buffer: String,
}
impl EzContext {
pub fn from_path(
lib: Container<EzTransLib<'static>>,
path: &Path,
) -> Result<Self, Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
let json_dict_path = path.join("userdic.json");
let mut cache = if cache_path.exists() {
rmp_serde::from_read(fs::File::open(cache_path)?)?
} else {
FxHashMap::default()
};
cache.insert(String::new(), String::new());
let mut dict = if dict_path.exists() {
serde_yaml::from_reader(fs::File::open(dict_path)?)?
} else if json_dict_path.exists() {
serde_json::from_reader(fs::File::open(json_dict_path)?)?
} else {
EzDict::default()
};
dict.sort();
Ok(Self {
lib,
cache,
dict,
encode_buffer: Vec::with_capacity(8192),
string_buffer: String::new(),
})
}
pub fn save_to(&self, path: &Path) -> Result<(), Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
use std::fs::write;
write(cache_path, rmp_serde::to_vec(&self.cache)?)?;
write(dict_path, serde_yaml::to_vec(&self.dict)?)?;
Ok(())
}
fn translate_impl(&mut self, text: &str) -> &str {
let dict = &mut self.dict;
let lib = &self.lib;
let buf = &mut self.encode_buffer;
let str_buf = &mut self.string_buffer;
self.cache.entry(text.into()).or_insert_with(move || {
str_buf.push_str(text);
let mut encoder = SHIFT_JIS.new_encoder();
let mut decoder = EUC_KR.new_decoder_without_bom_handling();
let max_buf_len = encoder
.max_buffer_length_from_utf8_without_replacement(str_buf.len())
.unwrap_or(0);
buf.reserve(max_buf_len + 1);
let (encoder_ret, _) =
encoder.encode_from_utf8_to_vec_without_replacement(&str_buf, buf, true);
buf.push(0);
assert_eq!(encoder_ret, EncoderResult::InputEmpty);
let translated =
unsafe { lib.translate(CStr::from_bytes_with_nul_unchecked(&buf[..])) };
let translated = translated.as_bytes();
buf.clear();
str_buf.clear();
let mut ret = String::with_capacity(
decoder
.max_utf8_buffer_length_without_replacement(translated.len())
.unwrap_or(0),
);
let (_decoder_ret, _) =
decoder.decode_to_string_without_replacement(translated, &mut ret, true);
for after in dict.after_dict.iter() {
after.apply(&mut ret);
}
ret
})
}
pub fn translate(&mut self, text: &str) -> &str {
if !self.cache.contains_key(text) {
let max_len = UTF_16LE
.new_decoder_without_bom_handling()
.max_utf8_buffer_length_without_replacement(text.len() * 2);
let mut ret = String::with_capacity(max_len.unwrap_or(text.len() * 3));
{
let mut text = text.into();
for before in self.dict.before_dict.iter() {
before.apply(&mut text);
}
let mut prev_pos = 0;
let mut is_in_japanese = is_japanese(text.chars().next().unwrap());
for (pos, ch) in text.char_indices() {
if is_japanese(ch) {
if !is_in_japanese {
ret.push_str(&text[prev_pos..pos]);
prev_pos = pos;
is_in_japanese = true;
}
} else {
if is_in_japanese {
let translated = self.translate_impl(&text[prev_pos..pos]);
ret.push_str(translated);
prev_pos = pos;
is_in_japanese = false;
}
}
}
if !is_in_japanese {
ret.push_str(&text[prev_pos..]);
} else {
let translated = self.translate_impl(&text[prev_pos..]);
ret.push_str(translated);
}
}
self.cache.insert(text.into(), ret);
}
self.cache.get(text).unwrap()
}
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_init(
ez_path: *const u16,
ez_path_len: usize,
ctx_path: *const u16,
ctx_path_len: usize,
) -> *mut EzContext {
let path = utf16_to_string(ez_path, ez_path_len);
let ctx_path = utf16_to_string(ctx_path, ctx_path_len);
let path = Path::new(&path);
let ctx_path = Path::new(&ctx_path);
eprintln!("Loading lib from {}", path.display());
let lib = match eztrans_rs::load_library(path.join("J2KEngine.dll")) {
Ok(lib) => lib,
Err(err) => {
eprintln!("EzTrans library loading failed: {:?}", err);
return null_mut();
}
};
let mut dat_dir = path.join("Dat").to_str().unwrap().to_string().into_bytes();
dat_dir.push(0);
lib.initialize(
CStr::from_bytes_with_nul_unchecked(b"CSUSER123455\0"),
CStr::from_bytes_with_nul_unchecked(&dat_dir[..]),
);
let ctx = match EzContext::from_path(lib, ctx_path) {
Ok(ctx) => ctx,
Err(err) => {
eprintln!("Loading context failed: {:?}", err);
return null_mut();
}
| ;
}
self.before_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort_after_dict(&mut self) {
| identifier_body |
lib.rs | "아나타".into());
let mut foo = "あなた당신あなた".into();
item.apply(&mut foo);
assert_eq!(foo, "아나타당신아나타");
}
#[test]
#[should_panic]
fn dict_item_empty_key_test() {
let _item = EzDictItem::new("".into(), "123".into());
}
#[test]
fn dict_item_empty_value_test() {
let item = EzDictItem::new("123".into(), "".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "def");
}
#[test]
fn dict_item_eq_kv_test() {
let item = EzDictItem::new("123".into(), "123".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "123def");
}
#[derive(Serialize, Deserialize, Default)]
struct EzDict {
#[serde(default)]
sort: bool,
#[serde(alias = "BeforeDic")]
#[serde(with = "dict_items")]
#[serde(default)]
before_dict: Vec<EzDictItem>,
#[serde(alias = "AfterDic")]
#[serde(with = "dict_items")]
#[serde(default)]
after_dict: Vec<EzDictItem>,
}
| impl EzDict {
pub fn sort_before_dict(&mut self) {
if !self.sort {
return;
}
self.before_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort_after_dict(&mut self) {
if !self.sort {
return;
}
self.after_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort(&mut self) {
self.sort_after_dict();
self.sort_before_dict();
}
}
mod dict_items {
use super::EzDictItem;
use serde::de::{MapAccess, Visitor};
use serde::ser::SerializeMap;
use serde::{Deserializer, Serializer};
use std::fmt;
pub fn serialize<S: Serializer>(items: &Vec<EzDictItem>, s: S) -> Result<S::Ok, S::Error> {
let mut map = s.serialize_map(Some(items.len()))?;
for item in items {
map.serialize_entry(item.key(), item.value())?;
}
map.end()
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<EzDictItem>, D::Error> {
struct ItemVisitor;
impl<'de> Visitor<'de> for ItemVisitor {
type Value = Vec<EzDictItem>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("key and value")
}
fn visit_map<M: MapAccess<'de>>(self, mut access: M) -> Result<Self::Value, M::Error> {
let mut ret = Vec::with_capacity(access.size_hint().unwrap_or(10));
while let Some((key, value)) = access.next_entry()? {
ret.push(EzDictItem::new(key, value));
}
Ok(ret)
}
}
d.deserialize_map(ItemVisitor)
}
}
pub struct EzContext {
lib: Container<EzTransLib<'static>>,
cache: FxHashMap<String, String>,
dict: EzDict,
encode_buffer: Vec<u8>,
string_buffer: String,
}
impl EzContext {
pub fn from_path(
lib: Container<EzTransLib<'static>>,
path: &Path,
) -> Result<Self, Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
let json_dict_path = path.join("userdic.json");
let mut cache = if cache_path.exists() {
rmp_serde::from_read(fs::File::open(cache_path)?)?
} else {
FxHashMap::default()
};
cache.insert(String::new(), String::new());
let mut dict = if dict_path.exists() {
serde_yaml::from_reader(fs::File::open(dict_path)?)?
} else if json_dict_path.exists() {
serde_json::from_reader(fs::File::open(json_dict_path)?)?
} else {
EzDict::default()
};
dict.sort();
Ok(Self {
lib,
cache,
dict,
encode_buffer: Vec::with_capacity(8192),
string_buffer: String::new(),
})
}
pub fn save_to(&self, path: &Path) -> Result<(), Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
use std::fs::write;
write(cache_path, rmp_serde::to_vec(&self.cache)?)?;
write(dict_path, serde_yaml::to_vec(&self.dict)?)?;
Ok(())
}
fn translate_impl(&mut self, text: &str) -> &str {
let dict = &mut self.dict;
let lib = &self.lib;
let buf = &mut self.encode_buffer;
let str_buf = &mut self.string_buffer;
self.cache.entry(text.into()).or_insert_with(move || {
str_buf.push_str(text);
let mut encoder = SHIFT_JIS.new_encoder();
let mut decoder = EUC_KR.new_decoder_without_bom_handling();
let max_buf_len = encoder
.max_buffer_length_from_utf8_without_replacement(str_buf.len())
.unwrap_or(0);
buf.reserve(max_buf_len + 1);
let (encoder_ret, _) =
encoder.encode_from_utf8_to_vec_without_replacement(&str_buf, buf, true);
buf.push(0);
assert_eq!(encoder_ret, EncoderResult::InputEmpty);
let translated =
unsafe { lib.translate(CStr::from_bytes_with_nul_unchecked(&buf[..])) };
let translated = translated.as_bytes();
buf.clear();
str_buf.clear();
let mut ret = String::with_capacity(
decoder
.max_utf8_buffer_length_without_replacement(translated.len())
.unwrap_or(0),
);
let (_decoder_ret, _) =
decoder.decode_to_string_without_replacement(translated, &mut ret, true);
for after in dict.after_dict.iter() {
after.apply(&mut ret);
}
ret
})
}
pub fn translate(&mut self, text: &str) -> &str {
if !self.cache.contains_key(text) {
let max_len = UTF_16LE
.new_decoder_without_bom_handling()
.max_utf8_buffer_length_without_replacement(text.len() * 2);
let mut ret = String::with_capacity(max_len.unwrap_or(text.len() * 3));
{
let mut text = text.into();
for before in self.dict.before_dict.iter() {
before.apply(&mut text);
}
let mut prev_pos = 0;
let mut is_in_japanese = is_japanese(text.chars().next().unwrap());
for (pos, ch) in text.char_indices() {
if is_japanese(ch) {
if !is_in_japanese {
ret.push_str(&text[prev_pos..pos]);
prev_pos = pos;
is_in_japanese = true;
}
} else {
if is_in_japanese {
let translated = self.translate_impl(&text[prev_pos..pos]);
ret.push_str(translated);
prev_pos = pos;
is_in_japanese = false;
}
}
}
if !is_in_japanese {
ret.push_str(&text[prev_pos..]);
} else {
let translated = self.translate_impl(&text[prev_pos..]);
ret.push_str(translated);
}
}
self.cache.insert(text.into(), ret);
}
self.cache.get(text).unwrap()
}
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_init(
ez_path: *const u16,
ez_path_len: usize,
ctx_path: *const u16,
ctx_path_len: usize,
) -> *mut EzContext {
let path = utf16_to_string(ez_path, ez_path_len);
let ctx_path = utf16_to_string(ctx_path, ctx_path_len);
let path = Path::new(&path);
let ctx_path = Path::new(&ctx_path);
eprintln!("Loading lib from {}", path.display());
let lib = match eztrans_rs::load_library(path.join("J2KEngine.dll")) {
Ok(lib) => lib,
Err(err) => {
eprintln!("EzTrans library loading failed: {:?}", err);
return null_mut();
}
};
let mut dat_dir = path.join("Dat").to_str().unwrap().to_string().into_bytes();
dat_dir.push(0);
lib.initialize(
CStr::from_bytes_with_nul_unchecked(b"CSUSER123455\0"),
CStr::from_bytes_with_nul_unchecked(&dat_dir[..]),
);
let ctx = match EzContext::from_path(lib, ctx_path) {
Ok(ctx) => ctx,
Err(err) => {
eprintln!("Loading context failed: {:?}", err);
return null_mut();
}
};
| random_line_split |
|
lib.rs | "아나타".into());
let mut foo = "あなた당신あなた".into();
item.apply(&mut foo);
assert_eq!(foo, "아나타당신아나타");
}
#[test]
#[should_panic]
fn dict_item_empty_key_test() {
let _item = EzDictItem::new("".into(), "123".into());
}
#[test]
fn dict_item_empty_value_test() {
let item = EzDictItem::new("123".into(), "".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "def");
}
#[test]
fn dict_item_eq_kv_test() {
let item = EzDictItem::new("123".into(), "123".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "123def");
}
#[derive(Serialize, Deserialize, Default)]
struct EzDict {
#[serde(default)]
sort: bool,
#[serde(alias = "BeforeDic")]
#[serde(with = "dict_items")]
#[serde(default)]
before_dict: Vec<EzDictItem>,
#[serde(alias = "AfterDic")]
#[serde(with = "dict_items")]
#[serde(default)]
after_dict: Vec<EzDictItem>,
}
impl EzDict {
pub fn sort_before_dict(&mut self) {
if !self.sort {
return;
}
self.before_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort_after_dict(&mut self) {
if !self.sort {
return;
}
self.after_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort(&mut self) {
self.sort_after_dict();
self.sort_before_dict();
}
}
mod dict_items {
use super::EzDictItem;
use serde::de::{MapAccess, Visitor};
use serde::ser::SerializeMap;
use serde::{Deserializer, Serializer};
use std::fmt;
pub fn serialize<S: Serializer>(items: &Vec<EzDictItem>, s: S) -> Result<S::Ok, S::Error> {
let mut map = s.serialize_map(Some(items.len()))?;
for item in items {
map.serialize_entry(item.key(), item.value())?;
}
map.end()
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<EzDictItem>, D::Error> {
struct ItemVisitor;
impl<'de> Visitor<'de> for ItemVisitor {
type Value = Vec<EzDictItem>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("key and value")
}
fn visit_map<M: MapAccess<'de>>(self, mut access: M) -> Result<Self::Value, M::Error> {
let mut ret = Vec::with_capacity(access.size_hint().unwrap_or(10));
while let Some((key, value)) = access.next_entry()? {
ret.push(EzDictItem::new(key, value));
}
Ok(ret)
}
}
d.deserialize_map(ItemVisitor)
}
}
pub struct EzContext {
lib: Container<EzTransLib<'static>>,
cache: FxHashMap<String, String>,
dict: EzDict,
encode_buffer: Vec<u8>,
string_buffer: String,
}
impl EzContext {
pub fn from_path(
lib: Container<EzTransLib<'static>>,
path: &Path,
) -> Result<Self, Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
let json_dict_path = path.join("userdic.json");
let mut cache = if cache_path.exists() {
rmp_serde::from_read(fs::File::open(cache_path)?)?
} else {
FxHashMap::default()
};
cache.insert(String::new(), String::new());
let mut dict = if dict_path.exists() {
serde_yaml::from_reader(fs::File::open(dict_path)?)?
} else if json_dict_path.exists() {
serde_json::from_reader(fs::File::open(json_dict_path)?)?
} else {
EzDict::default()
};
dict.sort();
Ok(Self {
lib,
cache,
dict,
encode_buffer: Vec::with_capacity(8192),
string_buffer: String::new(),
})
}
pub fn save_to(&self, path: &Path) -> Result<(), Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
use std::fs::write;
write(cache_path, rmp_serde::to_vec(&self.cache)?)?;
write(dict_path, serde_yaml::to_vec(&self.dict)?)?;
Ok(())
}
fn translate_impl(&mut self, text: &str) -> &st | t dict = &mut self.dict;
let lib = &self.lib;
let buf = &mut self.encode_buffer;
let str_buf = &mut self.string_buffer;
self.cache.entry(text.into()).or_insert_with(move || {
str_buf.push_str(text);
let mut encoder = SHIFT_JIS.new_encoder();
let mut decoder = EUC_KR.new_decoder_without_bom_handling();
let max_buf_len = encoder
.max_buffer_length_from_utf8_without_replacement(str_buf.len())
.unwrap_or(0);
buf.reserve(max_buf_len + 1);
let (encoder_ret, _) =
encoder.encode_from_utf8_to_vec_without_replacement(&str_buf, buf, true);
buf.push(0);
assert_eq!(encoder_ret, EncoderResult::InputEmpty);
let translated =
unsafe { lib.translate(CStr::from_bytes_with_nul_unchecked(&buf[..])) };
let translated = translated.as_bytes();
buf.clear();
str_buf.clear();
let mut ret = String::with_capacity(
decoder
.max_utf8_buffer_length_without_replacement(translated.len())
.unwrap_or(0),
);
let (_decoder_ret, _) =
decoder.decode_to_string_without_replacement(translated, &mut ret, true);
for after in dict.after_dict.iter() {
after.apply(&mut ret);
}
ret
})
}
pub fn translate(&mut self, text: &str) -> &str {
if !self.cache.contains_key(text) {
let max_len = UTF_16LE
.new_decoder_without_bom_handling()
.max_utf8_buffer_length_without_replacement(text.len() * 2);
let mut ret = String::with_capacity(max_len.unwrap_or(text.len() * 3));
{
let mut text = text.into();
for before in self.dict.before_dict.iter() {
before.apply(&mut text);
}
let mut prev_pos = 0;
let mut is_in_japanese = is_japanese(text.chars().next().unwrap());
for (pos, ch) in text.char_indices() {
if is_japanese(ch) {
if !is_in_japanese {
ret.push_str(&text[prev_pos..pos]);
prev_pos = pos;
is_in_japanese = true;
}
} else {
if is_in_japanese {
let translated = self.translate_impl(&text[prev_pos..pos]);
ret.push_str(translated);
prev_pos = pos;
is_in_japanese = false;
}
}
}
if !is_in_japanese {
ret.push_str(&text[prev_pos..]);
} else {
let translated = self.translate_impl(&text[prev_pos..]);
ret.push_str(translated);
}
}
self.cache.insert(text.into(), ret);
}
self.cache.get(text).unwrap()
}
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_init(
ez_path: *const u16,
ez_path_len: usize,
ctx_path: *const u16,
ctx_path_len: usize,
) -> *mut EzContext {
let path = utf16_to_string(ez_path, ez_path_len);
let ctx_path = utf16_to_string(ctx_path, ctx_path_len);
let path = Path::new(&path);
let ctx_path = Path::new(&ctx_path);
eprintln!("Loading lib from {}", path.display());
let lib = match eztrans_rs::load_library(path.join("J2KEngine.dll")) {
Ok(lib) => lib,
Err(err) => {
eprintln!("EzTrans library loading failed: {:?}", err);
return null_mut();
}
};
let mut dat_dir = path.join("Dat").to_str().unwrap().to_string().into_bytes();
dat_dir.push(0);
lib.initialize(
CStr::from_bytes_with_nul_unchecked(b"CSUSER123455\0"),
CStr::from_bytes_with_nul_unchecked(&dat_dir[..]),
);
let ctx = match EzContext::from_path(lib, ctx_path) {
Ok(ctx) => ctx,
Err(err) => {
eprintln!("Loading context failed: {:?}", err);
return null_mut();
}
| r {
le | identifier_name |
bksv.go | "},
"comments": {c.Description},
"responserequired": {"N"},
"enquirytype": {"C"},
"submit": {"Submit complaint"},
//"submitkey": {submitkey},
"nowebtrak": {"1"},
"defaulttime": {"0"},
"webtraklinkback": {""},
"title": {""},
"homephone": {""},
"workphone": {""},
"cellphone": {""},
"browser_name": {c.Browser.Name},
"browser_version": {browser_version},
"browser_vendor": {c.Browser.Vendor},
"browser_uuid": {c.Browser.UUID},
"browser_platform": {c.Browser.Platform},
}
if c.AircraftOverhead.FlightNumber != "" |
return vals
}
// }}}
// {{{ PostComplaint
// https://complaints-staging.bksv.com/sfo2?json=1&resp=json
// {"result":"1",
// "title":"Complaint Received",
// "body":"Thank you. We have received your complaint."}
func PostComplaint(client *http.Client, c complaintdb.Complaint) (*complaintdb.Submission, error) {
// Initialize a new submission object, inheriting from previous
s := complaintdb.Submission{
Attempts: c.Submission.Attempts + 1,
Log: c.Submission.Log+fmt.Sprintf("\n--------=={ PostComplaint @ %s }==-\n", time.Now()),
Key: c.Submission.Key, // We're now keyless, should prob strip this out
T: time.Now().UTC(),
Outcome: complaintdb.SubmissionFailed, // Be pessimistic right up until the end
}
// We used to have to fetch a unique key (which lived in the form),
// that we'd need to submit with the rest of the complaint; that
// prevented dupes on their end. But the new version skips that
// requirement for API based submissions like ours, so we're
// keyless now.
s.Log += fmt.Sprintf("----{ time: %s }----\n --{ keyless submission }--\n", s.T)
vals := PopulateForm(c, "")
s.Log += "Submitting these vals:-\n"
for k,v := range vals { s.Log += fmt.Sprintf(" * %-20.20s: %v\n", k, v) }
s.Log += "\n"
// resp,err := client.PostForm("https://"+bksvHost+bksvPath, vals)
req,_ := http.NewRequest("POST", "https://"+bksvHost+bksvPath, strings.NewReader(vals.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") // This is important
reqBytes,_ := httputil.DumpRequestOut(req,true)
s.Log += "Full req to ["+bksvHost+"]:-\n--\n"+string(reqBytes)+"\n--\n\n"
resp,err := client.Do(req)
s.D = time.Since(s.T)
if err != nil {
if strings.Contains(err.Error(), "DEADLINE_EXCEEDED") {
s.Outcome = complaintdb.SubmissionTimeout
}
s.Log += fmt.Sprintf("ComplaintPOST: Posting error (dur=%s): %v\n", s.D, err)
return &s,err
}
respBytes,_ := httputil.DumpResponse(resp,true)
s.Log += "Full resp:-\n--\n"+string(respBytes)+"\n--\n\n"
defer resp.Body.Close()
body,_ := ioutil.ReadAll(resp.Body)
s.Log += fmt.Sprintf("ComplaintPOST: HTTP response '%s'\n", resp.Status)
s.Response = []byte(body)
if resp.StatusCode >= 400 {
s.Log += fmt.Sprintf("ComplaintPOST: HTTP Body:-\n%s\n--\n", body)
return &s,fmt.Errorf("ComplaintPOST: HTTP err %s", resp.Status)
}
var jsonMap map[string]interface{}
if err := json.Unmarshal([]byte(body), &jsonMap); err != nil {
s.Log += fmt.Sprintf("ComplaintPOST: JSON unmarshal '%v'\nBody:-\n%s\n--\n", err, body)
return &s,fmt.Errorf("ComplaintPOST: JSON unmarshal %v", err)
/* A few times, the remote site failed to send JSON responses, and sent HTML instead. This
* will work in that case.
if !regexp.MustCompile(`(?i:received your complaint)`).MatchString(string(body)) {
debug += fmt.Sprintf("BKSV body ...\n%s\n------\n", string(body))
return debug,fmt.Errorf("Returned response did not say 'received your complaint'")
} else {
debug += "Success !\n"+string(body)
}
*/
}
indentedBytes,_ := json.MarshalIndent(jsonMap, "", " ")
s.Log += "\n-- JsonMap:-\n"+string(indentedBytes)+"\n--\n"
/* on success ...
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"receipt_key": "adasdsdadsdasds786dsa87d6as87d6as",
"result": "1",
"title": "Submission Received"
}
Or more recently,
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"complaint_receipt_keys": [
"b85409b1152840d6d149e721cfda6eb639b05979"
],
"receipt_key": null,
"result": "1",
"title": "Submission Received"
}
*/
v := jsonMap["result"];
if v == nil {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'result'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'result'")
}
result := v.(string)
if result != "1" {
s.Outcome = complaintdb.SubmissionRejected
s.Log += fmt.Sprintf("Json result not '1'\n")
return &s,fmt.Errorf("ComplaintPOST: result='%s'", result)
}
// Extract the foreign key for this complaint
found := false
if v = jsonMap["receipt_key"]; v != nil {
s.Key = fmt.Sprintf("%.0f", jsonMap["receipt_key"].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
} else if r := jsonMap["complaint_receipt_keys"]; r != nil {
if v, isSlice := r.([]interface{}); isSlice {
if len(v) > 0 {
s.Key = fmt.Sprintf("%.0f", v[0].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
}
}
}
if ! found {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'receipt_key', or 'complaint_receipt_keys[]'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'receipt_key'")
}
return &s,nil
}
// }}}
// {{{ Notes
/* These POST fields were sent by browser, for happy success
nowebtrak:1
submitkey:4aef9c8831919524ec35ae8af8ff25ba
defaulttime:0
webtraklinkback:
title:
name:Adam
surname:Worrall
address1:1 Some Drive
address2:
city:Scotts Valley
state:CA
homephone:
workphone:
cellphone:
email:[email protected]
airports:KSFO
month:10
day:2
year:2015
hour:20
min:16
aircrafttype:Jet
eventtype:Loud noise
comments:Blah
responserequired:N
enquirytype:C
submit:Submit complaint
*/
/* BKSV:
You can call it this way
https://complaints-staging.bksv.com/sfo2?json=1
to get a json object of all the form field definitions we accept. That
will tell you what fields we require to be set and also gives you
handy things like the set of allowed disturbance types, event types
etc. NB: I haven't yet configured it to match the SFO system values
for these but that is a simple config change I can do as soon as I
have the information | {
vals.Add("acid", c.AircraftOverhead.Callsign)
vals.Add("aacode", c.AircraftOverhead.Id2)
vals.Add("tailnumber", c.AircraftOverhead.Registration)
//vals.Add("adflag", "??") // Operation type (A, D or O for Arr, Dept or Overflight)
//vals.Add("beacon", "??") // Squawk SSR code (eg 2100)
} | conditional_block |
bksv.go | (c complaintdb.Complaint, submitkey string) url.Values {
first,last := c.Profile.SplitName()
if c.Activity == "" { c.Activity = "Loud noise" }
address1 := ""
addr := c.Profile.GetStructuredAddress()
if addr.Street == "" {
address1 = c.Profile.Address // default to the raw string, if we don't have a structured one
} else if addr.Number == "" {
address1 = addr.Street
} else {
address1 = addr.Number + " " + addr.Street
}
browser_version := c.Browser.Version
if (len(browser_version) > 50) {
browser_version = browser_version[0:49]
}
getLoudVal := func(in int) string {
loudVals := map[int]string{1: "Loud", 2:"Very Loud", 3:"Excessively Loud"}
if val, exists := loudVals[in]; exists {
return val
}
return "Loud"
}
vals := url.Values{
"response": {"json"}, // Must always set this as a GET param
"contactmethod": {"App"},
"apiKey": {config.Get("bksv.apiKey")},
"accept_privacy": {"Y"},
"caller_code": {c.Profile.CallerCode},
"name": {first},
"surname": {last},
"address1": {address1},
"address2": {""},
"zipcode": {addr.Zip},
"city": {addr.City},
"state": {addr.State},
"email": {c.Profile.EmailAddress},
"airports": {"KSFO"}, // KOAK, KSJC, KSAN
"month": {date.InPdt(c.Timestamp).Format("1")},
"day": {date.InPdt(c.Timestamp).Format("2")},
"year": {date.InPdt(c.Timestamp).Format("2006")},
"hour": {date.InPdt(c.Timestamp).Format("15")},
"min": {date.InPdt(c.Timestamp).Format("4")},
"sec": {date.InPdt(c.Timestamp).Format("5")},
"aircrafttype": {"J"},
"aircraftcategory": {"J"},
"activity_type": {"Other"}, // perhaps map c.Activity to something ?
"event_type": {getLoudVal(c.Loudness)}, // as per 2023.03.16
"adflag": {"U"},
"comments": {c.Description},
"responserequired": {"N"},
"enquirytype": {"C"},
"submit": {"Submit complaint"},
//"submitkey": {submitkey},
"nowebtrak": {"1"},
"defaulttime": {"0"},
"webtraklinkback": {""},
"title": {""},
"homephone": {""},
"workphone": {""},
"cellphone": {""},
"browser_name": {c.Browser.Name},
"browser_version": {browser_version},
"browser_vendor": {c.Browser.Vendor},
"browser_uuid": {c.Browser.UUID},
"browser_platform": {c.Browser.Platform},
}
if c.AircraftOverhead.FlightNumber != "" {
vals.Add("acid", c.AircraftOverhead.Callsign)
vals.Add("aacode", c.AircraftOverhead.Id2)
vals.Add("tailnumber", c.AircraftOverhead.Registration)
//vals.Add("adflag", "??") // Operation type (A, D or O for Arr, Dept or Overflight)
//vals.Add("beacon", "??") // Squawk SSR code (eg 2100)
}
return vals
}
// }}}
// {{{ PostComplaint
// https://complaints-staging.bksv.com/sfo2?json=1&resp=json
// {"result":"1",
// "title":"Complaint Received",
// "body":"Thank you. We have received your complaint."}
func PostComplaint(client *http.Client, c complaintdb.Complaint) (*complaintdb.Submission, error) {
// Initialize a new submission object, inheriting from previous
s := complaintdb.Submission{
Attempts: c.Submission.Attempts + 1,
Log: c.Submission.Log+fmt.Sprintf("\n--------=={ PostComplaint @ %s }==-\n", time.Now()),
Key: c.Submission.Key, // We're now keyless, should prob strip this out
T: time.Now().UTC(),
Outcome: complaintdb.SubmissionFailed, // Be pessimistic right up until the end
}
// We used to have to fetch a unique key (which lived in the form),
// that we'd need to submit with the rest of the complaint; that
// prevented dupes on their end. But the new version skips that
// requirement for API based submissions like ours, so we're
// keyless now.
s.Log += fmt.Sprintf("----{ time: %s }----\n --{ keyless submission }--\n", s.T)
vals := PopulateForm(c, "")
s.Log += "Submitting these vals:-\n"
for k,v := range vals { s.Log += fmt.Sprintf(" * %-20.20s: %v\n", k, v) }
s.Log += "\n"
// resp,err := client.PostForm("https://"+bksvHost+bksvPath, vals)
req,_ := http.NewRequest("POST", "https://"+bksvHost+bksvPath, strings.NewReader(vals.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") // This is important
reqBytes,_ := httputil.DumpRequestOut(req,true)
s.Log += "Full req to ["+bksvHost+"]:-\n--\n"+string(reqBytes)+"\n--\n\n"
resp,err := client.Do(req)
s.D = time.Since(s.T)
if err != nil {
if strings.Contains(err.Error(), "DEADLINE_EXCEEDED") {
s.Outcome = complaintdb.SubmissionTimeout
}
s.Log += fmt.Sprintf("ComplaintPOST: Posting error (dur=%s): %v\n", s.D, err)
return &s,err
}
respBytes,_ := httputil.DumpResponse(resp,true)
s.Log += "Full resp:-\n--\n"+string(respBytes)+"\n--\n\n"
defer resp.Body.Close()
body,_ := ioutil.ReadAll(resp.Body)
s.Log += fmt.Sprintf("ComplaintPOST: HTTP response '%s'\n", resp.Status)
s.Response = []byte(body)
if resp.StatusCode >= 400 {
s.Log += fmt.Sprintf("ComplaintPOST: HTTP Body:-\n%s\n--\n", body)
return &s,fmt.Errorf("ComplaintPOST: HTTP err %s", resp.Status)
}
var jsonMap map[string]interface{}
if err := json.Unmarshal([]byte(body), &jsonMap); err != nil {
s.Log += fmt.Sprintf("ComplaintPOST: JSON unmarshal '%v'\nBody:-\n%s\n--\n", err, body)
return &s,fmt.Errorf("ComplaintPOST: JSON unmarshal %v", err)
/* A few times, the remote site failed to send JSON responses, and sent HTML instead. This
* will work in that case.
if !regexp.MustCompile(`(?i:received your complaint)`).MatchString(string(body)) {
debug += fmt.Sprintf("BKSV body ...\n%s\n------\n", string(body))
return debug,fmt.Errorf("Returned response did not say 'received your complaint'")
} else {
debug += "Success !\n"+string(body)
}
*/
}
indentedBytes,_ := json.MarshalIndent(jsonMap, "", " ")
s.Log += "\n-- JsonMap:-\n"+string(indentedBytes)+"\n--\n"
/* on success ...
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"receipt_key": "adasdsdadsdasds786dsa87d6as87d6as",
"result": "1",
"title": "Submission Received"
}
Or more recently,
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"complaint_receipt_keys": [
"b85409b1152840d6d149e721cfda6eb639b05979"
],
"receipt_key": null,
"result": "1",
"title": "Submission Received"
}
*/
v := jsonMap["result"];
if v == nil {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'result'\n")
return &s,fmt.Errorf("ComplaintPOST: json | PopulateForm | identifier_name |
|
bksv.go | U"},
"comments": {c.Description},
"responserequired": {"N"},
"enquirytype": {"C"},
"submit": {"Submit complaint"},
//"submitkey": {submitkey},
"nowebtrak": {"1"},
"defaulttime": {"0"},
"webtraklinkback": {""},
"title": {""},
"homephone": {""},
"workphone": {""},
"cellphone": {""},
"browser_name": {c.Browser.Name},
"browser_version": {browser_version},
"browser_vendor": {c.Browser.Vendor},
"browser_uuid": {c.Browser.UUID},
"browser_platform": {c.Browser.Platform},
}
if c.AircraftOverhead.FlightNumber != "" {
vals.Add("acid", c.AircraftOverhead.Callsign)
vals.Add("aacode", c.AircraftOverhead.Id2)
vals.Add("tailnumber", c.AircraftOverhead.Registration)
//vals.Add("adflag", "??") // Operation type (A, D or O for Arr, Dept or Overflight)
//vals.Add("beacon", "??") // Squawk SSR code (eg 2100)
}
return vals
}
// }}}
// {{{ PostComplaint
// https://complaints-staging.bksv.com/sfo2?json=1&resp=json
// {"result":"1",
// "title":"Complaint Received",
// "body":"Thank you. We have received your complaint."}
func PostComplaint(client *http.Client, c complaintdb.Complaint) (*complaintdb.Submission, error) {
// Initialize a new submission object, inheriting from previous
s := complaintdb.Submission{
Attempts: c.Submission.Attempts + 1,
Log: c.Submission.Log+fmt.Sprintf("\n--------=={ PostComplaint @ %s }==-\n", time.Now()),
Key: c.Submission.Key, // We're now keyless, should prob strip this out
T: time.Now().UTC(),
Outcome: complaintdb.SubmissionFailed, // Be pessimistic right up until the end
}
// We used to have to fetch a unique key (which lived in the form),
// that we'd need to submit with the rest of the complaint; that
// prevented dupes on their end. But the new version skips that
// requirement for API based submissions like ours, so we're
// keyless now.
s.Log += fmt.Sprintf("----{ time: %s }----\n --{ keyless submission }--\n", s.T)
vals := PopulateForm(c, "")
s.Log += "Submitting these vals:-\n"
for k,v := range vals { s.Log += fmt.Sprintf(" * %-20.20s: %v\n", k, v) }
s.Log += "\n"
// resp,err := client.PostForm("https://"+bksvHost+bksvPath, vals)
req,_ := http.NewRequest("POST", "https://"+bksvHost+bksvPath, strings.NewReader(vals.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") // This is important
reqBytes,_ := httputil.DumpRequestOut(req,true)
s.Log += "Full req to ["+bksvHost+"]:-\n--\n"+string(reqBytes)+"\n--\n\n"
resp,err := client.Do(req)
s.D = time.Since(s.T)
if err != nil {
if strings.Contains(err.Error(), "DEADLINE_EXCEEDED") {
s.Outcome = complaintdb.SubmissionTimeout
}
s.Log += fmt.Sprintf("ComplaintPOST: Posting error (dur=%s): %v\n", s.D, err)
return &s,err
}
respBytes,_ := httputil.DumpResponse(resp,true)
s.Log += "Full resp:-\n--\n"+string(respBytes)+"\n--\n\n"
defer resp.Body.Close()
body,_ := ioutil.ReadAll(resp.Body)
s.Log += fmt.Sprintf("ComplaintPOST: HTTP response '%s'\n", resp.Status)
s.Response = []byte(body)
if resp.StatusCode >= 400 {
s.Log += fmt.Sprintf("ComplaintPOST: HTTP Body:-\n%s\n--\n", body)
return &s,fmt.Errorf("ComplaintPOST: HTTP err %s", resp.Status)
}
var jsonMap map[string]interface{}
if err := json.Unmarshal([]byte(body), &jsonMap); err != nil {
s.Log += fmt.Sprintf("ComplaintPOST: JSON unmarshal '%v'\nBody:-\n%s\n--\n", err, body)
return &s,fmt.Errorf("ComplaintPOST: JSON unmarshal %v", err)
/* A few times, the remote site failed to send JSON responses, and sent HTML instead. This
* will work in that case.
if !regexp.MustCompile(`(?i:received your complaint)`).MatchString(string(body)) {
debug += fmt.Sprintf("BKSV body ...\n%s\n------\n", string(body)) | }
indentedBytes,_ := json.MarshalIndent(jsonMap, "", " ")
s.Log += "\n-- JsonMap:-\n"+string(indentedBytes)+"\n--\n"
/* on success ...
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"receipt_key": "adasdsdadsdasds786dsa87d6as87d6as",
"result": "1",
"title": "Submission Received"
}
Or more recently,
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"complaint_receipt_keys": [
"b85409b1152840d6d149e721cfda6eb639b05979"
],
"receipt_key": null,
"result": "1",
"title": "Submission Received"
}
*/
v := jsonMap["result"];
if v == nil {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'result'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'result'")
}
result := v.(string)
if result != "1" {
s.Outcome = complaintdb.SubmissionRejected
s.Log += fmt.Sprintf("Json result not '1'\n")
return &s,fmt.Errorf("ComplaintPOST: result='%s'", result)
}
// Extract the foreign key for this complaint
found := false
if v = jsonMap["receipt_key"]; v != nil {
s.Key = fmt.Sprintf("%.0f", jsonMap["receipt_key"].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
} else if r := jsonMap["complaint_receipt_keys"]; r != nil {
if v, isSlice := r.([]interface{}); isSlice {
if len(v) > 0 {
s.Key = fmt.Sprintf("%.0f", v[0].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
}
}
}
if ! found {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'receipt_key', or 'complaint_receipt_keys[]'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'receipt_key'")
}
return &s,nil
}
// }}}
// {{{ Notes
/* These POST fields were sent by browser, for happy success
nowebtrak:1
submitkey:4aef9c8831919524ec35ae8af8ff25ba
defaulttime:0
webtraklinkback:
title:
name:Adam
surname:Worrall
address1:1 Some Drive
address2:
city:Scotts Valley
state:CA
homephone:
workphone:
cellphone:
email:[email protected]
airports:KSFO
month:10
day:2
year:2015
hour:20
min:16
aircrafttype:Jet
eventtype:Loud noise
comments:Blah
responserequired:N
enquirytype:C
submit:Submit complaint
*/
/* BKSV:
You can call it this way
https://complaints-staging.bksv.com/sfo2?json=1
to get a json object of all the form field definitions we accept. That
will tell you what fields we require to be set and also gives you
handy things like the set of allowed disturbance types, event types
etc. NB: I haven't yet configured it to match the SFO system values
for these but that is a simple config change I can do as soon as I
have the information.
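   For example, the field definitions can be fetched directly with something like
     curl 'https://complaints-staging.bksv.com/sfo2?json=1'
   The response is a JSON object describing each accepted form field (required flags, allowed event
   and disturbance types); its exact shape isn't reproduced in these notes.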
| return debug,fmt.Errorf("Returned response did not say 'received your complaint'")
} else {
debug += "Success !\n"+string(body)
}
*/ | random_line_split |
bksv.go | "},
"comments": {c.Description},
"responserequired": {"N"},
"enquirytype": {"C"},
"submit": {"Submit complaint"},
//"submitkey": {submitkey},
"nowebtrak": {"1"},
"defaulttime": {"0"},
"webtraklinkback": {""},
"title": {""},
"homephone": {""},
"workphone": {""},
"cellphone": {""},
"browser_name": {c.Browser.Name},
"browser_version": {browser_version},
"browser_vendor": {c.Browser.Vendor},
"browser_uuid": {c.Browser.UUID},
"browser_platform": {c.Browser.Platform},
}
if c.AircraftOverhead.FlightNumber != "" {
vals.Add("acid", c.AircraftOverhead.Callsign)
vals.Add("aacode", c.AircraftOverhead.Id2)
vals.Add("tailnumber", c.AircraftOverhead.Registration)
//vals.Add("adflag", "??") // Operation type (A, D or O for Arr, Dept or Overflight)
//vals.Add("beacon", "??") // Squawk SSR code (eg 2100)
}
return vals
}
// }}}
// {{{ PostComplaint
// https://complaints-staging.bksv.com/sfo2?json=1&resp=json
// {"result":"1",
// "title":"Complaint Received",
// "body":"Thank you. We have received your complaint."}
func PostComplaint(client *http.Client, c complaintdb.Complaint) (*complaintdb.Submission, error) | for k,v := range vals { s.Log += fmt.Sprintf(" * %-20.20s: %v\n", k, v) }
s.Log += "\n"
// resp,err := client.PostForm("https://"+bksvHost+bksvPath, vals)
req,_ := http.NewRequest("POST", "https://"+bksvHost+bksvPath, strings.NewReader(vals.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") // This is important
reqBytes,_ := httputil.DumpRequestOut(req,true)
s.Log += "Full req to ["+bksvHost+"]:-\n--\n"+string(reqBytes)+"\n--\n\n"
resp,err := client.Do(req)
s.D = time.Since(s.T)
if err != nil {
if strings.Contains(err.Error(), "DEADLINE_EXCEEDED") {
s.Outcome = complaintdb.SubmissionTimeout
}
s.Log += fmt.Sprintf("ComplaintPOST: Posting error (dur=%s): %v\n", s.D, err)
return &s,err
}
respBytes,_ := httputil.DumpResponse(resp,true)
s.Log += "Full resp:-\n--\n"+string(respBytes)+"\n--\n\n"
defer resp.Body.Close()
body,_ := ioutil.ReadAll(resp.Body)
s.Log += fmt.Sprintf("ComplaintPOST: HTTP response '%s'\n", resp.Status)
s.Response = []byte(body)
if resp.StatusCode >= 400 {
s.Log += fmt.Sprintf("ComplaintPOST: HTTP Body:-\n%s\n--\n", body)
return &s,fmt.Errorf("ComplaintPOST: HTTP err %s", resp.Status)
}
var jsonMap map[string]interface{}
if err := json.Unmarshal([]byte(body), &jsonMap); err != nil {
s.Log += fmt.Sprintf("ComplaintPOST: JSON unmarshal '%v'\nBody:-\n%s\n--\n", err, body)
return &s,fmt.Errorf("ComplaintPOST: JSON unmarshal %v", err)
/* A few times, the remote site failed to send JSON responses, and sent HTML instead. This
* will work in that case.
if !regexp.MustCompile(`(?i:received your complaint)`).MatchString(string(body)) {
debug += fmt.Sprintf("BKSV body ...\n%s\n------\n", string(body))
return debug,fmt.Errorf("Returned response did not say 'received your complaint'")
} else {
debug += "Success !\n"+string(body)
}
*/
}
indentedBytes,_ := json.MarshalIndent(jsonMap, "", " ")
s.Log += "\n-- JsonMap:-\n"+string(indentedBytes)+"\n--\n"
/* on success ...
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"receipt_key": "adasdsdadsdasds786dsa87d6as87d6as",
"result": "1",
"title": "Submission Received"
}
Or more recently,
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"complaint_receipt_keys": [
"b85409b1152840d6d149e721cfda6eb639b05979"
],
"receipt_key": null,
"result": "1",
"title": "Submission Received"
}
*/
v := jsonMap["result"];
if v == nil {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'result'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'result'")
}
result := v.(string)
if result != "1" {
s.Outcome = complaintdb.SubmissionRejected
s.Log += fmt.Sprintf("Json result not '1'\n")
return &s,fmt.Errorf("ComplaintPOST: result='%s'", result)
}
// Extract the foreign key for this complaint
found := false
if v = jsonMap["receipt_key"]; v != nil {
s.Key = fmt.Sprintf("%.0f", jsonMap["receipt_key"].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
} else if r := jsonMap["complaint_receipt_keys"]; r != nil {
if v, isSlice := r.([]interface{}); isSlice {
if len(v) > 0 {
s.Key = fmt.Sprintf("%.0f", v[0].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
}
}
}
if ! found {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'receipt_key', or 'complaint_receipt_keys[]'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'receipt_key'")
}
return &s,nil
}
// }}}
// {{{ Notes
/* These POST fields were sent by browser, for happy success
nowebtrak:1
submitkey:4aef9c8831919524ec35ae8af8ff25ba
defaulttime:0
webtraklinkback:
title:
name:Adam
surname:Worrall
address1:1 Some Drive
address2:
city:Scotts Valley
state:CA
homephone:
workphone:
cellphone:
email:[email protected]
airports:KSFO
month:10
day:2
year:2015
hour:20
min:16
aircrafttype:Jet
eventtype:Loud noise
comments:Blah
responserequired:N
enquirytype:C
submit:Submit complaint
*/
/* BKSV:
You can call it this way
https://complaints-staging.bksv.com/sfo2?json=1
to get a json object of all the form field definitions we accept. That
will tell you what fields we require to be set and also gives you
handy things like the set of allowed disturbance types, event types
etc. NB: I haven't yet configured it to match the SFO system values
for these but that is a simple config change I can do as soon as I
have the information.
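   For example, the field definitions can be fetched directly with something like
     curl 'https://complaints-staging.bksv.com/sfo2?json=1'
   The response is a JSON object describing each accepted form field (required flags, allowed event
   and disturbance types); its exact shape isn't reproduced in these notes.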
| {
// Initialize a new submission object, inheriting from previous
s := complaintdb.Submission{
Attempts: c.Submission.Attempts + 1,
Log: c.Submission.Log+fmt.Sprintf("\n--------=={ PostComplaint @ %s }==-\n", time.Now()),
Key: c.Submission.Key, // We're now keyless, should prob strip this out
T: time.Now().UTC(),
Outcome: complaintdb.SubmissionFailed, // Be pessimistic right up until the end
}
// We used to have to fetch a unique key (which lived in the form),
// that we'd need to submit with the rest of the complaint; that
// prevented dupes on their end. But the new version skips that
// requirement for API based submissions like ours, so we're
// keyless now.
s.Log += fmt.Sprintf("----{ time: %s }----\n --{ keyless submission }--\n", s.T)
vals := PopulateForm(c, "")
s.Log += "Submitting these vals:-\n" | identifier_body |
write.go | image in an accompanying .sig file:
sigFn := target
ext := filepath.Ext(sigFn)
if ext == "" {
return "", fmt.Errorf("BUG: cannot derive signature file name from matches[0]=%q", matches[0])
}
sigFn = strings.TrimSuffix(sigFn, ext) + ".sig"
w, err = fw.File(sigFn, st.ModTime())
if err != nil {
return "", err
}
_, err = fmt.Fprintf(w, "%x\n", h.Sum(nil))
return fmt.Sprintf("%x", h.Sum(nil)), err
}
if eepromDir != "" {
fmt.Printf("EEPROM update summary:\n")
pieSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "pieeprom-*.bin"), "/pieeprom.upd")
if err != nil {
return err
}
vlSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "vl805-*.bin"), "/vl805.bin")
if err != nil {
return err
}
targetFilename := "/recovery.bin"
if pieSig == p.ExistingEEPROM.PieepromSHA256 &&
vlSig == p.ExistingEEPROM.VL805SHA256 {
fmt.Printf(" installing recovery.bin as RECOVERY.000 (EEPROM already up-to-date)\n")
targetFilename = "/RECOVERY.000"
}
if _, err := writeEepromUpdateFile(filepath.Join(eepromDir, "recovery.bin"), targetFilename); err != nil {
return err
}
}
if err := p.writeCmdline(fw, filepath.Join(kernelDir, "cmdline.txt")); err != nil {
return err
}
if err := p.writeConfig(fw, filepath.Join(kernelDir, "config.txt")); err != nil {
return err
}
if p.UseGPTPartuuid {
srcX86, err := systemd.SystemdBootX64.Open("systemd-bootx64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTX64.EFI", srcX86); err != nil {
return err
}
srcAA86, err := systemd.SystemdBootAA64.Open("systemd-bootaa64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTAA64.EFI", srcAA86); err != nil {
return err
}
}
if err := fw.Flush(); err != nil {
return err
}
if err := bufw.Flush(); err != nil {
return err
}
if seeker, ok := f.(io.Seeker); ok {
off, err := seeker.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
fragment = ", " + humanize.Bytes(uint64(off))
}
if mbrfilename != "" {
if _, ok := f.(io.ReadSeeker); !ok {
return fmt.Errorf("BUG: f does not implement io.ReadSeeker")
}
fmbr, err := os.OpenFile(mbrfilename, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
defer fmbr.Close()
if err := writeMBR(f.(io.ReadSeeker), fmbr, p.Partuuid); err != nil {
return err
}
if err := fmbr.Close(); err != nil {
return err
}
}
return nil
}
type FileInfo struct {
Filename string
Mode os.FileMode
FromHost string
FromLiteral string
SymlinkDest string
Dirents []*FileInfo
}
func (fi *FileInfo) isFile() bool {
return fi.FromHost != "" || fi.FromLiteral != ""
}
func (fi *FileInfo) pathList() (paths []string) {
for _, ent := range fi.Dirents {
if ent.isFile() {
paths = append(paths, ent.Filename)
continue
}
for _, e := range ent.pathList() {
paths = append(paths, path.Join(ent.Filename, e))
}
}
return paths
}
func (fi *FileInfo) combine(fi2 *FileInfo) error {
for _, ent2 := range fi2.Dirents {
// get existing file info
var f *FileInfo
for _, ent := range fi.Dirents {
if ent.Filename == ent2.Filename {
f = ent
break
}
}
// if not found add complete subtree directly
if f == nil {
fi.Dirents = append(fi.Dirents, ent2)
continue
}
// file overwrite is not supported -> return error
if f.isFile() || ent2.isFile() {
return fmt.Errorf("file already exist: %s", ent2.Filename)
}
if err := f.combine(ent2); err != nil {
return err
}
}
return nil
}
func (fi *FileInfo) mustFindDirent(path string) *FileInfo {
for _, ent := range fi.Dirents {
// TODO: split path into components and compare piecemeal
if ent.Filename == path {
return ent
}
}
log.Panicf("mustFindDirent(%q) did not find directory entry", path)
return nil
}
func findBins(cfg *config.Struct, buildEnv *packer.BuildEnv, bindir string) (*FileInfo, error) {
result := FileInfo{Filename: ""}
// TODO: doing all three packer.MainPackages calls concurrently hides go
// module proxy latency
gokrazyMainPkgs, err := buildEnv.MainPackages(cfg.GokrazyPackagesOrDefault())
if err != nil {
return nil, err
}
gokrazy := FileInfo{Filename: "gokrazy"}
for _, pkg := range gokrazyMainPkgs {
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
if cfg.InternalCompatibilityFlags.InitPkg != "" {
initMainPkgs, err := buildEnv.MainPackages([]string{cfg.InternalCompatibilityFlags.InitPkg})
if err != nil {
return nil, err
}
for _, pkg := range initMainPkgs {
if got, want := pkg.Basename(), "init"; got != want {
log.Printf("Error: -init_pkg=%q produced unexpected binary name: got %q, want %q", cfg.InternalCompatibilityFlags.InitPkg, got, want)
continue
}
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
}
result.Dirents = append(result.Dirents, &gokrazy)
mainPkgs, err := buildEnv.MainPackages(cfg.Packages)
if err != nil {
return nil, err
}
user := FileInfo{Filename: "user"}
for _, pkg := range mainPkgs {
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
user.Dirents = append(user.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
result.Dirents = append(result.Dirents, &user)
return &result, nil
}
func writeFileInfo(dir *squashfs.Directory, fi *FileInfo) error {
if fi.FromHost != "" { // copy a regular file
return copyFileSquash(dir, fi.Filename, fi.FromHost)
}
if fi.FromLiteral != "" { // write a regular file
mode := fi.Mode
if mode == 0 {
mode = 0444
}
w, err := dir.File(fi.Filename, time.Now(), mode)
if err != nil {
return err
}
if _, err := w.Write([]byte(fi.FromLiteral)); err != nil {
return err
}
return w.Close()
}
if fi.SymlinkDest != "" { // create a symlink
return dir.Symlink(fi.SymlinkDest, fi.Filename, time.Now(), 0444)
}
// subdir
var d *squashfs.Directory
if fi.Filename == "" { // root
d = dir
} else {
d = dir.Directory(fi.Filename, time.Now())
}
sort.Slice(fi.Dirents, func(i, j int) bool {
return fi.Dirents[i].Filename < fi.Dirents[j].Filename
})
for _, ent := range fi.Dirents {
if err := writeFileInfo(d, ent); err != nil {
return err
}
}
return d.Flush()
}
func | writeRoot | identifier_name |
|
write.go | string(b)
if p.Cfg.SerialConsoleOrDefault() != "off" {
config = strings.ReplaceAll(config, "enable_uart=0", "enable_uart=1")
}
w, err := fw.File("/config.txt", time.Now())
if err != nil {
return err
}
_, err = w.Write([]byte(config))
return err
}
func shortenSHA256(sum []byte) string {
hash := fmt.Sprintf("%x", sum)
if len(hash) > 10 {
return hash[:10]
}
return hash
}
var (
firmwareGlobs = []string{
"*.bin",
"*.dat",
"*.elf",
"*.upd",
"*.sig",
}
kernelGlobs = []string{
"boot.scr", // u-boot script file
"vmlinuz",
"*.dtb",
}
)
func (p *Pack) writeBoot(f io.Writer, mbrfilename string) error {
fmt.Printf("\n")
fmt.Printf("Creating boot file system\n")
done := measure.Interactively("creating boot file system")
fragment := ""
defer func() {
done(fragment)
}()
globs := make([]string, 0, len(firmwareGlobs)+len(kernelGlobs))
if fw := p.Cfg.FirmwarePackageOrDefault(); fw != "" {
firmwareDir, err := packer.PackageDir(fw)
if err != nil {
return err
}
for _, glob := range firmwareGlobs {
globs = append(globs, filepath.Join(firmwareDir, glob))
}
}
var eepromDir string
if eeprom := p.Cfg.EEPROMPackageOrDefault(); eeprom != "" {
var err error
eepromDir, err = packer.PackageDir(eeprom)
if err != nil {
return err
}
}
kernelDir, err := packer.PackageDir(p.Cfg.KernelPackageOrDefault())
if err != nil {
return err
}
fmt.Printf("\nKernel directory: %s\n", kernelDir)
for _, glob := range kernelGlobs {
globs = append(globs, filepath.Join(kernelDir, glob))
}
bufw := bufio.NewWriter(f)
fw, err := fat.NewWriter(bufw)
if err != nil {
return err
}
for _, pattern := range globs {
matches, err := filepath.Glob(pattern)
if err != nil {
return err
}
for _, m := range matches {
src, err := os.Open(m)
if err != nil {
return err
}
if err := copyFile(fw, "/"+filepath.Base(m), src); err != nil {
return err
}
}
}
// EEPROM update procedure. See also:
// https://news.ycombinator.com/item?id=21674550
writeEepromUpdateFile := func(globPattern, target string) (sig string, _ error) {
matches, err := filepath.Glob(globPattern)
if err != nil {
return "", err
}
if len(matches) == 0 {
return "", fmt.Errorf("invalid -eeprom_package: no files matching %s", filepath.Base(globPattern))
}
// Select the EEPROM file that sorts last.
// This corresponds to most recent for the pieeprom-*.bin files,
// which contain the date in yyyy-mm-dd format.
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
f, err := os.Open(matches[0])
if err != nil {
return "", err
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return "", err
}
// Copy the EEPROM file into the image and calculate its SHA256 hash
// while doing so:
w, err := fw.File(target, st.ModTime())
if err != nil {
return "", err
}
h := sha256.New()
if _, err := io.Copy(w, io.TeeReader(f, h)); err != nil {
return "", err
}
if base := filepath.Base(target); base == "recovery.bin" || base == "RECOVERY.000" {
fmt.Printf(" %s\n", base)
// No signature required for recovery.bin itself.
return "", nil
}
fmt.Printf(" %s (sig %s)\n", filepath.Base(target), shortenSHA256(h.Sum(nil)))
// Include the SHA256 hash in the image in an accompanying .sig file:
sigFn := target
ext := filepath.Ext(sigFn)
if ext == "" {
return "", fmt.Errorf("BUG: cannot derive signature file name from matches[0]=%q", matches[0])
}
sigFn = strings.TrimSuffix(sigFn, ext) + ".sig"
w, err = fw.File(sigFn, st.ModTime())
if err != nil {
return "", err
}
_, err = fmt.Fprintf(w, "%x\n", h.Sum(nil))
return fmt.Sprintf("%x", h.Sum(nil)), err
}
if eepromDir != "" {
fmt.Printf("EEPROM update summary:\n")
pieSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "pieeprom-*.bin"), "/pieeprom.upd")
if err != nil {
return err
}
vlSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "vl805-*.bin"), "/vl805.bin")
if err != nil {
return err
}
targetFilename := "/recovery.bin"
if pieSig == p.ExistingEEPROM.PieepromSHA256 &&
vlSig == p.ExistingEEPROM.VL805SHA256 {
fmt.Printf(" installing recovery.bin as RECOVERY.000 (EEPROM already up-to-date)\n")
targetFilename = "/RECOVERY.000"
}
if _, err := writeEepromUpdateFile(filepath.Join(eepromDir, "recovery.bin"), targetFilename); err != nil {
return err
}
}
if err := p.writeCmdline(fw, filepath.Join(kernelDir, "cmdline.txt")); err != nil {
return err
}
if err := p.writeConfig(fw, filepath.Join(kernelDir, "config.txt")); err != nil {
return err
}
if p.UseGPTPartuuid {
srcX86, err := systemd.SystemdBootX64.Open("systemd-bootx64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTX64.EFI", srcX86); err != nil {
return err
}
srcAA86, err := systemd.SystemdBootAA64.Open("systemd-bootaa64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTAA64.EFI", srcAA86); err != nil {
return err
}
}
if err := fw.Flush(); err != nil {
return err
}
if err := bufw.Flush(); err != nil {
return err
}
if seeker, ok := f.(io.Seeker); ok {
off, err := seeker.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
fragment = ", " + humanize.Bytes(uint64(off))
}
if mbrfilename != "" {
if _, ok := f.(io.ReadSeeker); !ok {
return fmt.Errorf("BUG: f does not implement io.ReadSeeker")
}
fmbr, err := os.OpenFile(mbrfilename, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
defer fmbr.Close()
if err := writeMBR(f.(io.ReadSeeker), fmbr, p.Partuuid); err != nil {
return err
}
if err := fmbr.Close(); err != nil {
return err
}
}
return nil
}
type FileInfo struct {
Filename string
Mode os.FileMode
FromHost string
FromLiteral string
SymlinkDest string
Dirents []*FileInfo
}
func (fi *FileInfo) isFile() bool {
return fi.FromHost != "" || fi.FromLiteral != ""
}
func (fi *FileInfo) pathList() (paths []string) {
for _, ent := range fi.Dirents {
if ent.isFile() {
paths = append(paths, ent.Filename)
continue
}
for _, e := range ent.pathList() {
paths = append(paths, path.Join(ent.Filename, e))
}
}
return paths
}
func (fi *FileInfo) combine(fi2 *FileInfo) error {
for _, ent2 := range fi2.Dirents {
// get existing file info
var f *FileInfo
for _, ent := range fi.Dirents { | if ent.Filename == ent2.Filename {
f = ent
break
} | random_line_split |
|
write.go | b",
}
)
func (p *Pack) writeBoot(f io.Writer, mbrfilename string) error {
fmt.Printf("\n")
fmt.Printf("Creating boot file system\n")
done := measure.Interactively("creating boot file system")
fragment := ""
defer func() {
done(fragment)
}()
globs := make([]string, 0, len(firmwareGlobs)+len(kernelGlobs))
if fw := p.Cfg.FirmwarePackageOrDefault(); fw != "" {
firmwareDir, err := packer.PackageDir(fw)
if err != nil {
return err
}
for _, glob := range firmwareGlobs {
globs = append(globs, filepath.Join(firmwareDir, glob))
}
}
var eepromDir string
if eeprom := p.Cfg.EEPROMPackageOrDefault(); eeprom != "" {
var err error
eepromDir, err = packer.PackageDir(eeprom)
if err != nil {
return err
}
}
kernelDir, err := packer.PackageDir(p.Cfg.KernelPackageOrDefault())
if err != nil {
return err
}
fmt.Printf("\nKernel directory: %s\n", kernelDir)
for _, glob := range kernelGlobs {
globs = append(globs, filepath.Join(kernelDir, glob))
}
bufw := bufio.NewWriter(f)
fw, err := fat.NewWriter(bufw)
if err != nil {
return err
}
for _, pattern := range globs {
matches, err := filepath.Glob(pattern)
if err != nil {
return err
}
for _, m := range matches {
src, err := os.Open(m)
if err != nil {
return err
}
if err := copyFile(fw, "/"+filepath.Base(m), src); err != nil {
return err
}
}
}
// EEPROM update procedure. See also:
// https://news.ycombinator.com/item?id=21674550
writeEepromUpdateFile := func(globPattern, target string) (sig string, _ error) {
matches, err := filepath.Glob(globPattern)
if err != nil {
return "", err
}
if len(matches) == 0 {
return "", fmt.Errorf("invalid -eeprom_package: no files matching %s", filepath.Base(globPattern))
}
// Select the EEPROM file that sorts last.
// This corresponds to most recent for the pieeprom-*.bin files,
// which contain the date in yyyy-mm-dd format.
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
f, err := os.Open(matches[0])
if err != nil {
return "", err
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return "", err
}
// Copy the EEPROM file into the image and calculate its SHA256 hash
// while doing so:
w, err := fw.File(target, st.ModTime())
if err != nil {
return "", err
}
h := sha256.New()
if _, err := io.Copy(w, io.TeeReader(f, h)); err != nil {
return "", err
}
if base := filepath.Base(target); base == "recovery.bin" || base == "RECOVERY.000" {
fmt.Printf(" %s\n", base)
// No signature required for recovery.bin itself.
return "", nil
}
fmt.Printf(" %s (sig %s)\n", filepath.Base(target), shortenSHA256(h.Sum(nil)))
// Include the SHA256 hash in the image in an accompanying .sig file:
sigFn := target
ext := filepath.Ext(sigFn)
if ext == "" {
return "", fmt.Errorf("BUG: cannot derive signature file name from matches[0]=%q", matches[0])
}
sigFn = strings.TrimSuffix(sigFn, ext) + ".sig"
w, err = fw.File(sigFn, st.ModTime())
if err != nil {
return "", err
}
_, err = fmt.Fprintf(w, "%x\n", h.Sum(nil))
return fmt.Sprintf("%x", h.Sum(nil)), err
}
if eepromDir != "" {
fmt.Printf("EEPROM update summary:\n")
pieSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "pieeprom-*.bin"), "/pieeprom.upd")
if err != nil {
return err
}
vlSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "vl805-*.bin"), "/vl805.bin")
if err != nil {
return err
}
targetFilename := "/recovery.bin"
if pieSig == p.ExistingEEPROM.PieepromSHA256 &&
vlSig == p.ExistingEEPROM.VL805SHA256 {
fmt.Printf(" installing recovery.bin as RECOVERY.000 (EEPROM already up-to-date)\n")
targetFilename = "/RECOVERY.000"
}
if _, err := writeEepromUpdateFile(filepath.Join(eepromDir, "recovery.bin"), targetFilename); err != nil {
return err
}
}
if err := p.writeCmdline(fw, filepath.Join(kernelDir, "cmdline.txt")); err != nil {
return err
}
if err := p.writeConfig(fw, filepath.Join(kernelDir, "config.txt")); err != nil {
return err
}
if p.UseGPTPartuuid {
srcX86, err := systemd.SystemdBootX64.Open("systemd-bootx64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTX64.EFI", srcX86); err != nil {
return err
}
srcAA86, err := systemd.SystemdBootAA64.Open("systemd-bootaa64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTAA64.EFI", srcAA86); err != nil {
return err
}
}
if err := fw.Flush(); err != nil {
return err
}
if err := bufw.Flush(); err != nil {
return err
}
if seeker, ok := f.(io.Seeker); ok {
off, err := seeker.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
fragment = ", " + humanize.Bytes(uint64(off))
}
if mbrfilename != "" {
if _, ok := f.(io.ReadSeeker); !ok {
return fmt.Errorf("BUG: f does not implement io.ReadSeeker")
}
fmbr, err := os.OpenFile(mbrfilename, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
defer fmbr.Close()
if err := writeMBR(f.(io.ReadSeeker), fmbr, p.Partuuid); err != nil {
return err
}
if err := fmbr.Close(); err != nil {
return err
}
}
return nil
}
type FileInfo struct {
Filename string
Mode os.FileMode
FromHost string
FromLiteral string
SymlinkDest string
Dirents []*FileInfo
}
func (fi *FileInfo) isFile() bool {
return fi.FromHost != "" || fi.FromLiteral != ""
}
func (fi *FileInfo) pathList() (paths []string) {
for _, ent := range fi.Dirents {
if ent.isFile() {
paths = append(paths, ent.Filename)
continue
}
for _, e := range ent.pathList() {
paths = append(paths, path.Join(ent.Filename, e))
}
}
return paths
}
func (fi *FileInfo) combine(fi2 *FileInfo) error {
for _, ent2 := range fi2.Dirents {
// get existing file info
var f *FileInfo
for _, ent := range fi.Dirents {
if ent.Filename == ent2.Filename {
f = ent
break
}
}
// if not found add complete subtree directly
if f == nil {
fi.Dirents = append(fi.Dirents, ent2)
continue
}
// file overwrite is not supported -> return error
if f.isFile() || ent2.isFile() {
return fmt.Errorf("file already exist: %s", ent2.Filename)
}
if err := f.combine(ent2); err != nil {
return err
}
}
return nil
}
func (fi *FileInfo) mustFindDirent(path string) *FileInfo | {
for _, ent := range fi.Dirents {
// TODO: split path into components and compare piecemeal
if ent.Filename == path {
return ent
}
}
log.Panicf("mustFindDirent(%q) did not find directory entry", path)
return nil
} | identifier_body |
|
write.go | err != nil {
return err
}
if err := copyFile(fw, "/"+filepath.Base(m), src); err != nil {
return err
}
}
}
// EEPROM update procedure. See also:
// https://news.ycombinator.com/item?id=21674550
writeEepromUpdateFile := func(globPattern, target string) (sig string, _ error) {
matches, err := filepath.Glob(globPattern)
if err != nil {
return "", err
}
if len(matches) == 0 {
return "", fmt.Errorf("invalid -eeprom_package: no files matching %s", filepath.Base(globPattern))
}
// Select the EEPROM file that sorts last.
// This corresponds to most recent for the pieeprom-*.bin files,
// which contain the date in yyyy-mm-dd format.
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
f, err := os.Open(matches[0])
if err != nil {
return "", err
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return "", err
}
// Copy the EEPROM file into the image and calculate its SHA256 hash
// while doing so:
w, err := fw.File(target, st.ModTime())
if err != nil {
return "", err
}
h := sha256.New()
if _, err := io.Copy(w, io.TeeReader(f, h)); err != nil {
return "", err
}
if base := filepath.Base(target); base == "recovery.bin" || base == "RECOVERY.000" {
fmt.Printf(" %s\n", base)
// No signature required for recovery.bin itself.
return "", nil
}
fmt.Printf(" %s (sig %s)\n", filepath.Base(target), shortenSHA256(h.Sum(nil)))
// Include the SHA256 hash in the image in an accompanying .sig file:
sigFn := target
ext := filepath.Ext(sigFn)
if ext == "" {
return "", fmt.Errorf("BUG: cannot derive signature file name from matches[0]=%q", matches[0])
}
sigFn = strings.TrimSuffix(sigFn, ext) + ".sig"
w, err = fw.File(sigFn, st.ModTime())
if err != nil {
return "", err
}
_, err = fmt.Fprintf(w, "%x\n", h.Sum(nil))
return fmt.Sprintf("%x", h.Sum(nil)), err
}
if eepromDir != "" {
fmt.Printf("EEPROM update summary:\n")
pieSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "pieeprom-*.bin"), "/pieeprom.upd")
if err != nil {
return err
}
vlSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "vl805-*.bin"), "/vl805.bin")
if err != nil {
return err
}
targetFilename := "/recovery.bin"
if pieSig == p.ExistingEEPROM.PieepromSHA256 &&
vlSig == p.ExistingEEPROM.VL805SHA256 {
fmt.Printf(" installing recovery.bin as RECOVERY.000 (EEPROM already up-to-date)\n")
targetFilename = "/RECOVERY.000"
}
if _, err := writeEepromUpdateFile(filepath.Join(eepromDir, "recovery.bin"), targetFilename); err != nil {
return err
}
}
if err := p.writeCmdline(fw, filepath.Join(kernelDir, "cmdline.txt")); err != nil {
return err
}
if err := p.writeConfig(fw, filepath.Join(kernelDir, "config.txt")); err != nil {
return err
}
if p.UseGPTPartuuid {
srcX86, err := systemd.SystemdBootX64.Open("systemd-bootx64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTX64.EFI", srcX86); err != nil {
return err
}
srcAA86, err := systemd.SystemdBootAA64.Open("systemd-bootaa64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTAA64.EFI", srcAA86); err != nil {
return err
}
}
if err := fw.Flush(); err != nil {
return err
}
if err := bufw.Flush(); err != nil {
return err
}
if seeker, ok := f.(io.Seeker); ok {
off, err := seeker.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
fragment = ", " + humanize.Bytes(uint64(off))
}
if mbrfilename != "" {
if _, ok := f.(io.ReadSeeker); !ok {
return fmt.Errorf("BUG: f does not implement io.ReadSeeker")
}
fmbr, err := os.OpenFile(mbrfilename, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
defer fmbr.Close()
if err := writeMBR(f.(io.ReadSeeker), fmbr, p.Partuuid); err != nil {
return err
}
if err := fmbr.Close(); err != nil {
return err
}
}
return nil
}
type FileInfo struct {
Filename string
Mode os.FileMode
FromHost string
FromLiteral string
SymlinkDest string
Dirents []*FileInfo
}
func (fi *FileInfo) isFile() bool {
return fi.FromHost != "" || fi.FromLiteral != ""
}
func (fi *FileInfo) pathList() (paths []string) {
for _, ent := range fi.Dirents {
if ent.isFile() {
paths = append(paths, ent.Filename)
continue
}
for _, e := range ent.pathList() {
paths = append(paths, path.Join(ent.Filename, e))
}
}
return paths
}
func (fi *FileInfo) combine(fi2 *FileInfo) error {
for _, ent2 := range fi2.Dirents {
// get existing file info
var f *FileInfo
for _, ent := range fi.Dirents {
if ent.Filename == ent2.Filename {
f = ent
break
}
}
// if not found add complete subtree directly
if f == nil {
fi.Dirents = append(fi.Dirents, ent2)
continue
}
// file overwrite is not supported -> return error
if f.isFile() || ent2.isFile() {
return fmt.Errorf("file already exist: %s", ent2.Filename)
}
if err := f.combine(ent2); err != nil {
return err
}
}
return nil
}
func (fi *FileInfo) mustFindDirent(path string) *FileInfo {
for _, ent := range fi.Dirents {
// TODO: split path into components and compare piecemeal
if ent.Filename == path {
return ent
}
}
log.Panicf("mustFindDirent(%q) did not find directory entry", path)
return nil
}
func findBins(cfg *config.Struct, buildEnv *packer.BuildEnv, bindir string) (*FileInfo, error) {
result := FileInfo{Filename: ""}
// TODO: doing all three packer.MainPackages calls concurrently hides go
// module proxy latency
gokrazyMainPkgs, err := buildEnv.MainPackages(cfg.GokrazyPackagesOrDefault())
if err != nil {
return nil, err
}
gokrazy := FileInfo{Filename: "gokrazy"}
for _, pkg := range gokrazyMainPkgs {
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
if cfg.InternalCompatibilityFlags.InitPkg != "" | {
initMainPkgs, err := buildEnv.MainPackages([]string{cfg.InternalCompatibilityFlags.InitPkg})
if err != nil {
return nil, err
}
for _, pkg := range initMainPkgs {
if got, want := pkg.Basename(), "init"; got != want {
log.Printf("Error: -init_pkg=%q produced unexpected binary name: got %q, want %q", cfg.InternalCompatibilityFlags.InitPkg, got, want)
continue
}
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
} | conditional_block |
|
circular.menu.helpers.ts | vector v such that OP1 + OP2 = v
*/
const sumCoords = (p1, p2) => ({x: p1.x + p2.x, y: p1.y + p2.y})
/**
* Compute vector v such that k * OP1 = v where k is a scalar (aka scalar multiplication)
*/
const scalarByCoords = (p1, k) => ({x: k * p1.x, y: k * p1.y})
/**
* Compute a new rectangle with the same dimensions but translated, i.e. its upper left corner coordinates are translated by {deltaX, deltaY}
*/
const applyTranslatorToRect = ({x, y, width, height}, { deltaX, deltaY }) => ({x: x + deltaX, y: y + deltaY, width, height});
/**
* Set the left and top style properties of node to x and y, effectively moving the node to the coordinates {x,y}
*/
const moveNodeToCoords = (node, { x, y }) => ((node.style.left = `${x}px`) && (node.style.top = `${y}px`));
/**
* Instead of moving node to the coordinates {x,y}, set css variables to hold these values.
* The node can then be moved at some later point by dynamically manipulating css classes.
*/
const putCoordsOnNode = (node, { x, y }) => { node.style.setProperty('--to-x', `${x}px`); node.style.setProperty('--to-y', `${y}px`);};
/**
* Convert cartesian coordinates {x,y} to polar coordinates {r, theta}
*/
const cartesian2Polar = ({x, y}) => ({ distance: getDistance({x, y}), radians: Math.atan2(y, x) })
/**
* Convert polar coordinates {r, theta} to cartesian coordinates {x,y}
*/
const polar2Cartesian = ({distance, radians}) => ({ x: distance * Math.cos(radians), y: distance * Math.sin(radians) });
/**
* Given a vector OP in cartesian coordinates {x,y}, rotate it by an angle 'radiansAngle'
* Where P is the "point" input argument
*/
const rotatePoint = (point, radiansAngle) => {
const {distance, radians} = cartesian2Polar(point);
return polar2Cartesian({distance, radians: radians + radiansAngle});
}
/**
* A point "P" is looking a circle with center "C". How much angle does the circle occupy at this distance?
* ```
* - OT1 = OC + a/d CP + b/d x CPperpendicular
* - OT2 = OC + a/d CP - b/d x CPperpendicular
* ```
* - where
* - T1 and T2 are tangent points on the circle
* - CP (center of circle to P) has a length d
* - CPperpendicular is CP rotated by 90 degrees clockwise
* - The equations express CT (center of circle to tangent point) using its components along CP (length a) and CPperpendicular (length b)
*
* https://math.stackexchange.com/questions/543496/how-to-find-the-equation-of-a-line-tangent-to-a-circle-that-passes-through-a-g
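*
* Illustrative check (hypothetical numbers): a circle of radius 3 viewed from a point 6 units from its
* center subtends sin(angle / 2) = 3 / 6, so angle = 2 * asin(0.5) ≈ 1.047 rad (60 degrees). The
* tangent-point construction below recovers the same value from the dot product of the vectors PT1 and PT2.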
*/
const getViewingAngle = (viewPoint, targetCircle) => {
const { center: targetCenter, radius: targetRadius } = targetCircle;
const distance = getDistance(viewPoint, targetCenter);
if(distance < targetRadius) throw new Error(`Point with coordinates (${viewPoint.x}, ${viewPoint.y}) is within the circle and cannot be used to generate tangent lines`);
// Compute vector CP and CP rotated by 90 degrees
const {deltaX: deltaXParallel, deltaY: deltaYParallel} = getTranslator(targetCenter, viewPoint); // CP x,y components
const deltaXPerpendicular = -1 * deltaYParallel; // CP rotated x,y components
const deltaYPerpendicular = deltaXParallel;
// Compute intermediary values for final calculations
const { x: cx, y: cy } = targetCenter;
const rho = targetRadius / distance;
const a_d = rho * rho;
const b_d = rho * Math.sqrt(1 - rho * rho);
// Compute x,y components of OT1 and OT2 where O is the origin of the coordinate system (0,0)
// a_d = a/d where d is the distance of CP: CT1x = a_d x CP
// b_d = b/d where d is the distance of CP: CT1y = b_d x CProtated
// a is therefore the projection of CT along CP
// b is therefore the projection of CT along a direction perpendicular to CP
const T1x = cx + a_d * deltaXParallel + b_d * deltaXPerpendicular;
const T1y = cy + a_d * deltaYParallel + b_d * deltaYPerpendicular;
const T2x = cx + a_d * deltaXParallel - b_d * deltaXPerpendicular;
const T2y = cy + a_d * deltaYParallel - b_d * deltaYPerpendicular;
// Assert that the tangent points computed are actually on the circle's circumference
if(Math.round(getDistance(targetCenter, {x: T1x, y: T1y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
if(Math.round(getDistance(targetCenter, {x: T2x, y: T2y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
// Compute the angle theta = (T1, P, T2) which is the viewing angle of the circle observed from P the viewpoint
// a.b = |a| . |b| . cos(theta) (Scalar Multiplication)
const angle = Math.acos((
(T1x - viewPoint.x)*(T2x - viewPoint.x) + (T1y - viewPoint.y)*(T2y - viewPoint.y)
)/(
getDistance(viewPoint, {x:T1x, y:T1y}) * getDistance(viewPoint, {x:T2x, y:T2y})
)
);
return {
angle: angle,
tangents: [ { x: T1x, y: T1y }, { x: T2x, y: T2y } ]
}
}
/**
* Get the circle that either fits inside the rectangle, or the circle that contains the rectangle.
*/
const getCircleFromRect = ({x, y, width, height})=>{
const center = { x: x + width / 2, y: y + height / 2 };
const radius = getDistance(center, {x, y});
if(Math.round(getDistance(center, {x, y}) - radius) !== 0) throw new Error("Circle around rectangle fail to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y}) - radius) !== 0) throw new Error("Circle around rectangle fail to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fail to be equidistant from its corners");
if(Math.round(getDistance(center, {x, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fail to be equidistant from its corners");
return {center, radius: Math.min(width / 2, height / 2)}
}
/**
* A point is looking at a bunch of circles. How much cumulative angle do they occupy?
*/
const getAngleFromCircles = (viewPoint, circles, maxAngle = Math.PI) => {
const angle = circles.reduce((acc, circle) => acc + getViewingAngle(viewPoint, circle).angle, 0);
if(angle >= maxAngle) throw new Error("Circles do not fit at current distance from viewpoint");
return angle;
}
/**
* The menu tries to arrange the nav items around itself so that they all fit within the angle "maxAngle"
* If they don't, push them away from the menu center by 'step' pixels and try again recursively
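*
* Hypothetical trace (illustrative numbers only): with step = 5 and items needing about 15px more
* clearance, the recursion runs three times, shifting every rect by deltaY = -5 on each pass, until
* getAngleFromCircles stops throwing and the accumulated angle fits under maxAngle.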
*/
function expandMenuCircle(menuCenter, navRects, step = 100, maxAngle = Math.PI){
try {
return {
// getAngleFromCircles throws an error if the accumulated angle for all these circle is greater than maxAngle
angle: getAngleFromCircles(menuCenter, navRects.map(rect => getCircleFromRect(rect)), maxAngle),
rects: navRects
}
}catch(e){
// On error thrown by getAngleFromCircles, push the circles a little bit further, and try again recursively
return expandMenuCircle(menuCenter, navRects.map(rect => applyTranslatorToRect(rect, {deltaX: 0, deltaY: -1 * step})), step, maxAngle);
}
}
/*****************************************************************************************
*
* Main Function
*
*****************************************************************************************/
/**
* Position nav items around a menu so that they fit within some angle.
*/
export default function | (){
// Constants to regulate the positioning algorithm
const angularSpace = Math.PI / 2;
const angularAnchor = Math.PI;
const menuExpansionSteps = 5;
// Node items involved
const navs = Array.from(document.querySelectorAll(".nav__item"));
const menu = document.querySelector(".hamburger-menu");
// Get center point of menu
const {center: viewPoint, | positionMenuItem | identifier_name |
circular.menu.helpers.ts | ({x, y, width, height}, { deltaX, deltaY }) => ({x: x + deltaX, y: y + deltaY, width, height});
/**
* Set the left and top style properties of node to x and y, effectively moving the node to the coordinates {x,y}
*/
const moveNodeToCoords = (node, { x, y }) => ((node.style.left = `${x}px`) && (node.style.top = `${y}px`));
/**
* Instead of moving node to the coordinates {x,y}, set css variables to hold these values.
* The node can then be moved at some later point by dynamically manipulating css classes.
*/
const putCoordsOnNode = (node, { x, y }) => { node.style.setProperty('--to-x', `${x}px`); node.style.setProperty('--to-y', `${y}px`);};
/**
* Convert cartesian coordinates {x,y} to polar coordinates {r, theta}
*/
const cartesian2Polar = ({x, y}) => ({ distance: getDistance({x, y}), radians: Math.atan2(y, x) })
/**
* Convert polar coordinates {r, theta} to cartesian coordinates {x,y}
*/
const polar2Cartesian = ({distance, radians}) => ({ x: distance * Math.cos(radians), y: distance * Math.sin(radians) });
/**
* Given a vector OP in cartesian coordinates {x,y}, rotate it by an angle 'radiansAngle'
* Where P is the "point" input argument
*/
const rotatePoint = (point, radiansAngle) => {
const {distance, radians} = cartesian2Polar(point);
return polar2Cartesian({distance, radians: radians + radiansAngle});
}
/**
* A point "P" is looking a circle with center "C". How much angle does the circle occupy at this distance?
* ```
* - OT1 = OC + a/d CP + b/d x CPperpendicular
* - OT2 = OC + a/d CP - b/d x CPperpendicular
* ```
* - where
* - T1 and T2 are tangent points on the circle
* - CP (center of circle to P) has a length d
* - CPperpendicular is CP rotated by 90 degrees clockwise
* - The equations express CT (center of circle to tangent point) using its components along CP (length a) and CPperpendicular (length b)
*
* https://math.stackexchange.com/questions/543496/how-to-find-the-equation-of-a-line-tangent-to-a-circle-that-passes-through-a-g
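*
* Illustrative check (hypothetical numbers): a circle of radius 3 viewed from a point 6 units from its
* center subtends sin(angle / 2) = 3 / 6, so angle = 2 * asin(0.5) ≈ 1.047 rad (60 degrees). The
* tangent-point construction below recovers the same value from the dot product of the vectors PT1 and PT2.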
*/
const getViewingAngle = (viewPoint, targetCircle) => {
const { center: targetCenter, radius: targetRadius } = targetCircle;
const distance = getDistance(viewPoint, targetCenter);
if(distance < targetRadius) throw new Error(`Point with coordinates (${viewPoint.x}, ${viewPoint.y}) is within the circle and cannot be used to generate tangent lines`);
// Compute vector CP and CP rotated by 90 degrees
const {deltaX: deltaXParallel, deltaY: deltaYParallel} = getTranslator(targetCenter, viewPoint); // CP x,y components
const deltaXPerpendicular = -1 * deltaYParallel; // CP rotated x,y components
const deltaYPerpendicular = deltaXParallel;
// Compute intermediary values for final calculations
const { x: cx, y: cy } = targetCenter;
const rho = targetRadius / distance;
const a_d = rho * rho;
const b_d = rho * Math.sqrt(1 - rho * rho);
// Compute x,y components of OT1 and OT2 where O is the origin of the coordinate system (0,0)
// a_d = a/d where d is the distance of CP: CT1x = a_d x CP
// b_d = b/d where d is the distance of CP: CT1y = b_d x CProtated
// a is therefore the projection of CT along CP
// b is therefore the projection of CT along a direction perpendicular to CP
const T1x = cx + a_d * deltaXParallel + b_d * deltaXPerpendicular;
const T1y = cy + a_d * deltaYParallel + b_d * deltaYPerpendicular;
const T2x = cx + a_d * deltaXParallel - b_d * deltaXPerpendicular;
const T2y = cy + a_d * deltaYParallel - b_d * deltaYPerpendicular;
// Assert that the tangent points computed are actually on the circle's circumference
if(Math.round(getDistance(targetCenter, {x: T1x, y: T1y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
if(Math.round(getDistance(targetCenter, {x: T2x, y: T2y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
// Compute the angle theta = (T1, P, T2) which is the viewing angle of the circle observed from P the viewpoint
// a.b = |a| . |b| . cos(theta) (Scalar Multiplication)
const angle = Math.acos((
(T1x - viewPoint.x)*(T2x - viewPoint.x) + (T1y - viewPoint.y)*(T2y - viewPoint.y)
)/(
getDistance(viewPoint, {x:T1x, y:T1y}) * getDistance(viewPoint, {x:T2x, y:T2y})
)
);
return {
angle: angle,
tangents: [ { x: T1x, y: T1y }, { x: T2x, y: T2y } ]
}
}
/**
* Get the circle that either fits inside the rectangle, or the circle that contains the rectangle.
*/
const getCircleFromRect = ({x, y, width, height})=>{
const center = { x: x + width / 2, y: y + height / 2 };
const radius = getDistance(center, {x, y});
if(Math.round(getDistance(center, {x, y}) - radius) !== 0) throw new Error("Circle around rectangle fail to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y}) - radius) !== 0) throw new Error("Circle around rectangle fail to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fail to be equidistant from its corners");
if(Math.round(getDistance(center, {x, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fail to be equidistant from its corners");
return {center, radius: Math.min(width / 2, height / 2)}
}
/**
* A point is looking at a bunch of circles. How much cumulative angle do they occupy?
*/
const getAngleFromCircles = (viewPoint, circles, maxAngle = Math.PI) => {
const angle = circles.reduce((acc, circle) => acc + getViewingAngle(viewPoint, circle).angle, 0);
if(angle >= maxAngle) throw new Error("Circles do not fit at current distance from viewpoint");
return angle;
}
/**
* The menu tries to arrange the nav items around itself so that they all fit within the angle "maxAngle"
* If they don't, push them away from the menu center by 'step' pixels and try again recursively
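*
* Hypothetical trace (illustrative numbers only): with step = 5 and items needing about 15px more
* clearance, the recursion runs three times, shifting every rect by deltaY = -5 on each pass, until
* getAngleFromCircles stops throwing and the accumulated angle fits under maxAngle.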
*/
function expandMenuCircle(menuCenter, navRects, step = 100, maxAngle = Math.PI){
try {
return {
// getAngleFromCircles throws an error if the accumulated angle for all these circle is greater than maxAngle
angle: getAngleFromCircles(menuCenter, navRects.map(rect => getCircleFromRect(rect)), maxAngle),
rects: navRects
}
}catch(e){
// On error thrown by getAngleFromCircles, push the circles a little bit further, and try again recursively
return expandMenuCircle(menuCenter, navRects.map(rect => applyTranslatorToRect(rect, {deltaX: 0, deltaY: -1 * step})), step, maxAngle);
}
}
/*****************************************************************************************
*
* Main Function
*
*****************************************************************************************/
/**
* Position nav items around a menu so that they fit within some angle.
*/
export default function positionMenuItem() | {
// Constants to regulate the positioning algorithm
const angularSpace = Math.PI / 2;
const angularAnchor = Math.PI;
const menuExpansionSteps = 5;
// Node items involved
const navs = Array.from(document.querySelectorAll(".nav__item"));
const menu = document.querySelector(".hamburger-menu");
// Get center point of menu
const {center: viewPoint, radius: viewPointSize} = getCircleFromRect(menu.getBoundingClientRect())
// Try to compute a distance at which the nav items can nicely fit around the menu within an angle "angularSpace"
const {angle, rects} = expandMenuCircle(viewPoint, navs.map(nav => nav.getBoundingClientRect()), menuExpansionSteps, angularSpace);
// Compute the space between the nav items, so that they are nicely spread out to occupy the "angularSpace"
const angleGap = angularSpace/(navs.length - 1);
// Collect important points for display purposes
const coordsArr = [{item: 'menu', radius: viewPointSize, ...viewPoint}];
| identifier_body |
|
circular.menu.helpers.ts | */
const getDistance = (p1, p2 = {x:0,y:0}) => Math.sqrt((p2.x - p1.x)*(p2.x - p1.x) + (p2.y - p1.y)*(p2.y - p1.y));
/**
* Compute vector v such that OP1 + v = OP2
*/
const getTranslator = (p1, p2 = {x:0,y:0}) => ({deltaX: p2.x - p1.x, deltaY: p2.y - p1.y});
/**
* Compute vector v such that OP1 + OP2 = v
*/
const sumCoords = (p1, p2) => ({x: p1.x + p2.x, y: p1.y + p2.y})
/**
* Compute vector v such that k * OP1 = v where k is a scalar (aka scalar multiplication)
*/
const scalarByCoords = (p1, k) => ({x: k * p1.x, y: k * p1.y})
/**
* Compute a new rectangle with the same dimensions but translated, i.e. its upper left corner coordinates are translated by {deltaX, deltaY}
*/
const applyTranslatorToRect = ({x, y, width, height}, { deltaX, deltaY }) => ({x: x + deltaX, y: y + deltaY, width, height});
/**
* Set the left and top style properties of node to x and y, effectively moving the node to the coordinates {x,y}
*/
const moveNodeToCoords = (node, { x, y }) => ((node.style.left = `${x}px`) && (node.style.top = `${y}px`));
/**
* Instead of moving node to the coordinates {x,y}, set css variables to hold these values.
* The node can then be moved at some later point by dynamically manipulating css classes.
*/
const putCoordsOnNode = (node, { x, y }) => { node.style.setProperty('--to-x', `${x}px`); node.style.setProperty('--to-y', `${y}px`);};
/**
* Convert cartesian coordinates {x,y} to polar coordinates {r, theta}
*/
const cartesian2Polar = ({x, y}) => ({ distance: getDistance({x, y}), radians: Math.atan2(y, x) })
/**
* Convert polar coordinates {r, theta} to cartesian coordinates {x,y}
*/
const polar2Cartesian = ({distance, radians}) => ({ x: distance * Math.cos(radians), y: distance * Math.sin(radians) });
/**
* Given a vector OP in cartesian coordinates {x,y}, rotate it by an angle 'radiansAngle'
* Where P is the "point" input argument
*/
const rotatePoint = (point, radiansAngle) => {
const {distance, radians} = cartesian2Polar(point);
return polar2Cartesian({distance, radians: radians + radiansAngle});
}
/**
* A point "P" is looking a circle with center "C". How much angle does the circle occupy at this distance?
* ```
* - OT1 = OC + a/d CP + b/d x CPperpendicular
* - OT2 = OC + a/d CP - b/d x CPperpendicular
* ```
* - where
* - T1 and T2 are tangent points on the circle
* - CP (center of circle to P) has a length d
* - CPperpendicular is CP rotated by 90 degrees clockwise
* - The equations express CT1 and CT2 (center to tangent point) through their components along CP (length a) and CPperpendicular (length b)
*
* https://math.stackexchange.com/questions/543496/how-to-find-the-equation-of-a-line-tangent-to-a-circle-that-passes-through-a-g
*/
const getViewingAngle = (viewPoint, targetCircle) => {
const { center: targetCenter, radius: targetRadius } = targetCircle;
const distance = getDistance(viewPoint, targetCenter);
if(distance < targetRadius) throw new Error(`Point with coordinates (${viewPoint.x}, ${viewPoint.y}) is within the circle and cannot be used to generate tangent lines`);
// Compute vector CP and CP rotated by 90 degrees
const {deltaX: deltaXParallel, deltaY: deltaYParallel} = getTranslator(targetCenter, viewPoint); // CP x,y components
const deltaXPerpendicular = -1 * deltaYParallel; // CP rotated x,y components
const deltaYPerpendicular = deltaXParallel;
// Compute intermediary values for final calculations
const { x: cx, y: cy } = targetCenter;
const rho = targetRadius / distance;
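// Note: the viewing angle could equivalently be computed as 2 * Math.asin(rho);
// the tangent-point construction below is kept because the tangent points are
// also part of the returned value.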
const a_d = rho * rho;
const b_d = rho * Math.sqrt(1 - rho * rho);
// Compute x,y components of OT1 and OT2 where O is the origin of the coordinate system (0,0)
// a_d = a/d where d is the distance of CP: CT1x = a_d x CP
// b_d = b/d where d is the distance of CP: CT1y = b_d x CProtated
// a is therefore the projection of CT along CP
// b is therefore the projection of CT along a direction perpendicular to CP
const T1x = cx + a_d * deltaXParallel + b_d * deltaXPerpendicular;
const T1y = cy + a_d * deltaYParallel + b_d * deltaYPerpendicular;
const T2x = cx + a_d * deltaXParallel - b_d * deltaXPerpendicular;
const T2y = cy + a_d * deltaYParallel - b_d * deltaYPerpendicular;
// Assert that the tangent points computed are actually on the circle's circumference
if(Math.round(getDistance(targetCenter, {x: T1x, y: T1y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
if(Math.round(getDistance(targetCenter, {x: T2x, y: T2y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
// Compute the angle theta = (T1, P, T2) which is the viewing angle of the circle observed from P the viewpoint
// a.b = |a| . |b| . cos(theta) (dot product)
const angle = Math.acos((
(T1x - viewPoint.x)*(T2x - viewPoint.x) + (T1y - viewPoint.y)*(T2y - viewPoint.y)
)/(
getDistance(viewPoint, {x:T1x, y:T1y}) * getDistance(viewPoint, {x:T2x, y:T2y})
)
);
return {
angle: angle,
tangents: [ { x: T1x, y: T1y }, { x: T2x, y: T2y } ]
}
}
/**
* Get the circle that either fits inside the rectangle, or the circle that contains the rectangle.
*/
const getCircleFromRect = ({x, y, width, height})=>{
const center = { x: x + width / 2, y: y + height / 2 };
const radius = getDistance(center, {x, y});
if(Math.round(getDistance(center, {x, y}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
return {center, radius: Math.min(width / 2, height / 2)}
}
/**
* A point is looking at a bunch of circles. How much cumulative angle do they occupy?
*/
const getAngleFromCircles = (viewPoint, circles, maxAngle = Math.PI) => {
const angle = circles.reduce((acc, circle) => acc + getViewingAngle(viewPoint, circle).angle, 0);
if(angle >= maxAngle) throw new Error("Circles do not fit at current distance from viewpoint");
return angle;
}
/**
* The menu tries to arrange the nav items around itself so that they all fit within the angle "maxAngle"
* If they don't, push them away from the menu center by 'step' pixels, and try again recursively
*/
function expandMenuCircle(menuCenter, navRects, step = 100, maxAngle = Math.PI){
try {
return {
// getAngleFromCircles throws an error if the accumulated angle for all these circle is greater than maxAngle
angle: getAngleFromCircles(menuCenter, navRects.map(rect => getCircleFromRect(rect)), maxAngle),
rects: navRects
}
}catch(e){
// On error thrown by getAngleFromCircles, push the circles a little bit further, and try again recursively
return expandMenuCircle(menuCenter, navRects.map(rect => applyTranslatorToRect(rect, {deltaX: 0, deltaY: -1 * step})), step, | * Distance between two points p1 and p2 | random_line_split |
|
DiffStreamOplogFilter.js | OplogFilter');
const HTTP_TEST_PORT = 9090;
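// Object-mode Readable mock: replays the provided oplog entries with a short
// delay between events, then keeps emitting `{ entry: null }` markers every
// `refreshPeriodMs` to simulate an idle oplog tail.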
class MockRaftOplogStream extends stream.Readable {
constructor(entriesToEmit, refreshPeriodMs) {
super({ objectMode: true });
this.entriesToEmit = entriesToEmit;
this.refreshPeriodMs = refreshPeriodMs;
}
_read() {
if (this.entriesToEmit.length > 0) {
// introduce a little delay between events to make sure
// the filter waits for oplog events before emitting
// output entries
setTimeout(() => {
this.push(this.entriesToEmit.shift()); | }
}, 10);
} else {
setTimeout(() => {
this.push({ entry: null });
}, this.refreshPeriodMs);
}
}
}
describe('DiffStreamOplogFilter', () => {
let httpServer;
let reqCount = 0;
beforeAll(done => {
const handleGetBucketRSRequest = (res, bucketName) => {
// simple mock matching bucket names "foo-on-rsX" to raft
// session X as long as 1 <= X <= 5
const bucketMatch = /-on-rs([1-5])$/.exec(bucketName);
if (bucketMatch) {
const rsIdStr = bucketMatch[1];
res.writeHead(200, {
'Content-Length': 1,
});
return res.end(rsIdStr);
}
if (bucketName === 'bucket-with-error') {
res.writeHead(500);
} else {
res.writeHead(404);
}
return res.end();
};
httpServer = http.createServer((req, res) => {
req.resume();
reqCount += 1;
// fail 1/3 requests to check retry behavior
if (reqCount % 3 === 0) {
res.writeHead(500);
return res.end('OOPS');
}
const url = new URL(req.url, `http://${req.headers.host}`);
const bucketRsMatch = /^\/_\/buckets\/([a-z0-9-]+)\/id$/.exec(url.pathname);
if (bucketRsMatch) {
const bucketName = bucketRsMatch[1];
return handleGetBucketRSRequest(res, bucketName);
}
throw new Error(`unexpected request path ${url.pathname}`);
});
httpServer.listen(HTTP_TEST_PORT);
httpServer.once('listening', done);
});
afterAll(done => {
httpServer.close(done);
});
test('filtering test with mocks', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
2: 20,
3: 30,
4: 40,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
// Prepare some test diff entries and oplog entries with a
// nested loop over raft sessions, buckets and keys
//
// Note: we loop over raft sessions 0 to 5 but only 1-4 are monitored:
//
// - use the first loop iteration (phony raft session 0) to
//   create buckets with no raft session
//
// - use raft session 5 to create buckets attached to a raft
// session not monitored by the filter
//
// In both cases, all entries belonging to those buckets
// should be filtered hence not appear in the output. This is
// consistent with cases where buckets would have been deleted
// during the scan, and possibly recreated to another raft
// session: in such case, all bucket keys were necessarily
// updated, so should be ignored.
const inputDiffEntries = [];
const latestOplogs = [];
for (let rs = 0; rs <= 5; ++rs) {
const latestRsOplog = [];
latestOplogs[rs] = latestRsOplog;
for (let b = 1; b <= 5; ++b) {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
// insert a diff entry that should pass through if on RS 1-4
inputDiffEntries.push([{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null]);
// insert a diff entry that should be filtered out
// because the key appears somewhere in the latest
// oplog
inputDiffEntries.push([{
key: `${bucketName}/${key}-updated`,
value: 'oldfoobar',
}, null]);
// insert the updated key's oplog entry
latestRsOplog.push({
entry: {
method: 'BATCH',
bucket: bucketName,
key: `${key}-updated`,
value: 'newfoobar',
},
});
}
}
}
// shuffle both the input diff entries and each oplog to
// increase fuzziness in the test, and replace actual oplog
// streams by mocks
shuffle(inputDiffEntries);
for (let rs = 1; rs <= 4; ++rs) {
shuffle(latestOplogs[rs]);
oplogFilter.raftSessionStates[rs].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream(latestOplogs[rs], 100);
oplogFilter.raftSessionStates[rs].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[rs], mockOplogStream);
}
// ingest all diff entries in the shuffled order in the filter
// stream
for (const diffEntry of inputDiffEntries) {
oplogFilter.write(diffEntry);
}
oplogFilter.end();
// listen for output diff events and store them in a set for
// checking that they correspond to what should have been sent
const filteredDiffEntries = new Set();
oplogFilter
.on('data', data => {
// stringify in JSON to support quick lookup from the set
// note: diff entries are forwarded as is, the
// javascript objects embedded have their fields in
// the same order, which is retained in the
// stringified version
filteredDiffEntries.add(JSON.stringify(data));
})
.on('end', () => {
// check that all diff entries expected to be output
// have been output, i.e. only non-updated diff
// entries from RS 1 to 4
for (let rs = 1; rs <= 4; ++rs) {
for (let b = 1; b <= 5; ++b) {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
const outputDiffEntry = [{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null];
const jsonOutputDiffEntry = JSON.stringify(outputDiffEntry);
expect(filteredDiffEntries.has(jsonOutputDiffEntry)).toBeTruthy();
filteredDiffEntries.delete(jsonOutputDiffEntry);
}
}
}
// check that no other entry than what was expected has been output
expect(filteredDiffEntries.size).toEqual(0);
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should handle an empty input stream', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {},
});
oplogFilter.end();
oplogFilter
.on('data', event => {
fail('should not have received a "data" event', event);
})
.on('end', () => {
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should emit a stream error if failing to fetch RSID after retries', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
oplogFilter.raftSessionStates[1].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream([], 100);
oplogFilter.raftSessionStates[1].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[1], mockOplogStream);
// this bucket name triggers a 500 error in the | if (this.entriesToEmit.length === 0) {
this.push({ entry: null }); | random_line_split |
DiffStreamOplogFilter.js | OplogFilter');
const HTTP_TEST_PORT = 9090;
class MockRaftOplogStream extends stream.Readable {
| (entriesToEmit, refreshPeriodMs) {
super({ objectMode: true });
this.entriesToEmit = entriesToEmit;
this.refreshPeriodMs = refreshPeriodMs;
}
_read() {
if (this.entriesToEmit.length > 0) {
// introduce a little delay between events to make sure
// the filter waits for oplog events before emitting
// output entries
setTimeout(() => {
this.push(this.entriesToEmit.shift());
if (this.entriesToEmit.length === 0) {
this.push({ entry: null });
}
}, 10);
} else {
setTimeout(() => {
this.push({ entry: null });
}, this.refreshPeriodMs);
}
}
}
describe('DiffStreamOplogFilter', () => {
let httpServer;
let reqCount = 0;
beforeAll(done => {
const handleGetBucketRSRequest = (res, bucketName) => {
// simple mock matching bucket names "foo-on-rsX" to raft
// session X as long as 1 <= X <= 5
const bucketMatch = /-on-rs([1-5])$/.exec(bucketName);
if (bucketMatch) {
const rsIdStr = bucketMatch[1];
res.writeHead(200, {
'Content-Length': 1,
});
return res.end(rsIdStr);
}
if (bucketName === 'bucket-with-error') {
res.writeHead(500);
} else {
res.writeHead(404);
}
return res.end();
};
httpServer = http.createServer((req, res) => {
req.resume();
reqCount += 1;
// fail 1/3 requests to check retry behavior
if (reqCount % 3 === 0) {
res.writeHead(500);
return res.end('OOPS');
}
const url = new URL(req.url, `http://${req.headers.host}`);
const bucketRsMatch = /^\/_\/buckets\/([a-z0-9-]+)\/id$/.exec(url.pathname);
if (bucketRsMatch) {
const bucketName = bucketRsMatch[1];
return handleGetBucketRSRequest(res, bucketName);
}
throw new Error(`unexpected request path ${url.pathname}`);
});
httpServer.listen(HTTP_TEST_PORT);
httpServer.once('listening', done);
});
afterAll(done => {
httpServer.close(done);
});
test('filtering test with mocks', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
2: 20,
3: 30,
4: 40,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
// Prepare some test diff entries and oplog entries with a
// nested loop over raft sessions, buckets and keys
//
// Note: we loop over raft sessions 0 to 5 but only 1-4 are monitored:
//
// - use the first loop iteration (phony raft session 0) to
//   create buckets with no raft session
//
// - use raft session 5 to create buckets attached to a raft
// session not monitored by the filter
//
// In both cases, all entries belonging to those buckets
// should be filtered hence not appear in the output. This is
// consistent with cases where buckets would have been deleted
// during the scan, and possibly recreated to another raft
// session: in such case, all bucket keys were necessarily
// updated, so should be ignored.
const inputDiffEntries = [];
const latestOplogs = [];
for (let rs = 0; rs <= 5; ++rs) {
const latestRsOplog = [];
latestOplogs[rs] = latestRsOplog;
for (let b = 1; b <= 5; ++b) {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
// insert a diff entry that should pass through if on RS 1-4
inputDiffEntries.push([{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null]);
// insert a diff entry that should be filtered out
// because the key appears somewhere in the latest
// oplog
inputDiffEntries.push([{
key: `${bucketName}/${key}-updated`,
value: 'oldfoobar',
}, null]);
// insert the updated key's oplog entry
latestRsOplog.push({
entry: {
method: 'BATCH',
bucket: bucketName,
key: `${key}-updated`,
value: 'newfoobar',
},
});
}
}
}
// shuffle both the input diff entries and each oplog to
// increase fuzziness in the test, and replace actual oplog
// streams by mocks
shuffle(inputDiffEntries);
for (let rs = 1; rs <= 4; ++rs) {
shuffle(latestOplogs[rs]);
oplogFilter.raftSessionStates[rs].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream(latestOplogs[rs], 100);
oplogFilter.raftSessionStates[rs].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[rs], mockOplogStream);
}
// ingest all diff entries in the shuffled order in the filter
// stream
for (const diffEntry of inputDiffEntries) {
oplogFilter.write(diffEntry);
}
oplogFilter.end();
// listen for output diff events and store them in a set for
// checking that they correspond to what should have been sent
const filteredDiffEntries = new Set();
oplogFilter
.on('data', data => {
// stringify in JSON to support quick lookup from the set
// note: diff entries are forwarded as is, the
// javascript objects embedded have their fields in
// the same order, which is retained in the
// stringified version
filteredDiffEntries.add(JSON.stringify(data));
})
.on('end', () => {
// check that all diff entries expected to be output
// have been output, i.e. only non-updated diff
// entries from RS 1 to 4
for (let rs = 1; rs <= 4; ++rs) {
for (let b = 1; b <= 5; ++b) {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
const outputDiffEntry = [{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null];
const jsonOutputDiffEntry = JSON.stringify(outputDiffEntry);
expect(filteredDiffEntries.has(jsonOutputDiffEntry)).toBeTruthy();
filteredDiffEntries.delete(jsonOutputDiffEntry);
}
}
}
// check that no other entry than what was expected has been output
expect(filteredDiffEntries.size).toEqual(0);
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should handle an empty input stream', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {},
});
oplogFilter.end();
oplogFilter
.on('data', event => {
fail('should not have received a "data" event', event);
})
.on('end', () => {
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should emit a stream error if failing to fetch RSID after retries', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
oplogFilter.raftSessionStates[1].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream([], 100);
oplogFilter.raftSessionStates[1].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[1], mockOplogStream);
// this bucket name triggers a 500 error in the | constructor | identifier_name |
DiffStreamOplogFilter.js | logFilter');
const HTTP_TEST_PORT = 9090;
class MockRaftOplogStream extends stream.Readable {
constructor(entriesToEmit, refreshPeriodMs) {
super({ objectMode: true });
this.entriesToEmit = entriesToEmit;
this.refreshPeriodMs = refreshPeriodMs;
}
_read() {
if (this.entriesToEmit.length > 0) {
// introduce a little delay between events to make sure
// the filter waits for oplog events before emitting
// output entries
setTimeout(() => {
this.push(this.entriesToEmit.shift());
if (this.entriesToEmit.length === 0) {
this.push({ entry: null });
}
}, 10);
} else {
setTimeout(() => {
this.push({ entry: null });
}, this.refreshPeriodMs);
}
}
}
describe('DiffStreamOplogFilter', () => {
let httpServer;
let reqCount = 0;
beforeAll(done => {
const handleGetBucketRSRequest = (res, bucketName) => {
// simple mock matching bucket names "foo-on-rsX" to raft
// session X as long as 1 <= X <= 5
const bucketMatch = /-on-rs([1-5])$/.exec(bucketName);
if (bucketMatch) {
const rsIdStr = bucketMatch[1];
res.writeHead(200, {
'Content-Length': 1,
});
return res.end(rsIdStr);
}
if (bucketName === 'bucket-with-error') {
res.writeHead(500);
} else {
res.writeHead(404);
}
return res.end();
};
httpServer = http.createServer((req, res) => {
req.resume();
reqCount += 1;
// fail 1/3 requests to check retry behavior
if (reqCount % 3 === 0) {
res.writeHead(500);
return res.end('OOPS');
}
const url = new URL(req.url, `http://${req.headers.host}`);
const bucketRsMatch = /^\/_\/buckets\/([a-z0-9-]+)\/id$/.exec(url.pathname);
if (bucketRsMatch) {
const bucketName = bucketRsMatch[1];
return handleGetBucketRSRequest(res, bucketName);
}
throw new Error(`unexpected request path ${url.pathname}`);
});
httpServer.listen(HTTP_TEST_PORT);
httpServer.once('listening', done);
});
afterAll(done => {
httpServer.close(done);
});
test('filtering test with mocks', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
2: 20,
3: 30,
4: 40,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
// Prepare some test diff entries and oplog entries with a
// nested loop over raft sessions, buckets and keys
//
// Note: we loop over raft sessions 0 to 5 but only 1-4 are monitored:
//
// - use the first loop iteration (phony raft session 0) to
//   create buckets with no raft session
//
// - use raft session 5 to create buckets attached to a raft
// session not monitored by the filter
//
// In both cases, all entries belonging to those buckets
// should be filtered hence not appear in the output. This is
// consistent with cases where buckets would have been deleted
// during the scan, and possibly recreated to another raft
// session: in such case, all bucket keys were necessarily
// updated, so should be ignored.
const inputDiffEntries = [];
const latestOplogs = [];
for (let rs = 0; rs <= 5; ++rs) {
const latestRsOplog = [];
latestOplogs[rs] = latestRsOplog;
for (let b = 1; b <= 5; ++b) {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
// insert a diff entry that should pass through if on RS 1-4
inputDiffEntries.push([{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null]);
// insert a diff entry that should be filtered out
// because the key appears somewhere in the latest
// oplog
inputDiffEntries.push([{
key: `${bucketName}/${key}-updated`,
value: 'oldfoobar',
}, null]);
// insert the updated key's oplog entry
latestRsOplog.push({
entry: {
method: 'BATCH',
bucket: bucketName,
key: `${key}-updated`,
value: 'newfoobar',
},
});
}
}
}
// shuffle both the input diff entries and each oplog to
// increase fuzziness in the test, and replace actual oplog
// streams by mocks
shuffle(inputDiffEntries);
for (let rs = 1; rs <= 4; ++rs) {
shuffle(latestOplogs[rs]);
oplogFilter.raftSessionStates[rs].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream(latestOplogs[rs], 100);
oplogFilter.raftSessionStates[rs].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[rs], mockOplogStream);
}
// ingest all diff entries in the shuffled order in the filter
// stream
for (const diffEntry of inputDiffEntries) {
oplogFilter.write(diffEntry);
}
oplogFilter.end();
// listen for output diff events and store them in a set for
// checking that they correspond to what should have been sent
const filteredDiffEntries = new Set();
oplogFilter
.on('data', data => {
// stringify in JSON to support quick lookup from the set
// note: diff entries are forwarded as is, the
// javascript objects embedded have their fields in
// the same order, which is retained in the
// stringified version
filteredDiffEntries.add(JSON.stringify(data));
})
.on('end', () => {
// check that all diff entries expected to be output
// have been output, i.e. only non-updated diff
// entries from RS 1 to 4
for (let rs = 1; rs <= 4; ++rs) {
for (let b = 1; b <= 5; ++b) |
}
// check that no other entry than what was expected has been output
expect(filteredDiffEntries.size).toEqual(0);
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should handle an empty input stream', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {},
});
oplogFilter.end();
oplogFilter
.on('data', event => {
fail('should not have received a "data" event', event);
})
.on('end', () => {
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should emit a stream error if failing to fetch RSID after retries', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
oplogFilter.raftSessionStates[1].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream([], 100);
oplogFilter.raftSessionStates[1].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[1], mockOplogStream);
// this bucket name triggers a 500 error in | {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
const outputDiffEntry = [{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null];
const jsonOutputDiffEntry = JSON.stringify(outputDiffEntry);
expect(filteredDiffEntries.has(jsonOutputDiffEntry)).toBeTruthy();
filteredDiffEntries.delete(jsonOutputDiffEntry);
}
} | conditional_block |
lib.rs | ::",
"core::",
"backtrace::backtrace::",
"_rust_begin_unwind",
"color_traceback::",
"__rust_",
"___rust_",
"__pthread",
"_main",
"main",
"__scrt_common_main_seh",
"BaseThreadInitThunk",
"_start",
"__libc_start_main",
"start_thread",
];
// Inspect name.
if let Some(ref name) = self.name {
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
}
const FILE_PREFIXES: &[&str] = &[
"/rustc/",
"src/libstd/",
"src/libpanic_unwind/",
"src/libtest/",
];
// Inspect filename.
if let Some(ref filename) = self.filename {
let filename = filename.to_string_lossy();
if FILE_PREFIXES.iter().any(|x| filename.starts_with(x))
|| filename.contains("/.cargo/registry/src/")
{
return true;
}
}
false
}
/// Heuristically determine whether a frame is likely to be a post panic
/// frame.
///
/// Post panic frames are frames of functions called after the actual panic
/// is already in progress and don't contain any useful information for a
/// reader of the backtrace.
fn is_post_panic_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"_rust_begin_unwind",
"rust_begin_unwind",
"core::result::unwrap_failed",
"core::option::expect_none_failed",
"core::panicking::panic_fmt",
"color_backtrace::create_panic_handler",
"std::panicking::begin_panic",
"begin_panic_fmt",
"backtrace::capture",
];
match self.name.as_ref() {
Some(name) => SYM_PREFIXES.iter().any(|x| name.starts_with(x)),
None => false,
}
}
/// Heuristically determine whether a frame is likely to be part of language
/// runtime.
fn is_runtime_init_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"std::rt::lang_start::",
"test::run_test::run_test_inner::",
"std::sys_common::backtrace::__rust_begin_short_backtrace",
];
let (name, file) = match (self.name.as_ref(), self.filename.as_ref()) {
(Some(name), Some(filename)) => (name, filename.to_string_lossy()),
_ => return false,
};
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
// For Linux, this is the best rule for skipping test init I found.
if name == "{{closure}}" && file == "src/libtest/lib.rs" {
return true;
}
false
}
fn print_source_if_avail(&self, mut out: impl WriteColor, s: &BacktracePrinter) -> IOResult {
let (lineno, filename) = match (self.lineno, self.filename.as_ref()) {
(Some(a), Some(b)) => (a, b),
// Without a line number and file name, we can't sensibly proceed.
_ => return Ok(()),
};
let file = match File::open(filename) {
Ok(file) => file,
Err(ref e) if e.kind() == ErrorKind::NotFound => return Ok(()),
e @ Err(_) => e?,
};
// Extract relevant lines.
let reader = BufReader::new(file);
let start_line = lineno - 2.min(lineno - 1);
let surrounding_src = reader.lines().skip(start_line as usize - 1).take(5);
for (line, cur_line_no) in surrounding_src.zip(start_line..) {
if cur_line_no == lineno {
// Print actual source line with brighter color.
out.set_color(&s.colors.selected_src_ln)?;
writeln!(out, "{:>8} > {}", cur_line_no, line?)?;
out.reset()?;
} else {
writeln!(out, "{:>8} │ {}", cur_line_no, line?)?;
}
}
Ok(())
}
/// Get the module's name by walking /proc/self/maps
#[cfg(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
))]
fn module_info(&self) -> Option<(String, usize)> {
use regex::Regex;
use std::path::Path;
let re = Regex::new(
r"(?x)
^
(?P<start>[0-9a-f]{8,16})
-
(?P<end>[0-9a-f]{8,16})
\s
(?P<perm>[-rwxp]{4})
\s
(?P<offset>[0-9a-f]{8})
\s
[0-9a-f]+:[0-9a-f]+
\s
[0-9]+
\s+
(?P<path>.*)
$
",
)
.unwrap();
let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps");
for line in BufReader::new(mapsfile).lines() {
let line = line.unwrap();
if let Some(caps) = re.captures(&line) {
let (start, end, path) = (
usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(),
usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(),
caps.name("path").unwrap().as_str().to_string(),
);
if self.ip >= start && self.ip < end {
return if let Some(filename) = Path::new(&path).file_name() {
Some((filename.to_str().unwrap().to_string(), start))
} else {
None
};
}
}
}
None
}
#[cfg(not(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
)))]
fn module_info(&self) -> Option<(String, usize)> {
None
}
fn pr | self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult {
let is_dependency_code = self.is_dependency_code();
// Print frame index.
write!(out, "{:>2}: ", i)?;
if s.should_print_addresses() {
if let Some((module_name, module_base)) = self.module_info() {
write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?;
} else {
write!(out, "0x{:016x} - ", self.ip)?;
}
}
// Does the function have a hash suffix?
// (dodging a dep on the regex crate here)
let name = self
.name
.as_ref()
.map(|s| s.as_str())
.unwrap_or("<unknown>");
let has_hash_suffix = name.len() > 19
&& &name[name.len() - 19..name.len() - 16] == "::h"
&& name[name.len() - 16..].chars().all(|x| x.is_digit(16));
// Print function name.
out.set_color(if is_dependency_code {
&s.colors.dependency_code
} else {
&s.colors.crate_code
})?;
if has_hash_suffix {
write!(out, "{}", &name[..name.len() - 19])?;
if s.strip_function_hash {
writeln!(out)?;
} else {
out.set_color(if is_dependency_code {
&s.colors.dependency_code_hash
} else {
&s.colors.crate_code_hash
})?;
writeln!(out, "{}", &name[name.len() - 19..])?;
}
} else {
writeln!(out, "{}", name)?;
}
out.reset()?;
// Print source location, if known.
if let Some(ref file) = self.filename {
let filestr = file.to_str().unwrap_or("<bad utf8>");
let lineno = self
.lineno
.map_or("<unknown line>".to_owned(), |x| x.to_string());
writeln!(out, " at {}:{}", filestr, lineno)?;
} else {
writeln!(out, " at <unknown source file>")?;
}
// Maybe print source.
if s.current_verbosity() >= Verbosity::Full {
self.print_source_if_avail(out, s)?;
}
Ok(())
}
}
/// The default frame filter. Heuristically determines whether a frame is likely to be an
/// uninteresting frame. This filters out post panic frames and runtime init frames and dependency
/// code.
pub fn default_frame_filter(frames: &mut Vec<&Frame>) {
let top_cutoff = frames
.iter()
.rposition(|x| x.is_post_panic_code())
.map(|x| x | int(& | identifier_name |
lib.rs | ::",
"core::",
"backtrace::backtrace::",
"_rust_begin_unwind",
"color_traceback::",
"__rust_", | "___rust_",
"__pthread",
"_main",
"main",
"__scrt_common_main_seh",
"BaseThreadInitThunk",
"_start",
"__libc_start_main",
"start_thread",
];
// Inspect name.
if let Some(ref name) = self.name {
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
}
const FILE_PREFIXES: &[&str] = &[
"/rustc/",
"src/libstd/",
"src/libpanic_unwind/",
"src/libtest/",
];
// Inspect filename.
if let Some(ref filename) = self.filename {
let filename = filename.to_string_lossy();
if FILE_PREFIXES.iter().any(|x| filename.starts_with(x))
|| filename.contains("/.cargo/registry/src/")
{
return true;
}
}
false
}
/// Heuristically determine whether a frame is likely to be a post panic
/// frame.
///
/// Post panic frames are frames of functions called after the actual panic
/// is already in progress and don't contain any useful information for a
/// reader of the backtrace.
fn is_post_panic_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"_rust_begin_unwind",
"rust_begin_unwind",
"core::result::unwrap_failed",
"core::option::expect_none_failed",
"core::panicking::panic_fmt",
"color_backtrace::create_panic_handler",
"std::panicking::begin_panic",
"begin_panic_fmt",
"backtrace::capture",
];
match self.name.as_ref() {
Some(name) => SYM_PREFIXES.iter().any(|x| name.starts_with(x)),
None => false,
}
}
/// Heuristically determine whether a frame is likely to be part of language
/// runtime.
fn is_runtime_init_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"std::rt::lang_start::",
"test::run_test::run_test_inner::",
"std::sys_common::backtrace::__rust_begin_short_backtrace",
];
let (name, file) = match (self.name.as_ref(), self.filename.as_ref()) {
(Some(name), Some(filename)) => (name, filename.to_string_lossy()),
_ => return false,
};
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
// For Linux, this is the best rule for skipping test init I found.
if name == "{{closure}}" && file == "src/libtest/lib.rs" {
return true;
}
false
}
fn print_source_if_avail(&self, mut out: impl WriteColor, s: &BacktracePrinter) -> IOResult {
let (lineno, filename) = match (self.lineno, self.filename.as_ref()) {
(Some(a), Some(b)) => (a, b),
// Without a line number and file name, we can't sensibly proceed.
_ => return Ok(()),
};
let file = match File::open(filename) {
Ok(file) => file,
Err(ref e) if e.kind() == ErrorKind::NotFound => return Ok(()),
e @ Err(_) => e?,
};
// Extract relevant lines.
let reader = BufReader::new(file);
let start_line = lineno - 2.min(lineno - 1);
let surrounding_src = reader.lines().skip(start_line as usize - 1).take(5);
for (line, cur_line_no) in surrounding_src.zip(start_line..) {
if cur_line_no == lineno {
// Print actual source line with brighter color.
out.set_color(&s.colors.selected_src_ln)?;
writeln!(out, "{:>8} > {}", cur_line_no, line?)?;
out.reset()?;
} else {
writeln!(out, "{:>8} │ {}", cur_line_no, line?)?;
}
}
Ok(())
}
/// Get the module's name by walking /proc/self/maps
#[cfg(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
))]
fn module_info(&self) -> Option<(String, usize)> {
use regex::Regex;
use std::path::Path;
let re = Regex::new(
r"(?x)
^
(?P<start>[0-9a-f]{8,16})
-
(?P<end>[0-9a-f]{8,16})
\s
(?P<perm>[-rwxp]{4})
\s
(?P<offset>[0-9a-f]{8})
\s
[0-9a-f]+:[0-9a-f]+
\s
[0-9]+
\s+
(?P<path>.*)
$
",
)
.unwrap();
let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps");
for line in BufReader::new(mapsfile).lines() {
let line = line.unwrap();
if let Some(caps) = re.captures(&line) {
let (start, end, path) = (
usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(),
usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(),
caps.name("path").unwrap().as_str().to_string(),
);
if self.ip >= start && self.ip < end {
return if let Some(filename) = Path::new(&path).file_name() {
Some((filename.to_str().unwrap().to_string(), start))
} else {
None
};
}
}
}
None
}
#[cfg(not(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
)))]
fn module_info(&self) -> Option<(String, usize)> {
None
}
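/// Print a single backtrace frame: its index, an optional address, the
/// (optionally de-hashed) function name, the source location and, at full
/// verbosity, the surrounding source snippet.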
fn print(&self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult {
let is_dependency_code = self.is_dependency_code();
// Print frame index.
write!(out, "{:>2}: ", i)?;
if s.should_print_addresses() {
if let Some((module_name, module_base)) = self.module_info() {
write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?;
} else {
write!(out, "0x{:016x} - ", self.ip)?;
}
}
// Does the function have a hash suffix?
// (dodging a dep on the regex crate here)
let name = self
.name
.as_ref()
.map(|s| s.as_str())
.unwrap_or("<unknown>");
let has_hash_suffix = name.len() > 19
&& &name[name.len() - 19..name.len() - 16] == "::h"
&& name[name.len() - 16..].chars().all(|x| x.is_digit(16));
// Print function name.
out.set_color(if is_dependency_code {
&s.colors.dependency_code
} else {
&s.colors.crate_code
})?;
if has_hash_suffix {
write!(out, "{}", &name[..name.len() - 19])?;
if s.strip_function_hash {
writeln!(out)?;
} else {
out.set_color(if is_dependency_code {
&s.colors.dependency_code_hash
} else {
&s.colors.crate_code_hash
})?;
writeln!(out, "{}", &name[name.len() - 19..])?;
}
} else {
writeln!(out, "{}", name)?;
}
out.reset()?;
// Print source location, if known.
if let Some(ref file) = self.filename {
let filestr = file.to_str().unwrap_or("<bad utf8>");
let lineno = self
.lineno
.map_or("<unknown line>".to_owned(), |x| x.to_string());
writeln!(out, " at {}:{}", filestr, lineno)?;
} else {
writeln!(out, " at <unknown source file>")?;
}
// Maybe print source.
if s.current_verbosity() >= Verbosity::Full {
self.print_source_if_avail(out, s)?;
}
Ok(())
}
}
/// The default frame filter. Heuristically determines whether a frame is likely to be an
/// uninteresting frame. This filters out post panic frames and runtime init frames and dependency
/// code.
pub fn default_frame_filter(frames: &mut Vec<&Frame>) {
let top_cutoff = frames
.iter()
.rposition(|x| x.is_post_panic_code())
.map(|x| x + | random_line_split |
|
lib.rs | ::",
"core::",
"backtrace::backtrace::",
"_rust_begin_unwind",
"color_traceback::",
"__rust_",
"___rust_",
"__pthread",
"_main",
"main",
"__scrt_common_main_seh",
"BaseThreadInitThunk",
"_start",
"__libc_start_main",
"start_thread",
];
// Inspect name.
if let Some(ref name) = self.name {
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
}
const FILE_PREFIXES: &[&str] = &[
"/rustc/",
"src/libstd/",
"src/libpanic_unwind/",
"src/libtest/",
];
// Inspect filename.
if let Some(ref filename) = self.filename {
let filename = filename.to_string_lossy();
if FILE_PREFIXES.iter().any(|x| filename.starts_with(x))
|| filename.contains("/.cargo/registry/src/")
{
return true;
}
}
false
}
/// Heuristically determine whether a frame is likely to be a post panic
/// frame.
///
/// Post panic frames are frames of functions called after the actual panic
/// is already in progress and don't contain any useful information for a
/// reader of the backtrace.
fn is_post_panic_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"_rust_begin_unwind",
"rust_begin_unwind",
"core::result::unwrap_failed",
"core::option::expect_none_failed",
"core::panicking::panic_fmt",
"color_backtrace::create_panic_handler",
"std::panicking::begin_panic",
"begin_panic_fmt",
"backtrace::capture",
];
match self.name.as_ref() {
Some(name) => SYM_PREFIXES.iter().any(|x| name.starts_with(x)),
None => false,
}
}
/// Heuristically determine whether a frame is likely to be part of language
/// runtime.
fn is_runtime_init_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"std::rt::lang_start::",
"test::run_test::run_test_inner::",
"std::sys_common::backtrace::__rust_begin_short_backtrace",
];
let (name, file) = match (self.name.as_ref(), self.filename.as_ref()) {
(Some(name), Some(filename)) => (name, filename.to_string_lossy()),
_ => return false,
};
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
// For Linux, this is the best rule for skipping test init I found.
if name == "{{closure}}" && file == "src/libtest/lib.rs" {
return true;
}
false
}
fn print_source_if_avail(&self, mut out: impl WriteColor, s: &BacktracePrinter) -> IOResult {
let (lineno, filename) = match (self.lineno, self.filename.as_ref()) {
(Some(a), Some(b)) => (a, b),
// Without a line number and file name, we can't sensibly proceed.
_ => return Ok(()),
};
let file = match File::open(filename) {
Ok(file) => file,
Err(ref e) if e.kind() == ErrorKind::NotFound => return Ok(()),
e @ Err(_) => e?,
};
// Extract relevant lines.
let reader = BufReader::new(file);
let start_line = lineno - 2.min(lineno - 1);
let surrounding_src = reader.lines().skip(start_line as usize - 1).take(5);
for (line, cur_line_no) in surrounding_src.zip(start_line..) {
if cur_line_no == lineno {
// Print actual source line with brighter color.
out.set_color(&s.colors.selected_src_ln)?;
writeln!(out, "{:>8} > {}", cur_line_no, line?)?;
out.reset()?;
} else {
writeln!(out, "{:>8} │ {}", cur_line_no, line?)?;
}
}
Ok(())
}
/// Get the module's name by walking /proc/self/maps
#[cfg(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
))]
fn module_info(&self) -> Option<(String, usize)> {
use regex::Regex;
use std::path::Path;
let re = Regex::new(
r"(?x)
^
(?P<start>[0-9a-f]{8,16})
-
(?P<end>[0-9a-f]{8,16})
\s
(?P<perm>[-rwxp]{4})
\s
(?P<offset>[0-9a-f]{8})
\s
[0-9a-f]+:[0-9a-f]+
\s
[0-9]+
\s+
(?P<path>.*)
$
",
)
.unwrap();
let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps");
for line in BufReader::new(mapsfile).lines() {
let line = line.unwrap();
if let Some(caps) = re.captures(&line) {
let (start, end, path) = (
usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(),
usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(),
caps.name("path").unwrap().as_str().to_string(),
);
if self.ip >= start && self.ip < end {
return if let Some(filename) = Path::new(&path).file_name() {
Some((filename.to_str().unwrap().to_string(), start))
} else {
None
};
}
}
}
None
}
#[cfg(not(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
)))]
fn module_info(&self) -> Option<(String, usize)> {
None
}
fn print(&self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult {
let is_dependency_code = self.is_dependency_code();
// Print frame index.
write!(out, "{:>2}: ", i)?;
if s.should_print_addresses() {
if let Some((module_name, module_base)) = self.module_info() {
write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?;
} else {
write!(out, "0x{:016x} - ", self.ip)?;
}
}
// Does the function have a hash suffix?
// (dodging a dep on the regex crate here)
let name = self
.name
.as_ref()
.map(|s| s.as_str())
.unwrap_or("<unknown>");
let has_hash_suffix = name.len() > 19
&& &name[name.len() - 19..name.len() - 16] == "::h"
&& name[name.len() - 16..].chars().all(|x| x.is_digit(16));
// Print function name.
out.set_color(if is_dependency_code {
| lse {
&s.colors.crate_code
})?;
if has_hash_suffix {
write!(out, "{}", &name[..name.len() - 19])?;
if s.strip_function_hash {
writeln!(out)?;
} else {
out.set_color(if is_dependency_code {
&s.colors.dependency_code_hash
} else {
&s.colors.crate_code_hash
})?;
writeln!(out, "{}", &name[name.len() - 19..])?;
}
} else {
writeln!(out, "{}", name)?;
}
out.reset()?;
// Print source location, if known.
if let Some(ref file) = self.filename {
let filestr = file.to_str().unwrap_or("<bad utf8>");
let lineno = self
.lineno
.map_or("<unknown line>".to_owned(), |x| x.to_string());
writeln!(out, " at {}:{}", filestr, lineno)?;
} else {
writeln!(out, " at <unknown source file>")?;
}
// Maybe print source.
if s.current_verbosity() >= Verbosity::Full {
self.print_source_if_avail(out, s)?;
}
Ok(())
}
}
/// The default frame filter. Heuristically determines whether a frame is likely to be an
/// uninteresting frame. This filters out post panic frames and runtime init frames and dependency
/// code.
pub fn default_frame_filter(frames: &mut Vec<&Frame>) {
let top_cutoff = frames
.iter()
.rposition(|x| x.is_post_panic_code())
.map(|x| x | &s.colors.dependency_code
} e | conditional_block |
lib.rs | ?;
out.reset()?;
} else {
writeln!(out, "{:>8} │ {}", cur_line_no, line?)?;
}
}
Ok(())
}
/// Get the module's name by walking /proc/self/maps
#[cfg(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
))]
fn module_info(&self) -> Option<(String, usize)> {
use regex::Regex;
use std::path::Path;
let re = Regex::new(
r"(?x)
^
(?P<start>[0-9a-f]{8,16})
-
(?P<end>[0-9a-f]{8,16})
\s
(?P<perm>[-rwxp]{4})
\s
(?P<offset>[0-9a-f]{8})
\s
[0-9a-f]+:[0-9a-f]+
\s
[0-9]+
\s+
(?P<path>.*)
$
",
)
.unwrap();
let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps");
for line in BufReader::new(mapsfile).lines() {
let line = line.unwrap();
if let Some(caps) = re.captures(&line) {
let (start, end, path) = (
usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(),
usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(),
caps.name("path").unwrap().as_str().to_string(),
);
if self.ip >= start && self.ip < end {
return if let Some(filename) = Path::new(&path).file_name() {
Some((filename.to_str().unwrap().to_string(), start))
} else {
None
};
}
}
}
None
}
#[cfg(not(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
)))]
fn module_info(&self) -> Option<(String, usize)> {
None
}
fn print(&self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult {
let is_dependency_code = self.is_dependency_code();
// Print frame index.
write!(out, "{:>2}: ", i)?;
if s.should_print_addresses() {
if let Some((module_name, module_base)) = self.module_info() {
write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?;
} else {
write!(out, "0x{:016x} - ", self.ip)?;
}
}
// Does the function have a hash suffix?
// (dodging a dep on the regex crate here)
let name = self
.name
.as_ref()
.map(|s| s.as_str())
.unwrap_or("<unknown>");
let has_hash_suffix = name.len() > 19
&& &name[name.len() - 19..name.len() - 16] == "::h"
&& name[name.len() - 16..].chars().all(|x| x.is_digit(16));
// Print function name.
out.set_color(if is_dependency_code {
&s.colors.dependency_code
} else {
&s.colors.crate_code
})?;
if has_hash_suffix {
write!(out, "{}", &name[..name.len() - 19])?;
if s.strip_function_hash {
writeln!(out)?;
} else {
out.set_color(if is_dependency_code {
&s.colors.dependency_code_hash
} else {
&s.colors.crate_code_hash
})?;
writeln!(out, "{}", &name[name.len() - 19..])?;
}
} else {
writeln!(out, "{}", name)?;
}
out.reset()?;
// Print source location, if known.
if let Some(ref file) = self.filename {
let filestr = file.to_str().unwrap_or("<bad utf8>");
let lineno = self
.lineno
.map_or("<unknown line>".to_owned(), |x| x.to_string());
writeln!(out, " at {}:{}", filestr, lineno)?;
} else {
writeln!(out, " at <unknown source file>")?;
}
// Maybe print source.
if s.current_verbosity() >= Verbosity::Full {
self.print_source_if_avail(out, s)?;
}
Ok(())
}
}
/// The default frame filter. Heuristically determines whether a frame is likely to be an
/// uninteresting frame. This filters out post panic frames and runtime init frames and dependency
/// code.
pub fn default_frame_filter(frames: &mut Vec<&Frame>) {
let top_cutoff = frames
.iter()
.rposition(|x| x.is_post_panic_code())
.map(|x| x + 2) // indices are 1 based
.unwrap_or(0);
let bottom_cutoff = frames
.iter()
.position(|x| x.is_runtime_init_code())
.unwrap_or_else(|| frames.len());
let rng = top_cutoff..=bottom_cutoff;
frames.retain(|x| rng.contains(&x.n))
}
// ============================================================================================== //
// [BacktracePrinter] //
// ============================================================================================== //
/// Color scheme definition.
#[derive(Debug, Clone)]
pub struct ColorScheme {
pub frames_omitted_msg: ColorSpec,
pub header: ColorSpec,
pub msg_loc_prefix: ColorSpec,
pub src_loc: ColorSpec,
pub src_loc_separator: ColorSpec,
pub env_var: ColorSpec,
pub dependency_code: ColorSpec,
pub dependency_code_hash: ColorSpec,
pub crate_code: ColorSpec,
pub crate_code_hash: ColorSpec,
pub selected_src_ln: ColorSpec,
}
impl ColorScheme {
/// Helper to create a new `ColorSpec` & set a few properties in one wash.
fn cs(fg: Option<Color>, intense: bool, bold: bool) -> ColorSpec {
let mut cs = ColorSpec::new();
cs.set_fg(fg);
cs.set_bold(bold);
cs.set_intense(intense);
cs
}
/// The classic `color-backtrace` scheme, as shown in the screenshots.
pub fn classic() -> Self {
Self {
frames_omitted_msg: Self::cs(Some(Color::Cyan), true, false),
header: Self::cs(Some(Color::Red), false, false),
msg_loc_prefix: Self::cs(Some(Color::Cyan), false, false),
src_loc: Self::cs(Some(Color::Magenta), false, false),
src_loc_separator: Self::cs(Some(Color::White), false, false),
env_var: Self::cs(None, false, true),
dependency_code: Self::cs(Some(Color::Green), false, false),
dependency_code_hash: Self::cs(Some(Color::Black), true, false),
crate_code: Self::cs(Some(Color::Red), true, false),
crate_code_hash: Self::cs(Some(Color::Black), true, false),
selected_src_ln: Self::cs(None, false, true),
}
}
}
impl Default for ColorScheme {
fn default() -> Self {
Self::classic()
}
}
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter` instead.")]
pub type Settings = BacktracePrinter;
/// Pretty-printer for backtraces and [`PanicInfo`](PanicInfo) structs.
#[derive(Clone)]
pub struct BacktracePrinter {
message: String,
verbosity: Verbosity,
lib_verbosity: Verbosity,
strip_function_hash: bool,
is_panic_handler: bool,
colors: ColorScheme,
filters: Vec<Arc<FilterCallback>>,
should_print_addresses: bool,
}
impl Default for BacktracePrinter {
fn default() -> Self {
Self {
verbosity: Verbosity::from_env(),
lib_verbosity: Verbosity::lib_from_env(),
message: "The application panicked (crashed).".to_owned(),
strip_function_hash: false,
colors: ColorScheme::classic(),
is_panic_handler: false,
filters: vec![Arc::new(default_frame_filter)],
should_print_addresses: false,
}
}
}
impl std::fmt::Debug for BacktracePrinter {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("Settings")
.field("message", &self.message)
.field("verbosity", &self.verbosity)
.field("lib_verbosity", &self.lib_verbosity)
.field("strip_function_hash", &self.strip_function_hash)
.field("is_panic_handler", &self.is_panic_handler)
.field("print_addresses", &self.should_print_addresses)
.field("colors", &self.colors)
.finish()
}
}
/// Builder functions.
impl BacktracePrinter {
/// Alias for `BacktracePrinter::default`.
pub fn new() -> Self {
| Self::default()
}
| identifier_body |
|
u2eve.py | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read unified2 log files and output events as Suricata EVE JSON."""
from __future__ import print_function
import sys
import os
import os.path
import base64
if sys.argv[0] == __file__:
sys.path.insert(
0, os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import socket
import time
import json
import logging
import struct
from datetime import datetime
try:
from collections import OrderedDict
except ImportError as err:
from idstools.compat.ordereddict import OrderedDict
try:
import argparse
except ImportError as err:
from idstools.compat.argparse import argparse
from idstools import unified2
from idstools import maps
logging.basicConfig(level=logging.INFO, format="%(message)s")
LOG = logging.getLogger()
proto_map = {
1: "ICMP",
6: "TCP",
17: "UDP",
}
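# Format the local UTC offset for the given epoch seconds as "+HHMM"/"-HHMM".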
def get_tzoffset(sec):
offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)
if offset.days == -1:
return "-%02d%02d" % (
(86400 - offset.seconds) / 3600, ((86400 - offset.seconds) % 3600) / 60)
else:
return "+%02d%02d" % (
offset.seconds / 3600, (offset.seconds % 3600) / 60)
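# Render an ISO 8601-style timestamp with microseconds and the local UTC offset.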
def render_timestamp(sec, usec):
tt = time.localtime(sec)
return "%04d-%02d-%02dT%02d:%02d:%02d.%06d%s" % (
tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec,
usec, get_tzoffset(sec))
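# Derive a flow identifier by XOR-ing the protocol number (shifted into the
# high byte) with the 32-bit words of the raw source/destination addresses
# and, when present, the source/destination ports.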
def calculate_flow_id(event):
flow_id = event["protocol"] << 24
if len(event["source-ip.raw"]) == 4:
flow_id = flow_id ^ \
struct.unpack(">L", event["source-ip.raw"])[0] ^ \
struct.unpack(">L", event["destination-ip.raw"])[0]
else:
for part in struct.unpack(">LLLL", event["source-ip.raw"]):
flow_id = flow_id ^ part
for part in struct.unpack(">LLLL", event["destination-ip.raw"]):
flow_id = flow_id ^ part
if "src_port" in event and "dest_port" in event:
flow_id = flow_id ^ event["src_port"] ^ event["dest_port"]
return flow_id
class EveFilter(object):
def __init__(
self, msgmap=None, classmap=None):
self.msgmap = msgmap
self.classmap = classmap
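# Convert a unified2 event record into a Suricata EVE-style "alert" dict.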
def filter(self, event):
output = OrderedDict()
output["timestamp"] = render_timestamp(
event["event-second"], event["event-microsecond"])
output["sensor_id"] = event["sensor-id"]
output["event_type"] = "alert"
output["src_ip"] = event["source-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["src_port"] = event["sport-itype"]
output["dest_ip"] = event["destination-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["dest_port"] = event["dport-icode"]
output["proto"] = self.getprotobynumber(event["protocol"])
if event["protocol"] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:
output["icmp_type"] = event["sport-itype"]
output["icmp_code"] = event["dport-icode"]
output["flow_id"] = calculate_flow_id(event)
alert = OrderedDict()
alert["action"] = "blocked" if event["blocked"] == 1 else "allowed"
alert["gid"] = event["generator-id"]
alert["signature_id"] = event["signature-id"]
alert["rev"] = event["signature-revision"]
alert["signature"] = self.resolve_msg(event)
alert["category"] = self.resolve_classification(event)
alert["severity"] = event["priority"]
output["alert"] = alert
# EVE only includes one packet.
if event["packets"]:
output["packet"] = base64.b64encode(event["packets"][0]["data"])
return output
def resolve_classification(self, event, default=None):
if self.classmap:
classinfo = self.classmap.get(event["classification-id"])
if classinfo:
return classinfo["description"]
return default
def resolve_msg(self, event, default=None):
if self.msgmap:
signature = self.msgmap.get(
event["generator-id"], event["signature-id"])
if signature:
return signature["msg"]
return default
def getprotobynumber(self, protocol):
return proto_map.get(protocol, str(protocol))
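# Simple output sink: writes one JSON record per line and reopens the file if
# it has been removed (e.g. rotated away) between writes.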
class OutputWrapper(object):
def __init__(self, filename, fileobj=None):
self.filename = filename
self.fileobj = fileobj
if self.fileobj is None:
self.reopen()
self.isfile = True
else:
self.isfile = False
def reopen(self):
if self.fileobj:
self.fileobj.close()
self.fileobj = open(self.filename, "ab")
def write(self, buf):
if self.isfile:
if not os.path.exists(self.filename):
self.reopen()
self.fileobj.write(buf)
self.fileobj.write("\n")
self.fileobj.flush()
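# Best-effort loading of classification.config, gen-msg.map and sid-msg.map
# from the directory that contains the given snort.conf.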
def load_from_snort_conf(snort_conf, classmap, msgmap):
snort_etc = os.path.dirname(os.path.expanduser(snort_conf))
classification_config = os.path.join(snort_etc, "classification.config")
if os.path.exists(classification_config):
LOG.debug("Loading %s.", classification_config)
classmap.load_from_file(open(classification_config))
genmsg_map = os.path.join(snort_etc, "gen-msg.map")
if os.path.exists(genmsg_map):
LOG.debug("Loading %s.", genmsg_map)
msgmap.load_generator_map(open(genmsg_map))
sidmsg_map = os.path.join(snort_etc, "sid-msg.map")
if os.path.exists(sidmsg_map):
LOG.debug("Loading %s.", sidmsg_map)
msgmap.load_signature_map(open(sidmsg_map))
epilog = """If --directory and --prefix are provided, files will be
read from the specified 'spool' directory. Otherwise files on the
command line will be processed.
"""
def main():
msgmap = maps.SignatureMap()
classmap = maps.ClassificationMap()
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@', epilog=epilog)
parser.add_argument(
"-C", dest="classification_path", metavar="<classification.config>",
help="path to classification config")
parser.add_argument(
"-S", dest="sidmsgmap_path", metavar="<msg-msg.map>",
help="path to sid-msg.map")
parser.add_argument(
"-G", dest="genmsgmap_path", metavar="<gen-msg.map>",
help="path to gen-msg.map")
parser.add_argument(
"--snort-conf", dest="snort_conf", metavar="<snort.conf>",
help="attempt to load classifications and map files based on the "
"location of the snort.conf")
parser.add_argument(
"--directory", metavar="<spool directory>",
help="spool directory (eg: /var/log/snort)")
parser.add_argument(
"--prefix", metavar="<spool file prefix>",
help="spool filename prefix (eg: unified2.log)")
parser.add_argument(
"--bookmark", action="store_true", default=False,
help="enable bookmarking")
parser.add_argument(
"--follow", action="store_true", default=False,
help="follow files/continuous mode (spool mode only)")
parser.add_argument(
"--delete", action="store_true", default=False,
help="delete spool files")
parser.add_argument(
"--output", metavar="<filename>",
help="output filename (eg: /var/log/snort/alerts.json")
parser.add_argument(
"--stdout", action="store_true", default=False,
help="also log to stdout if --output is a file")
parser.add_argument(
"filenames", nargs="*")
args = parser.parse_args()
if args.snort_conf:
load_from_snort_conf(args.snort_conf, classmap, msgmap)
if args.classification_path:
classmap.load_from_file(
open(os.path.expanduser(args.classification_path)))
if args.genmsgmap_path:
msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path)))
if args.sidmsgmap_path:
msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path)))
if msgmap.size() == 0:
| LOG.warn("WARNING: No alert message map entries loaded.") | conditional_block |
|
u2eve.py | OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read unified2 log files and output events as Suricata EVE JSON."""
from __future__ import print_function
import sys
import os
import os.path
import base64
if sys.argv[0] == __file__:
sys.path.insert(
0, os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import socket
import time
import json
import logging
import struct
from datetime import datetime
try:
from collections import OrderedDict
except ImportError as err:
from idstools.compat.ordereddict import OrderedDict
try:
import argparse
except ImportError as err:
from idstools.compat.argparse import argparse
from idstools import unified2
from idstools import maps
logging.basicConfig(level=logging.INFO, format="%(message)s")
LOG = logging.getLogger()
proto_map = {
1: "ICMP",
6: "TCP",
17: "UDP",
}
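# Protocol numbers outside this map are rendered by getprotobynumber() as their
# numeric string form, e.g. GRE (protocol 47) would come out as "47".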
def get_tzoffset(sec):
offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)
if offset.days == -1:
return "-%02d%02d" % (
(86400 - offset.seconds) / 3600, (86400 - offset.seconds) % 3600)
else:
return "+%02d%02d" % (
offset.seconds / 3600, offset.seconds % 3600)
def render_timestamp(sec, usec):
tt = time.localtime(sec)
return "%04d-%02d-%02dT%02d:%02d:%02d.%06d%s" % (
tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec,
usec, get_tzoffset(sec))
def calculate_flow_id(event):
flow_id = event["protocol"] << 24
if len(event["source-ip.raw"]) == 4:
flow_id = flow_id ^ \
struct.unpack(">L", event["source-ip.raw"])[0] ^ \
struct.unpack(">L", event["destination-ip.raw"])[0]
else:
for part in struct.unpack(">LLLL", event["source-ip.raw"]):
flow_id = flow_id ^ part
for part in struct.unpack(">LLLL", event["destination-ip.raw"]):
flow_id = flow_id ^ part
if "src_port" in event and "dest_port" in event:
flow_id = flow_id ^ event["src_port"] ^ event["dest_port"]
return flow_id
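# calculate_flow_id() folds the protocol number into the high byte and XORs in
# the 32-bit words of both addresses (one word for IPv4, four for IPv6), plus
# the ports when present; since XOR is commutative the id is the same for both
# directions of a flow. Illustrative values (not taken from a real capture):
#   protocol=6 (TCP), src=10.0.0.1, dst=10.0.0.2
#   flow_id = (6 << 24) ^ 0x0A000001 ^ 0x0A000002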
class EveFilter(object):
def __init__(
self, msgmap=None, classmap=None):
self.msgmap = msgmap
self.classmap = classmap
def filter(self, event):
output = OrderedDict()
output["timestamp"] = render_timestamp(
event["event-second"], event["event-microsecond"])
output["sensor_id"] = event["sensor-id"]
output["event_type"] = "alert"
output["src_ip"] = event["source-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["src_port"] = event["sport-itype"]
output["dest_ip"] = event["destination-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["dest_port"] = event["dport-icode"]
output["proto"] = self.getprotobynumber(event["protocol"])
if event["protocol"] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:
output["icmp_type"] = event["sport-itype"]
output["icmp_code"] = event["dport-icode"]
output["flow_id"] = calculate_flow_id(event)
alert = OrderedDict()
alert["action"] = "blocked" if event["blocked"] == 1 else "allowed"
alert["gid"] = event["generator-id"]
alert["signature_id"] = event["signature-id"]
alert["rev"] = event["signature-revision"]
alert["signature"] = self.resolve_msg(event)
alert["category"] = self.resolve_classification(event)
alert["severity"] = event["priority"]
output["alert"] = alert
# EVE only includes one packet.
if event["packets"]:
output["packet"] = base64.b64encode(event["packets"][0]["data"])
return output
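# The returned record follows the Suricata EVE alert shape. A rough sketch,
# with made-up values for illustration only:
#   {"timestamp": "...", "sensor_id": 0, "event_type": "alert",
#    "src_ip": "10.0.0.1", "src_port": 1234, "dest_ip": "10.0.0.2",
#    "dest_port": 80, "proto": "TCP", "flow_id": 100663299,
#    "alert": {"action": "allowed", "gid": 1, "signature_id": 1000001,
#              "rev": 1, "signature": "...", "category": "...", "severity": 2},
#    "packet": "<base64-encoded packet data>"}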
def resolve_classification(self, event, default=None):
if self.classmap:
classinfo = self.classmap.get(event["classification-id"])
if classinfo:
return classinfo["description"]
return default
def resolve_msg(self, event, default=None):
if self.msgmap:
signature = self.msgmap.get(
event["generator-id"], event["signature-id"])
if signature:
return signature["msg"]
return default
def | (self, protocol):
return proto_map.get(protocol, str(protocol))
class OutputWrapper(object):
def __init__(self, filename, fileobj=None):
self.filename = filename
self.fileobj = fileobj
if self.fileobj is None:
self.reopen()
self.isfile = True
else:
self.isfile = False
def reopen(self):
if self.fileobj:
self.fileobj.close()
self.fileobj = open(self.filename, "ab")
def write(self, buf):
if self.isfile:
if not os.path.exists(self.filename):
self.reopen()
self.fileobj.write(buf)
self.fileobj.write("\n")
self.fileobj.flush()
def load_from_snort_conf(snort_conf, classmap, msgmap):
snort_etc = os.path.dirname(os.path.expanduser(snort_conf))
classification_config = os.path.join(snort_etc, "classification.config")
if os.path.exists(classification_config):
LOG.debug("Loading %s.", classification_config)
classmap.load_from_file(open(classification_config))
genmsg_map = os.path.join(snort_etc, "gen-msg.map")
if os.path.exists(genmsg_map):
LOG.debug("Loading %s.", genmsg_map)
msgmap.load_generator_map(open(genmsg_map))
sidmsg_map = os.path.join(snort_etc, "sid-msg.map")
if os.path.exists(sidmsg_map):
LOG.debug("Loading %s.", sidmsg_map)
msgmap.load_signature_map(open(sidmsg_map))
epilog = """If --directory and --prefix are provided files will be
read from the specified 'spool' directory. Otherwise files on the
command line will be processed.
"""
def main():
msgmap = maps.SignatureMap()
classmap = maps.ClassificationMap()
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@', epilog=epilog)
parser.add_argument(
"-C", dest="classification_path", metavar="<classification.config>",
help="path to classification config")
parser.add_argument(
"-S", dest="sidmsgmap_path", metavar="<msg-msg.map>",
help="path to sid-msg.map")
parser.add_argument(
"-G", dest="genmsgmap_path", metavar="<gen-msg.map>",
help="path to gen-msg.map")
parser.add_argument(
"--snort-conf", dest="snort_conf", metavar="<snort.conf>",
help="attempt to load classifications and map files based on the "
"location of the snort.conf")
parser.add_argument(
"--directory", metavar="<spool directory>",
help="spool directory (eg: /var/log/snort)")
parser.add_argument(
"--prefix", metavar="<spool file prefix>",
help="spool filename prefix (eg: unified2.log)")
parser.add_argument(
"--bookmark", action="store_true", default=False,
help="enable bookmarking")
parser.add_argument(
"--follow", action="store_true", default=False,
help="follow files/continuous mode (spool mode only)")
parser.add_argument(
"--delete", action="store_true", default=False,
help="delete spool files")
parser.add_argument(
"--output", metavar="<filename>",
help="output filename (eg: /var/log/snort/alerts.json")
parser.add_argument(
"--stdout", action="store_true", default=False,
help="also log to stdout if --output is a file")
parser.add_argument(
"filenames", nargs="*")
args = parser.parse_args()
if args.snort_conf:
load_from_snort_conf(args.snort_conf, classmap, msgmap)
if args.classification_path:
classmap.load_from_file(
open(os.path.expanduser(args.classification_path)))
if args.genmsgmap_path:
msgmap.load_generator_map(open(os | getprotobynumber | identifier_name |
u2eve.py | OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read unified2 log files and output events as Suricata EVE JSON."""
from __future__ import print_function
import sys
import os
import os.path
import base64
if sys.argv[0] == __file__:
sys.path.insert(
0, os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import socket
import time
import json
import logging
import struct
from datetime import datetime
try:
from collections import OrderedDict
except ImportError as err:
from idstools.compat.ordereddict import OrderedDict
try:
import argparse
except ImportError as err:
from idstools.compat.argparse import argparse
from idstools import unified2
from idstools import maps
logging.basicConfig(level=logging.INFO, format="%(message)s")
LOG = logging.getLogger()
proto_map = {
1: "ICMP",
6: "TCP",
17: "UDP",
}
def get_tzoffset(sec):
offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)
if offset.days == -1:
return "-%02d%02d" % (
(86400 - offset.seconds) / 3600, (86400 - offset.seconds) % 3600)
else:
return "+%02d%02d" % (
offset.seconds / 3600, offset.seconds % 3600)
def render_timestamp(sec, usec):
tt = time.localtime(sec)
return "%04d-%02d-%02dT%02d:%02d:%02d.%06d%s" % (
tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec,
usec, get_tzoffset(sec))
def calculate_flow_id(event):
flow_id = event["protocol"] << 24
if len(event["source-ip.raw"]) == 4:
flow_id = flow_id ^ \
struct.unpack(">L", event["source-ip.raw"])[0] ^ \
struct.unpack(">L", event["destination-ip.raw"])[0]
else:
for part in struct.unpack(">LLLL", event["source-ip.raw"]):
flow_id = flow_id ^ part
for part in struct.unpack(">LLLL", event["destination-ip.raw"]):
flow_id = flow_id ^ part
if "src_port" in event and "dest_port" in event:
flow_id = flow_id ^ event["src_port"] ^ event["dest_port"]
return flow_id
class EveFilter(object):
def __init__(
self, msgmap=None, classmap=None):
self.msgmap = msgmap
self.classmap = classmap
def filter(self, event):
output = OrderedDict()
output["timestamp"] = render_timestamp(
event["event-second"], event["event-microsecond"])
output["sensor_id"] = event["sensor-id"]
output["event_type"] = "alert"
output["src_ip"] = event["source-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["src_port"] = event["sport-itype"]
output["dest_ip"] = event["destination-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["dest_port"] = event["dport-icode"]
output["proto"] = self.getprotobynumber(event["protocol"])
if event["protocol"] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:
output["icmp_type"] = event["sport-itype"]
output["icmp_code"] = event["dport-icode"]
output["flow_id"] = calculate_flow_id(event)
alert = OrderedDict()
alert["action"] = "blocked" if event["blocked"] == 1 else "allowed"
alert["gid"] = event["generator-id"]
alert["signature_id"] = event["signature-id"]
alert["rev"] = event["signature-revision"]
alert["signature"] = self.resolve_msg(event)
alert["category"] = self.resolve_classification(event)
alert["severity"] = event["priority"]
output["alert"] = alert
# EVE only includes one packet.
if event["packets"]:
output["packet"] = base64.b64encode(event["packets"][0]["data"])
return output
def resolve_classification(self, event, default=None):
if self.classmap:
classinfo = self.classmap.get(event["classification-id"])
if classinfo:
return classinfo["description"]
return default
def resolve_msg(self, event, default=None):
if self.msgmap:
signature = self.msgmap.get(
event["generator-id"], event["signature-id"])
if signature:
return signature["msg"]
return default
def getprotobynumber(self, protocol):
return proto_map.get(protocol, str(protocol))
class OutputWrapper(object):
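# Wraps either a caller-supplied file object (for example sys.stdout) or a file
# opened here by name. Only the named-file case sets isfile, in which case
# write() re-opens the file if it has disappeared, so external log rotation is
# handled transparently.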
def __init__(self, filename, fileobj=None):
self.filename = filename
self.fileobj = fileobj
if self.fileobj is None:
self.reopen()
self.isfile = True
else:
self.isfile = False
def reopen(self):
if self.fileobj:
self.fileobj.close()
self.fileobj = open(self.filename, "ab")
def write(self, buf):
|
def load_from_snort_conf(snort_conf, classmap, msgmap):
snort_etc = os.path.dirname(os.path.expanduser(snort_conf))
classification_config = os.path.join(snort_etc, "classification.config")
if os.path.exists(classification_config):
LOG.debug("Loading %s.", classification_config)
classmap.load_from_file(open(classification_config))
genmsg_map = os.path.join(snort_etc, "gen-msg.map")
if os.path.exists(genmsg_map):
LOG.debug("Loading %s.", genmsg_map)
msgmap.load_generator_map(open(genmsg_map))
sidmsg_map = os.path.join(snort_etc, "sid-msg.map")
if os.path.exists(sidmsg_map):
LOG.debug("Loading %s.", sidmsg_map)
msgmap.load_signature_map(open(sidmsg_map))
epilog = """If --directory and --prefix are provided files will be
read from the specified 'spool' directory. Otherwise files on the
command line will be processed.
"""
def main():
msgmap = maps.SignatureMap()
classmap = maps.ClassificationMap()
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@', epilog=epilog)
parser.add_argument(
"-C", dest="classification_path", metavar="<classification.config>",
help="path to classification config")
parser.add_argument(
"-S", dest="sidmsgmap_path", metavar="<msg-msg.map>",
help="path to sid-msg.map")
parser.add_argument(
"-G", dest="genmsgmap_path", metavar="<gen-msg.map>",
help="path to gen-msg.map")
parser.add_argument(
"--snort-conf", dest="snort_conf", metavar="<snort.conf>",
help="attempt to load classifications and map files based on the "
"location of the snort.conf")
parser.add_argument(
"--directory", metavar="<spool directory>",
help="spool directory (eg: /var/log/snort)")
parser.add_argument(
"--prefix", metavar="<spool file prefix>",
help="spool filename prefix (eg: unified2.log)")
parser.add_argument(
"--bookmark", action="store_true", default=False,
help="enable bookmarking")
parser.add_argument(
"--follow", action="store_true", default=False,
help="follow files/continuous mode (spool mode only)")
parser.add_argument(
"--delete", action="store_true", default=False,
help="delete spool files")
parser.add_argument(
"--output", metavar="<filename>",
help="output filename (eg: /var/log/snort/alerts.json")
parser.add_argument(
"--stdout", action="store_true", default=False,
help="also log to stdout if --output is a file")
parser.add_argument(
"filenames", nargs="*")
args = parser.parse_args()
if args.snort_conf:
load_from_snort_conf(args.snort_conf, classmap, msgmap)
if args.classification_path:
classmap.load_from_file(
open(os.path.expanduser(args.classification_path)))
if args.genmsgmap_path:
msgmap.load_generator_map(open(os | if self.isfile:
if not os.path.exists(self.filename):
self.reopen()
self.fileobj.write(buf)
self.fileobj.write("\n")
self.fileobj.flush() | identifier_body |
u2eve.py | # Copyright (c) 2015 Jason Ish
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read unified2 log files and output events as Suricata EVE JSON."""
from __future__ import print_function
import sys
import os
import os.path
import base64
if sys.argv[0] == __file__:
sys.path.insert(
0, os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import socket
import time
import json
import logging
import struct
from datetime import datetime
try:
from collections import OrderedDict
except ImportError as err:
from idstools.compat.ordereddict import OrderedDict
try:
import argparse
except ImportError as err:
from idstools.compat.argparse import argparse
from idstools import unified2
from idstools import maps
logging.basicConfig(level=logging.INFO, format="%(message)s")
LOG = logging.getLogger()
proto_map = {
1: "ICMP",
6: "TCP",
17: "UDP",
}
def get_tzoffset(sec):
offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)
if offset.days == -1:
return "-%02d%02d" % (
(86400 - offset.seconds) / 3600, (86400 - offset.seconds) % 3600)
else:
return "+%02d%02d" % (
offset.seconds / 3600, offset.seconds % 3600)
def render_timestamp(sec, usec):
tt = time.localtime(sec)
return "%04d-%02d-%02dT%02d:%02d:%02d.%06d%s" % (
tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec,
usec, get_tzoffset(sec))
def calculate_flow_id(event):
flow_id = event["protocol"] << 24
if len(event["source-ip.raw"]) == 4:
flow_id = flow_id ^ \
struct.unpack(">L", event["source-ip.raw"])[0] ^ \
struct.unpack(">L", event["destination-ip.raw"])[0]
else:
for part in struct.unpack(">LLLL", event["source-ip.raw"]):
flow_id = flow_id ^ part
for part in struct.unpack(">LLLL", event["destination-ip.raw"]):
flow_id = flow_id ^ part
if "src_port" in event and "dest_port" in event:
flow_id = flow_id ^ event["src_port"] ^ event["dest_port"]
return flow_id
class EveFilter(object):
def __init__(
self, msgmap=None, classmap=None):
self.msgmap = msgmap
self.classmap = classmap
def filter(self, event):
output = OrderedDict()
output["timestamp"] = render_timestamp(
event["event-second"], event["event-microsecond"])
output["sensor_id"] = event["sensor-id"]
output["event_type"] = "alert"
output["src_ip"] = event["source-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["src_port"] = event["sport-itype"]
output["dest_ip"] = event["destination-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["dest_port"] = event["dport-icode"]
output["proto"] = self.getprotobynumber(event["protocol"])
if event["protocol"] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:
output["icmp_type"] = event["sport-itype"]
output["icmp_code"] = event["dport-icode"]
output["flow_id"] = calculate_flow_id(event)
alert = OrderedDict()
alert["action"] = "blocked" if event["blocked"] == 1 else "allowed"
alert["gid"] = event["generator-id"]
alert["signature_id"] = event["signature-id"]
alert["rev"] = event["signature-revision"]
alert["signature"] = self.resolve_msg(event)
alert["category"] = self.resolve_classification(event)
alert["severity"] = event["priority"]
output["alert"] = alert
# EVE only includes one packet.
if event["packets"]:
output["packet"] = base64.b64encode(event["packets"][0]["data"])
return output
def resolve_classification(self, event, default=None):
if self.classmap:
classinfo = self.classmap.get(event["classification-id"])
if classinfo:
return classinfo["description"]
return default
def resolve_msg(self, event, default=None):
if self.msgmap:
signature = self.msgmap.get(
event["generator-id"], event["signature-id"])
if signature:
return signature["msg"]
return default
def getprotobynumber(self, protocol):
return proto_map.get(protocol, str(protocol))
class OutputWrapper(object):
def __init__(self, filename, fileobj=None):
self.filename = filename
self.fileobj = fileobj
if self.fileobj is None:
self.reopen()
self.isfile = True
else:
self.isfile = False
def reopen(self):
if self.fileobj:
self.fileobj.close()
self.fileobj = open(self.filename, "ab")
def write(self, buf):
if self.isfile:
if not os.path.exists(self.filename):
self.reopen()
self.fileobj.write(buf)
self.fileobj.write("\n")
self.fileobj.flush()
def load_from_snort_conf(snort_conf, classmap, msgmap):
snort_etc = os.path.dirname(os.path.expanduser(snort_conf))
classification_config = os.path.join(snort_etc, "classification.config")
if os.path.exists(classification_config):
LOG.debug("Loading %s.", classification_config)
classmap.load_from_file(open(classification_config))
genmsg_map = os.path.join(snort_etc, "gen-msg.map")
if os.path.exists(genmsg_map):
LOG.debug("Loading %s.", genmsg_map)
msgmap.load_generator_map(open(genmsg_map))
sidmsg_map = os.path.join(snort_etc, "sid-msg.map")
if os.path.exists(sidmsg_map):
LOG.debug("Loading %s.", sidmsg_map)
msgmap.load_signature_map(open(sidmsg_map))
epilog = """If --directory and --prefix are provided files will be
read from the specified 'spool' directory. Otherwise files on the
command line will be processed.
"""
def main():
msgmap = maps.SignatureMap()
classmap = maps.ClassificationMap()
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@', epilog=epilog)
parser.add_argument(
"-C", dest="classification_path", metavar="<classification.config>",
help="path to classification config")
parser.add_argument(
"-S", dest="sidmsgmap_path", metavar="<msg-msg.map>",
help="path to sid-msg.map")
parser.add_argument(
"-G", dest="genmsgmap_path", metavar="<gen-msg.map>",
help="path to gen-msg.map")
parser.add_argument(
"--snort-conf", dest="snort_conf", metavar="<snort.conf>",
help="attempt to load classifications and map files based on the "
"location of the snort.conf")
parser.add_argument(
"--directory", metavar="<spool directory>",
help="spool directory (eg: /var/log/snort)")
parser.add_argument(
"--prefix", metavar="<spool file prefix>",
help="spool filename prefix (eg: unified2.log)")
parser.add_argument(
"--bookmark", action="store_true", default=False,
help="enable bookmarking")
parser.add_argument(
"--follow", action="store_true", default=False,
help="follow files/continuous mode (spool mode only)")
parser.add_argument(
"--delete", action="store_true", default=False,
help="delete spool files")
parser.add_argument(
"--output", metavar | #! /usr/bin/env python
# | random_line_split |
|
executor.go | (k *KubernetesExecutor) Registered(driver bindings.ExecutorDriver,
executorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Executor %v of framework %v registered with slave %v\n",
executorInfo, frameworkInfo, slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Reregistered is called when the executor is successfully re-registered with the slave.
// This can happen when the slave fails over.
func (k *KubernetesExecutor) Reregistered(driver bindings.ExecutorDriver, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Reregistered with slave %v\n", slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Disconnected is called when the executor is disconnected with the slave.
func (k *KubernetesExecutor) Disconnected(driver bindings.ExecutorDriver) {
if k.isDone() {
return
}
log.Infof("Slave is disconnected\n")
if !k.swapState(connectedState, disconnectedState) {
//programming error?
panic("already disconnected?!")
}
}
// LaunchTask is called when the executor receives a request to launch a task.
func (k *KubernetesExecutor) LaunchTask(driver bindings.ExecutorDriver, taskInfo *mesos.TaskInfo) {
if k.isDone() {
return
}
log.Infof("Launch task %v\n", taskInfo)
if !k.isConnected() {
log.Warningf("Ignore launch task because the executor is disconnected\n")
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.ExecutorUnregistered))
return
}
var pod api.BoundPod
if err := yaml.Unmarshal(taskInfo.GetData(), &pod); err != nil {
log.Warningf("Failed to extract yaml data from the taskInfo.data %v\n", err)
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.UnmarshalTaskDataFailure))
return
}
k.lock.Lock()
defer k.lock.Unlock()
taskId := taskInfo.GetTaskId().GetValue()
if _, found := k.tasks[taskId]; found {
log.Warningf("task already launched\n")
// Not to send back TASK_RUNNING here, because
// may be duplicated messages or duplicated task id.
return
}
// remember this task so that:
// (a) we ignore future launches for it
// (b) we have a record of it so that we can kill it if needed
// (c) we're leaving podName == "" for now, indicates we don't need to delete containers
k.tasks[taskId] = &kuberTask{
mesosTaskInfo: taskInfo,
}
go k.launchTask(driver, taskId, &pod)
}
func (k *KubernetesExecutor) getPidInfo(name string) (api.PodStatus, error) {
return k.kl.GetPodStatus(name, "")
}
// async continuation of LaunchTask
func (k *KubernetesExecutor) launchTask(driver bindings.ExecutorDriver, taskId string, pod *api.BoundPod) {
//HACK(jdef): cloned binding construction from k8s plugin/pkg/scheduler/scheduler.go
binding := &api.Binding{
ObjectMeta: api.ObjectMeta{
Namespace: pod.Namespace,
Annotations: make(map[string]string),
},
PodID: pod.Name,
Host: pod.Annotations[meta.BindingHostKey],
}
// forward the bindings that the scheduler wants to apply
for k, v := range pod.Annotations {
binding.Annotations[k] = v
}
log.Infof("Binding '%v' to '%v' with annotations %+v...", binding.PodID, binding.Host, binding.Annotations)
ctx := api.WithNamespace(api.NewDefaultContext(), binding.Namespace)
err := k.client.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
if err != nil {
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
messages.CreateBindingFailure))
return
}
podFullName := kubelet.GetPodFullName(&api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Annotations: map[string]string{kubelet.ConfigSourceAnnotationKey: k.sourcename},
},
})
// allow a recently failed-over scheduler the chance to recover the task/pod binding:
// it may have failed and recovered before the apiserver is able to report the updated
// binding information. replays of this status event will signal to the scheduler that
// the apiserver should be up-to-date.
data, err := json.Marshal(api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
})
if err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED, err.Error()))
return
}
k.lock.Lock()
defer k.lock.Unlock()
// Add the task.
task, found := k.tasks[taskId]
if !found {
log.V(1).Infof("task %v no longer on record, probably killed, aborting launch sequence - reporting lost", taskId)
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
return
}
// from here on, we need to delete containers associated with the task
// upon it going into a terminal state
task.podName = podFullName
k.pods[podFullName] = pod
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods |
k.updateChan <- update
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_STARTING.Enum(),
Message: proto.String(messages.CreateBindingSuccess),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// Delay reporting 'task running' until container is up.
go k._launchTask(driver, taskId, podFullName)
}
func (k *KubernetesExecutor) _launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
expired := make(chan struct{})
time.AfterFunc(launchGracePeriod, func() { close(expired) })
getMarshalledInfo := func() (data []byte, cancel bool) {
// potentially long call..
if podStatus, err := k.getPidInfo(podFullName); err == nil {
select {
case <-expired:
cancel = true
default:
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
// don't bother with the pod status if the task is already gone
cancel = true
break
} else if podStatus.Phase != api.PodRunning {
// avoid sending back a running status before it's really running
break
}
log.V(2).Infof("Found pod status: '%v'", podStatus)
result := api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
Status: podStatus,
}
if data, err = json.Marshal(result); err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
}
}
}
return
}
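// Poll the kubelet every containerPollTime until the pod reports Running, the
// launch grace period expires, or the task record disappears; only then is a
// TASK_RUNNING (or TASK_LOST) status reported back to the framework.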
waitForRunningPod:
for {
select {
case <-expired:
log.Warningf("Launch expired grace period of '%v'", launchGracePeriod)
break waitForRunningPod
case <-time.After(containerPollTime):
if data, cancel := getMarshalledInfo(); cancel {
break waitForRunningPod
} else if data == nil {
continue waitForRunningPod
} else {
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
goto reportLost
}
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_RUNNING.Enum(),
Message: proto.String(fmt.Sprintf("pod-running:%s", podFullName)),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// continue to monitor the health of the pod
go k.__launchTask(driver, taskId, podFullName)
return
}
}
}
k.lock.Lock()
defer k.lock.Unlock()
reportLost:
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
}
func (k *KubernetesExecutor) __launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
// TODO(nnielsen): Monitor health of pod and report if lost.
// Should we also allow this to fail a couple of times before reporting lost?
// What if the docker daemon is restarting and we can't connect, but it's
// going to bring the pods back online as | {
update.Pods = append(update.Pods, *p)
} | conditional_block |
executor.go | .GetData(), &pod); err != nil {
log.Warningf("Failed to extract yaml data from the taskInfo.data %v\n", err)
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.UnmarshalTaskDataFailure))
return
}
k.lock.Lock()
defer k.lock.Unlock()
taskId := taskInfo.GetTaskId().GetValue()
if _, found := k.tasks[taskId]; found {
log.Warningf("task already launched\n")
// Not to send back TASK_RUNNING here, because
// may be duplicated messages or duplicated task id.
return
}
// remember this task so that:
// (a) we ignore future launches for it
// (b) we have a record of it so that we can kill it if needed
// (c) we're leaving podName == "" for now, indicates we don't need to delete containers
k.tasks[taskId] = &kuberTask{
mesosTaskInfo: taskInfo,
}
go k.launchTask(driver, taskId, &pod)
}
func (k *KubernetesExecutor) getPidInfo(name string) (api.PodStatus, error) {
return k.kl.GetPodStatus(name, "")
}
// async continuation of LaunchTask
func (k *KubernetesExecutor) launchTask(driver bindings.ExecutorDriver, taskId string, pod *api.BoundPod) {
//HACK(jdef): cloned binding construction from k8s plugin/pkg/scheduler/scheduler.go
binding := &api.Binding{
ObjectMeta: api.ObjectMeta{
Namespace: pod.Namespace,
Annotations: make(map[string]string),
},
PodID: pod.Name,
Host: pod.Annotations[meta.BindingHostKey],
}
// forward the bindings that the scheduler wants to apply
for k, v := range pod.Annotations {
binding.Annotations[k] = v
}
log.Infof("Binding '%v' to '%v' with annotations %+v...", binding.PodID, binding.Host, binding.Annotations)
ctx := api.WithNamespace(api.NewDefaultContext(), binding.Namespace)
err := k.client.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
if err != nil {
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
messages.CreateBindingFailure))
return
}
podFullName := kubelet.GetPodFullName(&api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Annotations: map[string]string{kubelet.ConfigSourceAnnotationKey: k.sourcename},
},
})
// allow a recently failed-over scheduler the chance to recover the task/pod binding:
// it may have failed and recovered before the apiserver is able to report the updated
// binding information. replays of this status event will signal to the scheduler that
// the apiserver should be up-to-date.
data, err := json.Marshal(api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
})
if err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED, err.Error()))
return
}
k.lock.Lock()
defer k.lock.Unlock()
// Add the task.
task, found := k.tasks[taskId]
if !found {
log.V(1).Infof("task %v no longer on record, probably killed, aborting launch sequence - reporting lost", taskId)
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
return
}
// from here on, we need to delete containers associated with the task
// upon it going into a terminal state
task.podName = podFullName
k.pods[podFullName] = pod
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods {
update.Pods = append(update.Pods, *p)
}
k.updateChan <- update
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_STARTING.Enum(),
Message: proto.String(messages.CreateBindingSuccess),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// Delay reporting 'task running' until container is up.
go k._launchTask(driver, taskId, podFullName)
}
func (k *KubernetesExecutor) _launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
expired := make(chan struct{})
time.AfterFunc(launchGracePeriod, func() { close(expired) })
getMarshalledInfo := func() (data []byte, cancel bool) {
// potentially long call..
if podStatus, err := k.getPidInfo(podFullName); err == nil {
select {
case <-expired:
cancel = true
default:
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
// don't bother with the pod status if the task is already gone
cancel = true
break
} else if podStatus.Phase != api.PodRunning {
// avoid sending back a running status before it's really running
break
}
log.V(2).Infof("Found pod status: '%v'", podStatus)
result := api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
Status: podStatus,
}
if data, err = json.Marshal(result); err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
}
}
}
return
}
waitForRunningPod:
for {
select {
case <-expired:
log.Warningf("Launch expired grace period of '%v'", launchGracePeriod)
break waitForRunningPod
case <-time.After(containerPollTime):
if data, cancel := getMarshalledInfo(); cancel {
break waitForRunningPod
} else if data == nil {
continue waitForRunningPod
} else {
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
goto reportLost
}
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_RUNNING.Enum(),
Message: proto.String(fmt.Sprintf("pod-running:%s", podFullName)),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// continue to monitor the health of the pod
go k.__launchTask(driver, taskId, podFullName)
return
}
}
}
k.lock.Lock()
defer k.lock.Unlock()
reportLost:
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
}
func (k *KubernetesExecutor) __launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
// TODO(nnielsen): Monitor health of pod and report if lost.
// Should we also allow this to fail a couple of times before reporting lost?
// What if the docker daemon is restarting and we can't connect, but it's
// going to bring the pods back online as soon as it restarts?
knownPod := func() bool {
_, err := k.getPidInfo(podFullName)
return err == nil
}
// Wait for the pod to go away and stop monitoring once it does
// TODO (jdefelice) replace with an /events watch?
for {
time.Sleep(containerPollTime)
if k.checkForLostPodTask(driver, taskId, knownPod) {
return
}
}
}
// Intended to be executed as part of the pod monitoring loop, this fn (ultimately) checks with Docker
// whether the pod is running. It will only return false if the task is still registered and the pod is
// registered in Docker. Otherwise it returns true. If there's still a task record on file, but no pod
// in Docker, then we'll also send a TASK_LOST event.
func (k *KubernetesExecutor) checkForLostPodTask(driver bindings.ExecutorDriver, taskId string, isKnownPod func() bool) bool {
// TODO (jdefelice) don't send false alarms for deleted pods (KILLED tasks)
k.lock.Lock()
defer k.lock.Unlock()
// TODO(jdef) we should really consider k.pods here, along with what docker is reporting, since the kubelet
// may constantly attempt to instantiate a pod as long as it's in the pod state that we're handing to it.
// otherwise, we're probably reporting a TASK_LOST prematurely. Should probably consult RestartPolicy to
// determine appropriate behavior. Should probably also gracefully handle docker daemon restarts.
if _, ok := k.tasks[taskId]; ok {
if isKnownPod() {
return false
} else {
log.Warningf("Detected lost pod, reporting lost task %v", taskId)
k.reportLostTask(driver, taskId, messages.ContainersDisappeared)
} | } else {
log.V(2).Infof("Task %v no longer registered, stop monitoring for lost pods", taskId)
}
return true | random_line_split |
|
executor.go | (from, to stateType) bool {
return atomic.CompareAndSwapInt32((*int32)(&k.state), int32(from), int32(to))
}
// New creates a new kubernetes executor.
func New(kl *kubelet.Kubelet, ch chan<- interface{}, ns string, cl *client.Client, w watch.Interface, dc dockertools.DockerInterface) *KubernetesExecutor {
//TODO(jdef) do something real with these events..
events := w.ResultChan()
if events != nil {
go func() {
for e := range events {
// e ~= watch.Event { ADDED, *api.Event }
log.V(1).Info(e)
}
}()
}
k := &KubernetesExecutor{
kl: kl,
updateChan: ch,
state: disconnectedState,
tasks: make(map[string]*kuberTask),
pods: make(map[string]*api.BoundPod),
sourcename: ns,
client: cl,
events: events,
done: make(chan struct{}),
outgoing: make(chan func() (mesos.Status, error), 1024),
dockerClient: dc,
}
go k.sendLoop()
return k
}
func (k *KubernetesExecutor) isDone() bool {
select {
case <-k.done:
return true
default:
return false
}
}
// Registered is called when the executor is successfully registered with the slave.
func (k *KubernetesExecutor) Registered(driver bindings.ExecutorDriver,
executorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Executor %v of framework %v registered with slave %v\n",
executorInfo, frameworkInfo, slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Reregistered is called when the executor is successfully re-registered with the slave.
// This can happen when the slave fails over.
func (k *KubernetesExecutor) Reregistered(driver bindings.ExecutorDriver, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Reregistered with slave %v\n", slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Disconnected is called when the executor is disconnected with the slave.
func (k *KubernetesExecutor) Disconnected(driver bindings.ExecutorDriver) {
if k.isDone() {
return
}
log.Infof("Slave is disconnected\n")
if !k.swapState(connectedState, disconnectedState) {
//programming error?
panic("already disconnected?!")
}
}
// LaunchTask is called when the executor receives a request to launch a task.
func (k *KubernetesExecutor) LaunchTask(driver bindings.ExecutorDriver, taskInfo *mesos.TaskInfo) {
if k.isDone() {
return
}
log.Infof("Launch task %v\n", taskInfo)
if !k.isConnected() {
log.Warningf("Ignore launch task because the executor is disconnected\n")
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.ExecutorUnregistered))
return
}
var pod api.BoundPod
if err := yaml.Unmarshal(taskInfo.GetData(), &pod); err != nil {
log.Warningf("Failed to extract yaml data from the taskInfo.data %v\n", err)
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.UnmarshalTaskDataFailure))
return
}
k.lock.Lock()
defer k.lock.Unlock()
taskId := taskInfo.GetTaskId().GetValue()
if _, found := k.tasks[taskId]; found {
log.Warningf("task already launched\n")
// Not to send back TASK_RUNNING here, because
// may be duplicated messages or duplicated task id.
return
}
// remember this task so that:
// (a) we ignore future launches for it
// (b) we have a record of it so that we can kill it if needed
// (c) we're leaving podName == "" for now, indicates we don't need to delete containers
k.tasks[taskId] = &kuberTask{
mesosTaskInfo: taskInfo,
}
go k.launchTask(driver, taskId, &pod)
}
func (k *KubernetesExecutor) getPidInfo(name string) (api.PodStatus, error) {
return k.kl.GetPodStatus(name, "")
}
// async continuation of LaunchTask
func (k *KubernetesExecutor) launchTask(driver bindings.ExecutorDriver, taskId string, pod *api.BoundPod) {
//HACK(jdef): cloned binding construction from k8s plugin/pkg/scheduler/scheduler.go
binding := &api.Binding{
ObjectMeta: api.ObjectMeta{
Namespace: pod.Namespace,
Annotations: make(map[string]string),
},
PodID: pod.Name,
Host: pod.Annotations[meta.BindingHostKey],
}
// forward the bindings that the scheduler wants to apply
for k, v := range pod.Annotations {
binding.Annotations[k] = v
}
log.Infof("Binding '%v' to '%v' with annotations %+v...", binding.PodID, binding.Host, binding.Annotations)
ctx := api.WithNamespace(api.NewDefaultContext(), binding.Namespace)
err := k.client.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
if err != nil {
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
messages.CreateBindingFailure))
return
}
podFullName := kubelet.GetPodFullName(&api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Annotations: map[string]string{kubelet.ConfigSourceAnnotationKey: k.sourcename},
},
})
// allow a recently failed-over scheduler the chance to recover the task/pod binding:
// it may have failed and recovered before the apiserver is able to report the updated
// binding information. replays of this status event will signal to the scheduler that
// the apiserver should be up-to-date.
data, err := json.Marshal(api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
})
if err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED, err.Error()))
return
}
k.lock.Lock()
defer k.lock.Unlock()
// Add the task.
task, found := k.tasks[taskId]
if !found {
log.V(1).Infof("task %v no longer on record, probably killed, aborting launch sequence - reporting lost", taskId)
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
return
}
// from here on, we need to delete containers associated with the task
// upon it going into a terminal state
task.podName = podFullName
k.pods[podFullName] = pod
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods {
update.Pods = append(update.Pods, *p)
}
k.updateChan <- update
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_STARTING.Enum(),
Message: proto.String(messages.CreateBindingSuccess),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// Delay reporting 'task running' until container is up.
go k._launchTask(driver, taskId, podFullName)
}
func (k *KubernetesExecutor) _launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
expired := make(chan struct{})
time.AfterFunc(launchGracePeriod, func() { close(expired) })
getMarshalledInfo := func() (data []byte, cancel bool) {
// potentially long call..
if podStatus, err := k.getPidInfo(podFullName); err == nil {
select {
case <-expired:
cancel = true
default:
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
// don't bother with the pod status if the task is already gone
cancel = true
break
} else if podStatus.Phase != api.PodRunning {
// avoid sending back a running status before it's really running
break
}
log.V(2).Infof("Found pod status: '%v'", podStatus)
result := api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
Status: podStatus,
}
if data, err = json.Marshal(result); err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
}
}
}
return
}
waitForRunningPod:
for {
| swapState | identifier_name |
|
executor.go |
func (k *KubernetesExecutor) swapState(from, to stateType) bool {
return atomic.CompareAndSwapInt32((*int32)(&k.state), int32(from), int32(to))
}
// New creates a new kubernetes executor.
func New(kl *kubelet.Kubelet, ch chan<- interface{}, ns string, cl *client.Client, w watch.Interface, dc dockertools.DockerInterface) *KubernetesExecutor {
//TODO(jdef) do something real with these events..
events := w.ResultChan()
if events != nil {
go func() {
for e := range events {
// e ~= watch.Event { ADDED, *api.Event }
log.V(1).Info(e)
}
}()
}
k := &KubernetesExecutor{
kl: kl,
updateChan: ch,
state: disconnectedState,
tasks: make(map[string]*kuberTask),
pods: make(map[string]*api.BoundPod),
sourcename: ns,
client: cl,
events: events,
done: make(chan struct{}),
outgoing: make(chan func() (mesos.Status, error), 1024),
dockerClient: dc,
}
go k.sendLoop()
return k
}
func (k *KubernetesExecutor) isDone() bool {
select {
case <-k.done:
return true
default:
return false
}
}
// Registered is called when the executor is successfully registered with the slave.
func (k *KubernetesExecutor) Registered(driver bindings.ExecutorDriver,
executorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Executor %v of framework %v registered with slave %v\n",
executorInfo, frameworkInfo, slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Reregistered is called when the executor is successfully re-registered with the slave.
// This can happen when the slave fails over.
func (k *KubernetesExecutor) Reregistered(driver bindings.ExecutorDriver, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Reregistered with slave %v\n", slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Disconnected is called when the executor is disconnected with the slave.
func (k *KubernetesExecutor) Disconnected(driver bindings.ExecutorDriver) {
if k.isDone() {
return
}
log.Infof("Slave is disconnected\n")
if !k.swapState(connectedState, disconnectedState) {
//programming error?
panic("already disconnected?!")
}
}
// LaunchTask is called when the executor receives a request to launch a task.
func (k *KubernetesExecutor) LaunchTask(driver bindings.ExecutorDriver, taskInfo *mesos.TaskInfo) {
if k.isDone() {
return
}
log.Infof("Launch task %v\n", taskInfo)
if !k.isConnected() {
log.Warningf("Ignore launch task because the executor is disconnected\n")
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.ExecutorUnregistered))
return
}
var pod api.BoundPod
if err := yaml.Unmarshal(taskInfo.GetData(), &pod); err != nil {
log.Warningf("Failed to extract yaml data from the taskInfo.data %v\n", err)
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.UnmarshalTaskDataFailure))
return
}
k.lock.Lock()
defer k.lock.Unlock()
taskId := taskInfo.GetTaskId().GetValue()
if _, found := k.tasks[taskId]; found {
log.Warningf("task already launched\n")
// Not to send back TASK_RUNNING here, because
// may be duplicated messages or duplicated task id.
return
}
// remember this task so that:
// (a) we ignore future launches for it
// (b) we have a record of it so that we can kill it if needed
// (c) we're leaving podName == "" for now, indicates we don't need to delete containers
k.tasks[taskId] = &kuberTask{
mesosTaskInfo: taskInfo,
}
go k.launchTask(driver, taskId, &pod)
}
func (k *KubernetesExecutor) getPidInfo(name string) (api.PodStatus, error) {
return k.kl.GetPodStatus(name, "")
}
// async continuation of LaunchTask
func (k *KubernetesExecutor) launchTask(driver bindings.ExecutorDriver, taskId string, pod *api.BoundPod) {
//HACK(jdef): cloned binding construction from k8s plugin/pkg/scheduler/scheduler.go
binding := &api.Binding{
ObjectMeta: api.ObjectMeta{
Namespace: pod.Namespace,
Annotations: make(map[string]string),
},
PodID: pod.Name,
Host: pod.Annotations[meta.BindingHostKey],
}
// forward the bindings that the scheduler wants to apply
for k, v := range pod.Annotations {
binding.Annotations[k] = v
}
log.Infof("Binding '%v' to '%v' with annotations %+v...", binding.PodID, binding.Host, binding.Annotations)
ctx := api.WithNamespace(api.NewDefaultContext(), binding.Namespace)
err := k.client.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
if err != nil {
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
messages.CreateBindingFailure))
return
}
podFullName := kubelet.GetPodFullName(&api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Annotations: map[string]string{kubelet.ConfigSourceAnnotationKey: k.sourcename},
},
})
// allow a recently failed-over scheduler the chance to recover the task/pod binding:
// it may have failed and recovered before the apiserver is able to report the updated
// binding information. replays of this status event will signal to the scheduler that
// the apiserver should be up-to-date.
data, err := json.Marshal(api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
})
if err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED, err.Error()))
return
}
k.lock.Lock()
defer k.lock.Unlock()
// Add the task.
task, found := k.tasks[taskId]
if !found {
log.V(1).Infof("task %v no longer on record, probably killed, aborting launch sequence - reporting lost", taskId)
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
return
}
// from here on, we need to delete containers associated with the task
// upon it going into a terminal state
task.podName = podFullName
k.pods[podFullName] = pod
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods {
update.Pods = append(update.Pods, *p)
}
k.updateChan <- update
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_STARTING.Enum(),
Message: proto.String(messages.CreateBindingSuccess),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// Delay reporting 'task running' until container is up.
go k._launchTask(driver, taskId, podFullName)
}
func (k *KubernetesExecutor) _launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
expired := make(chan struct{})
time.AfterFunc(launchGracePeriod, func() { close(expired) })
getMarshalledInfo := func() (data []byte, cancel bool) {
// potentially long call..
if podStatus, err := k.getPidInfo(podFullName); err == nil {
select {
case <-expired:
cancel = true
default:
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
// don't bother with the pod status if the task is already gone
cancel = true
break
} else if podStatus.Phase != api.PodRunning {
// avoid sending back a running status before it's really running
break
}
log.V(2).Infof("Found pod status: '%v'", podStatus)
result := api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
Status: podStatus,
}
if data, err = json.Marshal(result); err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
| {
return connectedState == k.getState()
} | identifier_body |
|
IoManager.py | urls...
"""
df = pd.read_csv(IoManager.CARD_INFOS_FILE_PATH)
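# read_csv returns list-valued columns as their string repr and missing values
# as NaN (a float), so NaNs are first replaced with "[]" and every entry is
# then parsed back into a Python list with ast.literal_eval.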
for list_feature in ["colors", "color_identity"]:
df[list_feature] = df[list_feature].apply(lambda e: e if type(e) != float else "[]")
df[list_feature] = df[list_feature].apply(ast.literal_eval)
return df
def get_ratings(self):
"""
Loads the cards' per-archetype ratings, scales and normalizes them, and adds the summary columns
:return: DataFrame containing each archetype rating for each card
"""
df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)
df = IoManager.scale_ratings(df)
df = IoManager.normalize_ratings_per_archetype(df)
df = self.add_ratings_sum(df)
# print(df[["name", "monogreen", "simic_ramp", "general"]].tail(60))
# print(df[["name", "general"]].sort_values(ascending=False, by="general").head(50))
return df
@staticmethod
def scale_ratings(ratings):
"""
Maps the raw 2-5 ratings onto a steeper scale (2->3, 3->8, 4->30, 5->60): the gap between a 3 and a 4 is much wider than one linear step
:param ratings: df
:return: updated df
"""
mapping = {
2: 3,
3: 8,
4: 30,
5: 60
}
ratings = ratings.applymap(lambda e: mapping[e] if e in mapping else e)
return ratings
@staticmethod
def normalize_ratings_per_archetype(ratings):
"""
Divides each archetype column by (column sum / number of cards) so that every archetype's ratings average out to 1.0
:param ratings:
:return:
"""
archetype_cols = [c for c in ratings.columns if c != "name"]
n_cards = len(ratings["monored"])
for arch_col in archetype_cols:
ratings[arch_col] = ratings[arch_col] / (ratings[arch_col].sum() / n_cards)
return ratings
def add_ratings_sum(self, ratings):
"""
Adds two columns to the ratings DataFrame: 'general' (color-coefficient weighted sum) and 'general_raw' (unweighted sum)
:return: the updated DataFrame
"""
rate_columns = [c for c in ratings.columns if c != "name"]
coef_per_archetype = self.archetypes.get_color_coeffs()
ratings["general"] = ratings[rate_columns].apply(lambda row: sum([row[c]*coef_per_archetype[c] for c in rate_columns]), axis=1)
ratings["general_raw"] = ratings[rate_columns].sum(axis=1)
return ratings
def get_nonbasic_lands_list(self):
"""
:return: a list of land names
"""
return self.cube_list[320:]
@staticmethod
def get_downloaded_images(lang="en"):
"""
Returns which cards we have downloaded images for (of the specified language, en or fr)
:return: list of card names
"""
path = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
Path(path).mkdir(parents=True, exist_ok=True)
file_names_en = [f for f in listdir(path) if
isfile(join(path, f))]
card_names = [f[:-4] for f in file_names_en]
card_names = [c.replace("__", "//") for c in card_names] # to handle double-faced cards
return card_names
def get_missing_images(self):
"""
Returns which cards we miss images for
:return: tuple: list of en missing, list of fr missing
"""
downloaded_images_en = IoManager.get_downloaded_images(lang="en")
downloaded_images_fr = IoManager.get_downloaded_images(lang="fr")
complete_list = self.cube_list + IoManager.BASIC_LANDS
missing_images_en = [card for card in complete_list if card not in downloaded_images_en]
missing_images_fr = [card for card in complete_list if card not in downloaded_images_fr]
return missing_images_en, missing_images_fr
def download_card_images(self, card_names, lang="en"):
"""
Downloads the en and fr card image of each card
:param lang: en or fr
:param card_names: list of card names
:return:
"""
for card_name in card_names:
print("Dowloading card imgs for \'" + card_name + "\' (" + lang + ")")
output_file_name = card_name + ".jpg"
output_file_path = IoManager.CARD_IMAGES_PATH_EN + "/" + output_file_name if lang == "en" else IoManager.CARD_IMAGES_PATH_FR + "/" + output_file_name
output_file_path = output_file_path.replace('//', '__')
en_url, fr_url = self.get_card_urls(card_name)
url = en_url if lang == "en" else fr_url
# Open the url image, set stream to True, this will return the stream content.
resp = requests.get(url, stream=True)
# Open a local file with wb ( write binary ) permission.
local_file = open(output_file_path, 'wb')
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
resp.raw.decode_content = True
# Copy the response stream raw data to local image file.
shutil.copyfileobj(resp.raw, local_file)
# Remove the image url response object.
del resp
def get_card_urls(self, card_name):
"""
:param card_name:
:return: tuple: en url, fr url
"""
if card_name in IoManager.BASIC_LANDS:
return IoManager.BASIC_LANDS_URLS[card_name], IoManager.BASIC_LANDS_URLS[card_name]
urls_df = self.base_infos_df[["name", "img_en_large", "img_fr_large"]]
card_row = urls_df[urls_df["name"] == card_name]
print(card_name)
en_url = card_row["img_en_large"].iloc[0]
fr_url = card_row["img_fr_large"].iloc[0]
return en_url, fr_url
def download_missing_images(self, only_english: bool = True):
"""
Checks for missing images, and downloads them if any are found
:return:
"""
print("\nChecking for missing images")
missing_images_en, missing_images_fr = self.get_missing_images()
for card_names, lang in [(missing_images_en, "en"), (missing_images_fr, "fr")]:
if card_names and (not only_english or lang == "en"):
self.download_card_images(card_names, lang)
@staticmethod
def get_img_path(card_name, lang="en"):
"""
:param card_name:
:param lang: en or fr
:return: path to the img
"""
imgs_folder = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
return imgs_folder + "/" + card_name + ".jpg"
@staticmethod
def get_sprite_path(sprite_name):
return IoManager.SPRITE_DIR_PATH + sprite_name
@staticmethod
def arch_presence_exists():
return os.path.isfile(IoManager.ARCH_PRESENCE_PATH)
def init_arch_presence(self):
|
@staticmethod
def get_arch_presence():
return pd.read_csv(IoManager.ARCH_PRESENCE_PATH, index_col=[0])
def save_arch_presence(self, arch_presence_entries):
"""
Adds the new entries to the db
:param arch_presence_entries: [[1, 0, 0, 1, 1, 0], [1, 0, 1...]...] 1 for archetype was present at the draft
:return: new_df
"""
df = IoManager.get_arch_presence()
print(len(arch_presence_entries[0]))
df2 = pd.DataFrame(data=arch_presence_entries, columns=self.archetypes.get_archetype_names(as_feature_names=True))
new_df = pd.concat([df, df2], sort=False)
new_df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return new_df
class DataFetcher:
"""
Used to get base card data from cube_list.txt using Scryfall api
"""
@staticmethod
def update_base_data():
cube_list_file_name = "cube_list.txt"
output_csv_name = "cube_list_base_data.csv"
#DataFetcher.clean_double_faced_from_cube_list(cube_list_file_name=cube_list_file_name)
new_base_data = DataFetcher.fetch_clean_save(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if new_base_data is not None:
print(new_base_data.tail(10))
@staticmethod
# Returns the infos for a card in english, with its fr img urls too
def get_card_data(card_name):
card_name = "\"" + card_name + "\""
time.sleep(0.1)
en_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name).json()
time.sleep(0.1)
fr_data = requests.get("https://api.scryfall.com | print("Initialising arch presence")
df = pd.DataFrame(columns=self.archetypes.get_archetype_names(as_feature_names=True))
df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return df | identifier_body |
IoManager.py | n_cards = len(ratings["monored"])
for arch_col in archetype_cols:
ratings[arch_col] = ratings[arch_col] / (ratings[arch_col].sum() / n_cards)
return ratings
def add_ratings_sum(self, ratings):
"""
Adds the 'general' and 'general_raw' columns to the ratings DataFrame
:return: the updated DataFrame
"""
rate_columns = [c for c in ratings.columns if c != "name"]
coef_per_archetype = self.archetypes.get_color_coeffs()
ratings["general"] = ratings[rate_columns].apply(lambda row: sum([row[c]*coef_per_archetype[c] for c in rate_columns]), axis=1)
ratings["general_raw"] = ratings[rate_columns].sum(axis=1)
return ratings
def get_nonbasic_lands_list(self):
"""
:return: a list of land names
"""
return self.cube_list[320:]
@staticmethod
def get_downloaded_images(lang="en"):
"""
Returns which cards we have downloaded images for (of the specified language, en or fr)
:return: list of card names
"""
path = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
Path(path).mkdir(parents=True, exist_ok=True)
file_names_en = [f for f in listdir(path) if
isfile(join(path, f))]
card_names = [f[:-4] for f in file_names_en]
card_names = [c.replace("__", "//") for c in card_names] # to handle double-faced cards
return card_names
def get_missing_images(self):
"""
Returns which cards we miss images for
:return: tuple: list of en missing, list of fr missing
"""
downloaded_images_en = IoManager.get_downloaded_images(lang="en")
downloaded_images_fr = IoManager.get_downloaded_images(lang="fr")
complete_list = self.cube_list + IoManager.BASIC_LANDS
missing_images_en = [card for card in complete_list if card not in downloaded_images_en]
missing_images_fr = [card for card in complete_list if card not in downloaded_images_fr]
return missing_images_en, missing_images_fr
def download_card_images(self, card_names, lang="en"):
"""
Downloads the card image of each card in the specified language
:param lang: en or fr
:param card_names: list of card names
:return:
"""
for card_name in card_names:
print("Downloading card imgs for \'" + card_name + "\' (" + lang + ")")
output_file_name = card_name + ".jpg"
output_file_path = IoManager.CARD_IMAGES_PATH_EN + "/" + output_file_name if lang == "en" else IoManager.CARD_IMAGES_PATH_FR + "/" + output_file_name
output_file_path = output_file_path.replace('//', '__')
en_url, fr_url = self.get_card_urls(card_name)
url = en_url if lang == "en" else fr_url
# Open the url image, set stream to True, this will return the stream content.
resp = requests.get(url, stream=True)
# Open a local file with wb ( write binary ) permission.
local_file = open(output_file_path, 'wb')
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
resp.raw.decode_content = True
# Copy the response stream raw data to local image file.
shutil.copyfileobj(resp.raw, local_file)
# Remove the image url response object.
del resp
def get_card_urls(self, card_name):
"""
:param card_name:
:return: tuple: en url, fr url
"""
if card_name in IoManager.BASIC_LANDS:
return IoManager.BASIC_LANDS_URLS[card_name], IoManager.BASIC_LANDS_URLS[card_name]
urls_df = self.base_infos_df[["name", "img_en_large", "img_fr_large"]]
card_row = urls_df[urls_df["name"] == card_name]
print(card_name)
en_url = card_row["img_en_large"].iloc[0]
fr_url = card_row["img_fr_large"].iloc[0]
return en_url, fr_url
def download_missing_images(self, only_english: bool = True):
"""
Checks for missing images, and downloads them if any are found
:return:
"""
print("\nChecking for missing images")
missing_images_en, missing_images_fr = self.get_missing_images()
for card_names, lang in [(missing_images_en, "en"), (missing_images_fr, "fr")]:
if card_names and (not only_english or lang == "en"):
self.download_card_images(card_names, lang)
@staticmethod
def get_img_path(card_name, lang="en"):
"""
:param card_name:
:param lang: en or fr
:return: path to the img
"""
imgs_folder = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
return imgs_folder + "/" + card_name + ".jpg"
@staticmethod
def get_sprite_path(sprite_name):
return IoManager.SPRITE_DIR_PATH + sprite_name
@staticmethod
def arch_presence_exists():
return os.path.isfile(IoManager.ARCH_PRESENCE_PATH)
def init_arch_presence(self):
print("Initialising arch presence")
df = pd.DataFrame(columns=self.archetypes.get_archetype_names(as_feature_names=True))
df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return df
@staticmethod
def get_arch_presence():
return pd.read_csv(IoManager.ARCH_PRESENCE_PATH, index_col=[0])
def save_arch_presence(self, arch_presence_entries):
"""
Adds the new entries to the db
:param arch_presence_entries: [[1, 0, 0, 1, 1, 0], [1, 0, 1...]...] 1 for archetype was present at the draft
:return: new_df
"""
df = IoManager.get_arch_presence()
print(len(arch_presence_entries[0]))
df2 = pd.DataFrame(data=arch_presence_entries, columns=self.archetypes.get_archetype_names(as_feature_names=True))
new_df = pd.concat([df, df2], sort=False)
new_df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return new_df
class DataFetcher:
"""
Used to get base card data from cube_list.txt using Scryfall api
"""
@staticmethod
def update_base_data():
cube_list_file_name = "cube_list.txt"
output_csv_name = "cube_list_base_data.csv"
#DataFetcher.clean_double_faced_from_cube_list(cube_list_file_name=cube_list_file_name)
new_base_data = DataFetcher.fetch_clean_save(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if new_base_data is not None:
print(new_base_data.tail(10))
@staticmethod
# Returns the infos for a card in english, with its fr img urls too
def get_card_data(card_name):
card_name = "\"" + card_name + "\""
time.sleep(0.1)
en_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name).json()
time.sleep(0.1)
fr_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name + "+lang%3Afr").json()
en_output = en_data["data"][0]
fr_output = fr_data["data"][0] if "data" in fr_data else None # fr version may not exist
if fr_output is None:
print("French missing for " + card_name)
# handling double-faced cards
if "card_faces" in en_output and en_output["layout"] == "modal_dfc":
full_name = en_output["name"]
en_output = {**en_output, **en_output["card_faces"][0]}
en_output["name"] = full_name
if fr_output is not None:
fr_output = fr_output["card_faces"][0]
return en_output, fr_output
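# Rough usage sketch (assumes network access and an exact Scryfall name match;
# the card and field values below are illustrative only):
# en, fr = DataFetcher.get_card_data("Lightning Bolt")
# en["name"] -> "Lightning Bolt"; en["image_uris"]["large"] -> URL of the large English scan
# fr is None when no French printing is indexed, so callers must handle that case.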
@staticmethod
# Returns a Dataframe containing the relevant fields of the cards in the list
def get_cards_data(card_names):
relevant_fields_en = [
"name",
"highres_image",
"image_uris",
"mana_cost",
"cmc",
"type_line",
"power",
"toughness",
"colors",
"color_identity"
]
relevant_fields_fr = [
"image_uris"
]
raw_data = [DataFetcher.get_card_data(card_name) for card_name in card_names]
df_content = {}
for field in relevant_fields_en:
df_content[field] = [data[0][field] if field in data[0] else np.nan for data in raw_data]
for field in relevant_fields_fr:
df_content[field + "_fr"] = [data[1][field] if data[1] is not None and field in data[1] else np.nan for data in
raw_data]
df = pd.DataFrame(df_content)
return df |
@staticmethod | random_line_split |
|
IoManager.py | ():
"""
:return: list of card names, cube list
"""
f = open(IoManager.CUBE_LIST_FILE_PATH, "r")
lines = f.readlines()
f.close()
# removing '\n' at the end of each name
lines = [card_name[:-1] for card_name in lines]
return lines
@staticmethod
def get_cards_base_info():
"""
:return: DataFrame containing data for each card, such as power, toughness, urls...
"""
df = pd.read_csv(IoManager.CARD_INFOS_FILE_PATH)
for list_feature in ["colors", "color_identity"]:
df[list_feature] = df[list_feature].apply(lambda e: e if type(e) != float else "[]")
df[list_feature] = df[list_feature].apply(ast.literal_eval)
return df
def get_ratings(self):
"""
Loads, scales, add sum and returns the card's ratings per archetype
:return: DataFrame containing each archetype rating for each card
"""
df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)
df = IoManager.scale_ratings(df)
df = IoManager.normalize_ratings_per_archetype(df)
df = self.add_ratings_sum(df)
# print(df[["name", "monogreen", "simic_ramp", "general"]].tail(60))
# print(df[["name", "general"]].sort_values(ascending=False, by="general").head(50))
return df
@staticmethod
def scale_ratings(ratings):
"""
The gap between a 3 and a 4 is wider than 1/3 better
:param ratings: df
:return: updated df
"""
mapping = {
2: 3,
3: 8,
4: 30,
5: 60
}
ratings = ratings.applymap(lambda e: mapping[e] if e in mapping else e)
return ratings
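# Worked example of the scaling above (hypothetical row): ratings [2, 3, 4, 5]
# become [3, 8, 30, 60], while values outside the mapping (e.g. 0 or 1) pass
# through unchanged, so top-end cards weigh far more than merely playable ones.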
@staticmethod
def normalize_ratings_per_archetype(ratings):
"""
Divides each rating by a value proportional to the sum of all the ratings in the archetype
:param ratings:
:return:
"""
archetype_cols = [c for c in ratings.columns if c != "name"]
n_cards = len(ratings["monored"])
for arch_col in archetype_cols:
ratings[arch_col] = ratings[arch_col] / (ratings[arch_col].sum() / n_cards)
return ratings
def add_ratings_sum(self, ratings):
"""
Adds the 'general' and 'general_raw' columns to the ratings DataFrame
:return: the updated DataFrame
"""
rate_columns = [c for c in ratings.columns if c != "name"]
coef_per_archetype = self.archetypes.get_color_coeffs()
ratings["general"] = ratings[rate_columns].apply(lambda row: sum([row[c]*coef_per_archetype[c] for c in rate_columns]), axis=1)
ratings["general_raw"] = ratings[rate_columns].sum(axis=1)
return ratings
def get_nonbasic_lands_list(self):
"""
:return: a list of land names
"""
return self.cube_list[320:]
@staticmethod
def get_downloaded_images(lang="en"):
"""
Returns which cards we have downloaded images for (of the specified language, en or fr)
:return: list of card names
"""
path = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
Path(path).mkdir(parents=True, exist_ok=True)
file_names_en = [f for f in listdir(path) if
isfile(join(path, f))]
card_names = [f[:-4] for f in file_names_en]
card_names = [c.replace("__", "//") for c in card_names] # to handle double-faced cards
return card_names
def get_missing_images(self):
"""
Returns which cards we miss images for
:return: tuple: list of en missing, list of fr missing
"""
downloaded_images_en = IoManager.get_downloaded_images(lang="en")
downloaded_images_fr = IoManager.get_downloaded_images(lang="fr")
complete_list = self.cube_list + IoManager.BASIC_LANDS
missing_images_en = [card for card in complete_list if card not in downloaded_images_en]
missing_images_fr = [card for card in complete_list if card not in downloaded_images_fr]
return missing_images_en, missing_images_fr
def download_card_images(self, card_names, lang="en"):
"""
Downloads the card image of each card in the specified language
:param lang: en or fr
:param card_names: list of card names
:return:
"""
for card_name in card_names:
print ("Downloading card imgs for \'" + card_name + "\' (" + lang + ")")
output_file_name = card_name + ".jpg"
output_file_path = IoManager.CARD_IMAGES_PATH_EN + "/" + output_file_name if lang == "en" else IoManager.CARD_IMAGES_PATH_FR + "/" + output_file_name
output_file_path = output_file_path.replace('//', '__')
en_url, fr_url = self.get_card_urls(card_name)
url = en_url if lang == "en" else fr_url
# Open the url image, set stream to True, this will return the stream content.
resp = requests.get(url, stream=True)
# Open a local file with wb ( write binary ) permission.
local_file = open(output_file_path, 'wb')
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
resp.raw.decode_content = True
# Copy the response stream raw data to local image file.
shutil.copyfileobj(resp.raw, local_file)
# Remove the image url response object.
del resp
def get_card_urls(self, card_name):
"""
:param card_name:
:return: tuple: en url, fr url
"""
if card_name in IoManager.BASIC_LANDS:
return IoManager.BASIC_LANDS_URLS[card_name], IoManager.BASIC_LANDS_URLS[card_name]
urls_df = self.base_infos_df[["name", "img_en_large", "img_fr_large"]]
card_row = urls_df[urls_df["name"] == card_name]
print(card_name)
en_url = card_row["img_en_large"].iloc[0]
fr_url = card_row["img_fr_large"].iloc[0]
return en_url, fr_url
def download_missing_images(self, only_english: bool = True):
"""
Checks for missing images, and downloads them if any are found
:return:
"""
print("\nChecking for missing images")
missing_images_en, missing_images_fr = self.get_missing_images()
for card_names, lang in [(missing_images_en, "en"), (missing_images_fr, "fr")]:
if card_names and (not only_english or lang == "en"):
self.download_card_images(card_names, lang)
@staticmethod
def get_img_path(card_name, lang="en"):
"""
:param card_name:
:param lang: en or fr
:return: path to the img
"""
imgs_folder = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
return imgs_folder + "/" + card_name + ".jpg"
@staticmethod
def get_sprite_path(sprite_name):
return IoManager.SPRITE_DIR_PATH + sprite_name
@staticmethod
def arch_presence_exists():
return os.path.isfile(IoManager.ARCH_PRESENCE_PATH)
def init_arch_presence(self):
print("Initialising arch presence")
df = pd.DataFrame(columns=self.archetypes.get_archetype_names(as_feature_names=True))
df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return df
@staticmethod
def get_arch_presence():
return pd.read_csv(IoManager.ARCH_PRESENCE_PATH, index_col=[0])
def save_arch_presence(self, arch_presence_entries):
"""
Adds the new entries to the db
:param arch_presence_entries: [[1, 0, 0, 1, 1, 0], [1, 0, 1...]...] 1 for archetype was present at the draft
:return: new_df
"""
df = IoManager.get_arch_presence()
print(len(arch_presence_entries[0]))
df2 = pd.DataFrame(data=arch_presence_entries, columns=self.archetypes.get_archetype_names(as_feature_names=True))
new_df = pd.concat([df, df2], sort=False)
new_df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return new_df
class DataFetcher:
"""
Used to get base card data from cube_list.txt using Scryfall api
"""
@staticmethod
def update_base_data():
cube_list_file_name = "cube_list.txt"
output_csv_name = "cube_list_base_data.csv"
#DataFetcher.clean_double_faced_from_cube_list(cube_list_file_name=cube_list_file_name)
new_base_data = DataFetcher.fetch_clean_save(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if new_base_data | get_cube_list | identifier_name |
|
IoManager.py | IoManager.SPRITE_DIR_PATH + sprite_name
@staticmethod
def arch_presence_exists():
return os.path.isfile(IoManager.ARCH_PRESENCE_PATH)
def init_arch_presence(self):
print("Initialising arch presence")
df = pd.DataFrame(columns=self.archetypes.get_archetype_names(as_feature_names=True))
df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return df
@staticmethod
def get_arch_presence():
return pd.read_csv(IoManager.ARCH_PRESENCE_PATH, index_col=[0])
def save_arch_presence(self, arch_presence_entries):
"""
Adds the new entries to the db
:param arch_presence_entries: [[1, 0, 0, 1, 1, 0], [1, 0, 1...]...] 1 for archetype was present at the draft
:return: new_df
"""
df = IoManager.get_arch_presence()
print(len(arch_presence_entries[0]))
df2 = pd.DataFrame(data=arch_presence_entries, columns=self.archetypes.get_archetype_names(as_feature_names=True))
new_df = pd.concat([df, df2], sort=False)
new_df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return new_df
class DataFetcher:
"""
Used to get base card data from cube_list.txt using Scryfall api
"""
@staticmethod
def update_base_data():
cube_list_file_name = "cube_list.txt"
output_csv_name = "cube_list_base_data.csv"
#DataFetcher.clean_double_faced_from_cube_list(cube_list_file_name=cube_list_file_name)
new_base_data = DataFetcher.fetch_clean_save(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if new_base_data is not None:
print(new_base_data.tail(10))
@staticmethod
# Returns the infos for a card in english, with its fr img urls too
def get_card_data(card_name):
card_name = "\"" + card_name + "\""
time.sleep(0.1)
en_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name).json()
time.sleep(0.1)
fr_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name + "+lang%3Afr").json()
en_output = en_data["data"][0]
fr_output = fr_data["data"][0] if "data" in fr_data else None # fr version may not exist
if fr_output is None:
print("French missing for " + card_name)
# handling double-faced cards
if "card_faces" in en_output and en_output["layout"] == "modal_dfc":
full_name = en_output["name"]
en_output = {**en_output, **en_output["card_faces"][0]}
en_output["name"] = full_name
if fr_output is not None:
fr_output = fr_output["card_faces"][0]
return en_output, fr_output
@staticmethod
# Returns a Dataframe containing the relevant fields of the cards in the list
def get_cards_data(card_names):
relevant_fields_en = [
"name",
"highres_image",
"image_uris",
"mana_cost",
"cmc",
"type_line",
"power",
"toughness",
"colors",
"color_identity"
]
relevant_fields_fr = [
"image_uris"
]
raw_data = [DataFetcher.get_card_data(card_name) for card_name in card_names]
df_content = {}
for field in relevant_fields_en:
df_content[field] = [data[0][field] if field in data[0] else np.nan for data in raw_data]
for field in relevant_fields_fr:
df_content[field + "_fr"] = [data[1][field] if data[1] is not None and field in data[1] else np.nan for data in
raw_data]
df = pd.DataFrame(df_content)
return df
@staticmethod
def clean_double_faced_from_cube_list(cube_list_file_name):
# removes the second face name for each double faced card
f = open("data/" + cube_list_file_name, "r")
lines = f.readlines()
f.close()
def rm_second_face(line):
if "//" in line:
return line.split(" //")[0] + "\n"
return line
lines = [rm_second_face(l) for l in lines]
f = open("data/" + cube_list_file_name, "w")
f.write("".join(lines))
f.close()
@staticmethod
def get_cube_list(cube_list_file_name):
f = open("data/" + cube_list_file_name, "r")
lines = f.readlines()
f.close()
# removing '\n' at the end of each name
lines = [card_name[:-1] for card_name in lines]
return lines
@staticmethod
def infer_new_cards(cube_list, output_csv_name):
prev_ratings = pd.read_csv("data/" + output_csv_name)
new_cards = [c for c in cube_list if c not in prev_ratings.name.to_list()]
print("There are {} new cards: \n{}".format(len(new_cards), new_cards))
return new_cards
@staticmethod
# gets the cube list, fetches the data for each card, and saves the data as a csv
def fetch_cube_data(cube_list_file_name, output_csv_name):
cube_list = DataFetcher.get_cube_list(cube_list_file_name)
new_cards = DataFetcher.infer_new_cards(cube_list, output_csv_name=output_csv_name)
if not new_cards:
return pd.DataFrame()
cube_data = DataFetcher.get_cards_data(new_cards)
return cube_data
@staticmethod
# creates separate features to store each img url
def clean_image_urls(cube_data):
for lang in ["en", "fr"]:
for image_type in ["small", "normal", "large", "png"]:
feature_name = "img_" + lang + "_" + image_type
current_feature = "image_uris" if lang == "en" else "image_uris_fr"
cube_data[feature_name] = cube_data[current_feature].apply(
lambda d: d[image_type] if type(d) != float and d != None and image_type in d else np.nan)
@staticmethod
def clean_colors(cube_data):
colors = ["W", "U", "B", "R", "G"]
color_pairs = ["WU", "WB", "WR", "WG", "UB", "UR", "UG", "BR", "BR", "RG"]
for color in colors:
cube_data[color] = cube_data["color_identity"].apply(lambda l: 1 if color in l else 0)
for c, c2 in color_pairs:
cube_data[c + c2] = cube_data["color_identity"].apply(lambda l: 1 if c in l and c2 in l else 0)
@staticmethod
def clean_type_line(cube_data):
cube_data["type_line"] = cube_data["type_line"].str.replace(' —', ':')
@staticmethod
def clean_cmc(cube_data):
cube_data["cmc"] = cube_data["cmc"].astype(int)
@staticmethod
def remove_old_columns(cube_data):
old_columns = ["image_uris", "image_uris_fr"]
valid_columns = [c for c in cube_data.columns if c not in old_columns]
return cube_data[valid_columns]
@staticmethod
def clean_booleans(cube_data):
cube_data["highres_image"] = cube_data["highres_image"].astype(int)
@staticmethod
def clean_cube_data(cube_data):
DataFetcher.clean_image_urls(cube_data)
DataFetcher.clean_colors(cube_data)
DataFetcher.clean_type_line(cube_data)
DataFetcher.clean_cmc(cube_data)
DataFetcher.clean_booleans(cube_data)
return DataFetcher.remove_old_columns(cube_data)
@staticmethod
def save_csv(cube_data, output_csv_name, cube_list_file_name):
current_data = pd.read_csv("data/" + output_csv_name)
new_cards = DataFetcher.infer_new_cards(cube_list=DataFetcher.get_cube_list(cube_list_file_name=cube_list_file_name), output_csv_name=output_csv_name)
new_rows = cube_data[cube_data.name.isin(new_cards)]
new_cube_data = current_data.append(new_rows).reset_index(drop=True)
new_cube_data.to_csv("data/" + output_csv_name, index=False)
return new_cube_data
@staticmethod
# does it all
def fetch_clean_save(cube_list_file_name, output_csv_name):
cube_data = DataFetcher.fetch_cube_data(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if len(cube_data.index) == 0:
return None
cube_data_clean = DataFetcher.clean_cube_data(cube_data)
return DataFetcher.save_csv(cube_data=cube_data, output_csv_name=output_csv_name, cube_list_file_name=cube_list_file_name)
class RatingsInitializer:
@staticmethod
def prepare_new_ratings(archetypes):
new_ratings = RatingsInitializer.setup_for_new_cards(RatingsInitializer.load_cards_df(), archetypes)
if new_ratings is not None:
Ra | tingsInitializer.save_csv(new_ratings)
exit()
| conditional_block |
|
clean_summaries.py | 'novelguide', 'thebestnotes']
for ix, source in tqdm(enumerate(sources)):
print ("Cleaning source: ", source)
source_summary_dir_base = "../cleaning_phase/"
dest_dir_base = "../finished_summaries/"
source_summary_dir = os.path.join(source_summary_dir_base, source)
dest_dir = os.path.join(dest_dir_base, source)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
"""
The script cleans the leftover unicode characters along with some analysis present in the text,
as in /export/longsum/chaptersum/cleaned_summaries/gradesaver/Alice in Wonderland
Weird - /export/longsum/chaptersum/cleaned_summaries/gradesaver/Winesburg, Ohio
This one should be able to fix all the occurrences of Analysis inside the summary
Prefix cleanup we can leave for the next script?
Just cleanup prefixes with summary and summary & analysis
"""
def remove_section_prefixes_suffixes(summary, name):
pat_suffix = '(.*)(Commentary (.*))'
| summary = summary.replace(to_remove, "")
pat_prefix = '((.*?){}) (.*$)'.format(name)
if re.search(pat_prefix, summary, re.IGNORECASE):
matched_str = re.match(pat_prefix, summary, re.IGNORECASE)
print (matched_str.groups())
to_remove = matched_str.group(2) # Everything after the Commentary keyword
# summary = summary.replace(to_remove, "")
exit()
def remove_summary_analysis_prefix(line):
pat = '^((.*?)summary|analysis|summary and analysis|summary & analysis)[ ]{0,}[-:]?'
if re.search(pat, line, re.IGNORECASE):
to_replace = re.match(pat, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
return line.strip()
def remove_chapter_prefixes(line):
pat_act_scene = '(.*?)((act) ([ivxl|0-9]{1,})[,-]{0,}[ ]{1,}(scene) ([ivxl|0-9]{1,}))'
pat2 = '^((chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
of_pat2 = '^(of (chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
pat3 = '^((chapters|chapter|act) ([ivxl|0-9]{1,})[:-]{0,})(.*$)'
pat_nl = '^((chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$)'
of_pat_nl = '^((of (chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$))'
# Also removes chapter prefix
# TODO:Check why not working
#Should also remove everything before the section name
if re.search(pat_act_scene, line, re.IGNORECASE):
to_replace = re.match(pat_act_scene, line, re.IGNORECASE).group(2)
line = line.replace(to_replace,"")
if re.search(pat_nl, line, re.IGNORECASE):
to_replace = re.match(pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat_nl, line, re.IGNORECASE):
to_replace = re.match(of_pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat2, line, re.IGNORECASE):
to_replace = re.match(of_pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat2, line, re.IGNORECASE):
to_replace = re.match(pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat3, line, re.IGNORECASE):
to_replace = re.match(pat3, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
return line.strip()
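# Hypothetical before/after examples for the two cleaners above:
# remove_summary_analysis_prefix("Summary: Tom finds the treasure") -> "Tom finds the treasure"
# remove_chapter_prefixes("Chapter 4: Tom finds the treasure") -> "Tom finds the treasure"
# remove_chapter_prefixes("Act I, Scene 2 Romeo speaks") -> "Romeo speaks"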
book_count = 0
for item in os.listdir(source_summary_dir):
book_count += 1
item_dir = os.path.join(source_summary_dir, item)
book_dir = os.path.join(dest_dir, item)
print ("item_dir: ", item_dir)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
else:
continue
for section in os.listdir(item_dir):
summary_path = os.path.join(item_dir, section)
fp = open(summary_path,"r")
try:
summary_json = json.loads(fp.readlines()[0])
except:
print (item_dir, "=Error reading json==", section)
# continue
new_json_dict = {}
new_json_dict['name'] = unidecode(summary_json['name'])
if 'url' in summary_json:
new_json_dict['url'] = unidecode(summary_json['url'])
summary_list = []
analysis_list = []
analysis_already_present = 0
if 'analysis' in summary_json and summary_json['analysis'] is not None and summary_json['analysis'].strip() != "":
# print ("Analysis already present")
analysis_already_present = 1
for line in summary_json['analysis'].split("<PARAGRAPH>"):
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(cleaned_line)
analysis_start = 0
start_adding_lines = 0
summary_content = remove_summary_analysis_prefix(summary_json['summary'])
#Filter out all the notes and the commentary before the Summary keywords
#So when we filter for Notes later, it won't conflict
for line in summary_content.split("<PARAGRAPH>"):
# if analysis keyword is present, break the lines by (.) period
# and then ignore the lines after the 'Analysis' keyword
if 'Analysis' in line or 'Commentary' in line or 'Notes' in line:
# print ("ANALYSIS/COMMENTARY/NOTES IN ABOVE LINE")
sub_lines = list(filter(None, re.split("[.'\"!?]", line)))
summary_sub_lines_to_include = []
analysis_sub_lines_to_include = []
# do not extract the analysis if there is already a separate section present for it
# 'Analysis' keyword should be at the beginning of the line for extraction
pat = "^(Analysis|Commentary|Notes)"
for sub_line in sub_lines:
sub_line = sub_line.strip()
# if the line begins with the keyword 'Analysis'
if re.match(pat, sub_line):
analysis_start = 1
# if analysis_start and not analysis_already_present:
# We may have some left over analysis from the text
if analysis_start:
analysis_sub_lines_to_include.append(sub_line)
# we don't know if we want the whole line to be included
# But if there is only one line which has the summary as well as the analysis,
# all the sub-lines after the analysis keyword is found would be added
else:
summary_sub_lines_to_include.append(sub_line)
cleaned_summ_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(summary_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip()))
if cleaned_summ_line != "":
summary_list.append(cleaned_summ_line)
cleaned_analysis_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(analysis_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip() ))
if cleaned_analysis_line != "":
analysis_list.append(cleaned_analysis_line)
# If analysis_already_present in json = 1, then we don't need to wait for the analysis start tag to add stuff to the summary
# Otherwise, we keep adding lines to the summary which do not belong | if re.search(pat_suffix, summary, re.IGNORECASE):
matched_str = re.match(pat_suffix, summary, re.IGNORECASE)
to_remove = matched_str.group(2) # Everything after the Commentary keyword | random_line_split |
clean_summaries.py | 'novelguide', 'thebestnotes']
for ix, source in tqdm(enumerate(sources)):
print ("Cleaning source: ", source)
source_summary_dir_base = "../cleaning_phase/"
dest_dir_base = "../finished_summaries/"
source_summary_dir = os.path.join(source_summary_dir_base, source)
dest_dir = os.path.join(dest_dir_base, source)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
"""
The script cleans the leftover unicode characters along with some analysis present in the text,
as in /export/longsum/chaptersum/cleaned_summaries/gradesaver/Alice in Wonderland
Weird - /export/longsum/chaptersum/cleaned_summaries/gradesaver/Winesburg, Ohio
This one should be able to fix all the occurrences of Analysis inside the summary
Prefix cleanup we can leave for the next script?
Just cleanup prefixes with summary and summary & analysis
"""
def remove_section_prefixes_suffixes(summary, name):
pat_suffix = '(.*)(Commentary (.*))'
if re.search(pat_suffix, summary, re.IGNORECASE):
matched_str = re.match(pat_suffix, summary, re.IGNORECASE)
to_remove = matched_str.group(2) # Everything after the Commentary keyword
summary = summary.replace(to_remove, "")
pat_prefix = '((.*?){}) (.*$)'.format(name)
if re.search(pat_prefix, summary, re.IGNORECASE):
matched_str = re.match(pat_prefix, summary, re.IGNORECASE)
print (matched_str.groups())
to_remove = matched_str.group(2) # Everything after the Commentary keyword
# summary = summary.replace(to_remove, "")
exit()
def | (line):
pat = '^((.*?)summary|analysis|summary and analysis|summary & analysis)[ ]{0,}[-:]?'
if re.search(pat, line, re.IGNORECASE):
to_replace = re.match(pat, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
return line.strip()
def remove_chapter_prefixes(line):
pat_act_scene = '(.*?)((act) ([ivxl|0-9]{1,})[,-]{0,}[ ]{1,}(scene) ([ivxl|0-9]{1,}))'
pat2 = '^((chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
of_pat2 = '^(of (chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
pat3 = '^((chapters|chapter|act) ([ivxl|0-9]{1,})[:-]{0,})(.*$)'
pat_nl = '^((chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$)'
of_pat_nl = '^((of (chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$))'
# Also removes chapter prefix
# TODO:Check why not working
#Should also remove everything before the section name
if re.search(pat_act_scene, line, re.IGNORECASE):
to_replace = re.match(pat_act_scene, line, re.IGNORECASE).group(2)
line = line.replace(to_replace,"")
if re.search(pat_nl, line, re.IGNORECASE):
to_replace = re.match(pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat_nl, line, re.IGNORECASE):
to_replace = re.match(of_pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat2, line, re.IGNORECASE):
to_replace = re.match(of_pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat2, line, re.IGNORECASE):
to_replace = re.match(pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat3, line, re.IGNORECASE):
to_replace = re.match(pat3, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
return line.strip()
book_count = 0
for item in os.listdir(source_summary_dir):
book_count += 1
item_dir = os.path.join(source_summary_dir, item)
book_dir = os.path.join(dest_dir, item)
print ("item_dir: ", item_dir)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
else:
continue
for section in os.listdir(item_dir):
summary_path = os.path.join(item_dir, section)
fp = open(summary_path,"r")
try:
summary_json = json.loads(fp.readlines()[0])
except:
print (item_dir, "=Error reading json==", section)
# continue
new_json_dict = {}
new_json_dict['name'] = unidecode(summary_json['name'])
if 'url' in summary_json:
new_json_dict['url'] = unidecode(summary_json['url'])
summary_list = []
analysis_list = []
analysis_already_present = 0
if 'analysis' in summary_json and summary_json['analysis'] is not None and summary_json['analysis'].strip() != "":
# print ("Analysis already present")
analysis_already_present = 1
for line in summary_json['analysis'].split("<PARAGRAPH>"):
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(cleaned_line)
analysis_start = 0
start_adding_lines = 0
summary_content = remove_summary_analysis_prefix(summary_json['summary'])
#Filter out all the notes and the commentary before the Summary keywords
#So when we filter for Notes later, it won't conflict
for line in summary_content.split("<PARAGRAPH>"):
# if analysis keyword is present, break the lines by (.) period
# and then ignore the lines after the 'Analysis' keyword
if 'Analysis' in line or 'Commentary' in line or 'Notes' in line:
# print ("ANALYSIS/COMMENTARY/NOTES IN ABOVE LINE")
sub_lines = list(filter(None, re.split("[.'\"!?]", line)))
summary_sub_lines_to_include = []
analysis_sub_lines_to_include = []
# do not extract the analysis if there is already a separate section present for it
# 'Analysis' keyword should be at the beginning of the line for extraction
pat = "^(Analysis|Commentary|Notes)"
for sub_line in sub_lines:
sub_line = sub_line.strip()
# if the line begins with the keyword 'Analysis'
if re.match(pat, sub_line):
analysis_start = 1
# if analysis_start and not analysis_already_present:
# We may have some left over analysis from the text
if analysis_start:
analysis_sub_lines_to_include.append(sub_line)
# we don't know if we want the whole line to be included
# But if there is only one line which has the summary as well as the analysis,
# all the sub-lines after the analysis keyword is found would be added
else:
summary_sub_lines_to_include.append(sub_line)
cleaned_summ_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(summary_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip()))
if cleaned_summ_line != "":
summary_list.append(cleaned_summ_line)
cleaned_analysis_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(analysis_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip() ))
if cleaned_analysis_line != "":
analysis_list.append(cleaned_analysis_line)
# If analysis_already_present in json = 1, then we don't need to wait for the analysis start tag to add stuff to the summary
# Otherwise, we keep adding lines to the summary which do | remove_summary_analysis_prefix | identifier_name |
clean_summaries.py | novelguide', 'thebestnotes']
for ix, source in tqdm(enumerate(sources)):
print ("Cleaning source: ", source)
source_summary_dir_base = "../cleaning_phase/"
dest_dir_base = "../finished_summaries/"
source_summary_dir = os.path.join(source_summary_dir_base, source)
dest_dir = os.path.join(dest_dir_base, source)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
"""
The script cleans the leftover unicode characters along with some analysis present in the text,
as in /export/longsum/chaptersum/cleaned_summaries/gradesaver/Alice in Wonderland
Weird - /export/longsum/chaptersum/cleaned_summaries/gradesaver/Winesburg, Ohio
This one should be able to fix all the occurrences of Analysis inside the summary
Prefix cleanup we can leave for the next script?
Just cleanup prefixes with summary and summary & analysis
"""
def remove_section_prefixes_suffixes(summary, name):
pat_suffix = '(.*)(Commentary (.*))'
if re.search(pat_suffix, summary, re.IGNORECASE):
matched_str = re.match(pat_suffix, summary, re.IGNORECASE)
to_remove = matched_str.group(2) # Everything after the Commentary keyword
summary = summary.replace(to_remove, "")
pat_prefix = '((.*?){}) (.*$)'.format(name)
if re.search(pat_prefix, summary, re.IGNORECASE):
matched_str = re.match(pat_prefix, summary, re.IGNORECASE)
print (matched_str.groups())
to_remove = matched_str.group(2) # Everything after the Commentary keyword
# summary = summary.replace(to_remove, "")
exit()
def remove_summary_analysis_prefix(line):
pat = '^((.*?)summary|analysis|summary and analysis|summary & analysis)[ ]{0,}[-:]?'
if re.search(pat, line, re.IGNORECASE):
to_replace = re.match(pat, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
return line.strip()
def remove_chapter_prefixes(line):
pat_act_scene = '(.*?)((act) ([ivxl|0-9]{1,})[,-]{0,}[ ]{1,}(scene) ([ivxl|0-9]{1,}))'
pat2 = '^((chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
of_pat2 = '^(of (chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
pat3 = '^((chapters|chapter|act) ([ivxl|0-9]{1,})[:-]{0,})(.*$)'
pat_nl = '^((chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$)'
of_pat_nl = '^((of (chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$))'
# Also removes chapter prefix
# TODO:Check why not working
#Should also remove everything before the section name
if re.search(pat_act_scene, line, re.IGNORECASE):
to_replace = re.match(pat_act_scene, line, re.IGNORECASE).group(2)
line = line.replace(to_replace,"")
if re.search(pat_nl, line, re.IGNORECASE):
to_replace = re.match(pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat_nl, line, re.IGNORECASE):
to_replace = re.match(of_pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat2, line, re.IGNORECASE):
to_replace = re.match(of_pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat2, line, re.IGNORECASE):
to_replace = re.match(pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat3, line, re.IGNORECASE):
to_replace = re.match(pat3, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
return line.strip()
book_count = 0
for item in os.listdir(source_summary_dir):
book_count += 1
item_dir = os.path.join(source_summary_dir, item)
book_dir = os.path.join(dest_dir, item)
print ("item_dir: ", item_dir)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
else:
continue
for section in os.listdir(item_dir):
summ |
if 'analysis' in summary_json and summary_json['analysis'] is not None and summary_json['analysis'].strip() != "":
# print ("Analysis already present")
analysis_already_present = 1
for line in summary_json['analysis'].split("<PARAGRAPH>"):
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(cleaned_line)
analysis_start = 0
start_adding_lines = 0
summary_content = remove_summary_analysis_prefix(summary_json['summary'])
#Filter out all the notes and the commentary before the Summary keywords
#So when we filter for Notes later, it won't conflict
for line in summary_content.split("<PARAGRAPH>"):
# if analysis keyword is present, break the lines by (.) period
# and then ignore the lines after the 'Analysis' keyword
if 'Analysis' in line or 'Commentary' in line or 'Notes' in line:
# print ("ANALYSIS/COMMENTARY/NOTES IN ABOVE LINE")
sub_lines = list(filter(None, re.split("[.'\"!?]", line)))
summary_sub_lines_to_include = []
analysis_sub_lines_to_include = []
# do not extract the analysis if there is already a separate section present for it
# 'Analysis' keyword should be at the beginning of the line for extraction
pat = "^(Analysis|Commentary|Notes)"
for sub_line in sub_lines:
sub_line = sub_line.strip()
# if the line begins with the keyword 'Analysis'
if re.match(pat, sub_line):
analysis_start = 1
# if analysis_start and not analysis_already_present:
# We may have some left over analysis from the text
if analysis_start:
analysis_sub_lines_to_include.append(sub_line)
# we don't know if we want the whole line to be included
# But if there is only one line which has the summary as well as the analysis,
# all the sub-lines after the analysis keyword is found would be added
else:
summary_sub_lines_to_include.append(sub_line)
cleaned_summ_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(summary_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip()))
if cleaned_summ_line != "":
summary_list.append(cleaned_summ_line)
cleaned_analysis_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(analysis_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip() ))
if cleaned_analysis_line != "":
analysis_list.append(cleaned_analysis_line)
# If analysis_already_present in json = 1, then we don't need to wait for the analysis start tag to add stuff to the summary
# Otherwise, we keep adding lines to the summary which do not | ary_path = os.path.join(item_dir, section)
fp = open(summary_path,"r")
try:
summary_json = json.loads(fp.readlines()[0])
except:
print (item_dir, "=Error reading json==", section)
# continue
new_json_dict = {}
new_json_dict['name'] = unidecode(summary_json['name'])
if 'url' in summary_json:
new_json_dict['url'] = unidecode(summary_json['url'])
summary_list = []
analysis_list = []
analysis_already_present = 0 | conditional_block |
clean_summaries.py | 'novelguide', 'thebestnotes']
for ix, source in tqdm(enumerate(sources)):
print ("Cleaning source: ", source)
source_summary_dir_base = "../cleaning_phase/"
dest_dir_base = "../finished_summaries/"
source_summary_dir = os.path.join(source_summary_dir_base, source)
dest_dir = os.path.join(dest_dir_base, source)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
"""
The script cleans the leftover unicode characters along with some analysis present in the text,
as in /export/longsum/chaptersum/cleaned_summaries/gradesaver/Alice in Wonderland
Weird - /export/longsum/chaptersum/cleaned_summaries/gradesaver/Winesburg, Ohio
This one should be able to fix all the occurrences of Analysis inside the summary
Prefix cleanup we can leave for the next script?
Just cleanup prefixes with summary and summary & analysis
"""
def remove_section_prefixes_suffixes(summary, name):
|
def remove_summary_analysis_prefix(line):
pat = '^((.*?)summary|analysis|summary and analysis|summary & analysis)[ ]{0,}[-:]?'
if re.search(pat, line, re.IGNORECASE):
to_replace = re.match(pat, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
return line.strip()
def remove_chapter_prefixes(line):
pat_act_scene = '(.*?)((act) ([ivxl|0-9]{1,})[,-]{0,}[ ]{1,}(scene) ([ivxl|0-9]{1,}))'
pat2 = '^((chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
of_pat2 = '^(of (chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
pat3 = '^((chapters|chapter|act) ([ivxl|0-9]{1,})[:-]{0,})(.*$)'
pat_nl = '^((chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$)'
of_pat_nl = '^((of (chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$))'
# Also removes chapter prefix
# TODO:Check why not working
#Should also remove everything before the section name
if re.search(pat_act_scene, line, re.IGNORECASE):
to_replace = re.match(pat_act_scene, line, re.IGNORECASE).group(2)
line = line.replace(to_replace,"")
if re.search(pat_nl, line, re.IGNORECASE):
to_replace = re.match(pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat_nl, line, re.IGNORECASE):
to_replace = re.match(of_pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat2, line, re.IGNORECASE):
to_replace = re.match(of_pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat2, line, re.IGNORECASE):
to_replace = re.match(pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat3, line, re.IGNORECASE):
to_replace = re.match(pat3, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
return line.strip()
book_count = 0
for item in os.listdir(source_summary_dir):
book_count += 1
item_dir = os.path.join(source_summary_dir, item)
book_dir = os.path.join(dest_dir, item)
print ("item_dir: ", item_dir)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
else:
continue
for section in os.listdir(item_dir):
summary_path = os.path.join(item_dir, section)
fp = open(summary_path,"r")
try:
summary_json = json.loads(fp.readlines()[0])
except:
print (item_dir, "=Error reading json==", section)
# continue
new_json_dict = {}
new_json_dict['name'] = unidecode(summary_json['name'])
if 'url' in summary_json:
new_json_dict['url'] = unidecode(summary_json['url'])
summary_list = []
analysis_list = []
analysis_already_present = 0
if 'analysis' in summary_json and summary_json['analysis'] is not None and summary_json['analysis'].strip() != "":
# print ("Analysis already present")
analysis_already_present = 1
for line in summary_json['analysis'].split("<PARAGRAPH>"):
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(cleaned_line)
analysis_start = 0
start_adding_lines = 0
summary_content = remove_summary_analysis_prefix(summary_json['summary'])
#Filter out all the notes and the commentary before the Summary keywords
#So when we filter for Notes later, it won't conflict
for line in summary_content.split("<PARAGRAPH>"):
# if analysis keyword is present, break the lines by (.) period
# and then ignore the lines after the 'Analysis' keyword
if 'Analysis' in line or 'Commentary' in line or 'Notes' in line:
# print ("ANALYSIS/COMMENTARY/NOTES IN ABOVE LINE")
sub_lines = list(filter(None, re.split("[.'\"!?]", line)))
summary_sub_lines_to_include = []
analysis_sub_lines_to_include = []
# do not extract the analysis if there is already a separate section present for it
# 'Analysis' keyword should be at the beginning of the line for extraction
pat = "^(Analysis|Commentary|Notes)"
for sub_line in sub_lines:
sub_line = sub_line.strip()
# if the line begins with the keyword 'Analysis'
if re.match(pat, sub_line):
analysis_start = 1
# if analysis_start and not analysis_already_present:
# We may have some left over analysis from the text
if analysis_start:
analysis_sub_lines_to_include.append(sub_line)
# we don't know if we want the whole line to be included
# But if there is only one line which has the summary as well as the analysis,
# all the sub-lines after the analysis keyword is found would be added
else:
summary_sub_lines_to_include.append(sub_line)
cleaned_summ_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(summary_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip()))
if cleaned_summ_line != "":
summary_list.append(cleaned_summ_line)
cleaned_analysis_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(analysis_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip() ))
if cleaned_analysis_line != "":
analysis_list.append(cleaned_analysis_line)
# If analysis_already_present in json = 1, then we don't need to wait for the analysis start tag to add stuff to the summary
# Otherwise, we keep adding lines to the summary which do | pat_suffix = '(.*)(Commentary (.*))'
if re.search(pat_suffix, summary, re.IGNORECASE):
matched_str = re.match(pat_suffix, summary, re.IGNORECASE)
to_remove = matched_str.group(2) # Everything after the Commentary keyword
summary = summary.replace(to_remove, "")
pat_prefix = '((.*?){}) (.*$)'.format(name)
if re.search(pat_prefix, summary, re.IGNORECASE):
matched_str = re.match(pat_prefix, summary, re.IGNORECASE)
print (matched_str.groups())
to_remove = matched_str.group(2) # Everything after the Commentary keyword
# summary = summary.replace(to_remove, "")
exit() | identifier_body |
Util.js | m = m.substring(1, m.length - 1);
}
path = path + m;
let propertyReference;
try {
propertyReference = ref[m];
} catch (e) {
// TODO: proxy has been revoked
}
if (typeof propertyReference !== 'undefined') {
ref = propertyReference;
} else {
// TODO: maybe logging?
// throw new Error('Undefined property "' + path + '"');
return;
}
path = path + '->';
}
return ref;
},
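// Illustrative call (the object and path here are made up, not part of the library):
// propertyFromPath({user: {tags: ['a', 'b']}}, 'user.tags[1]') resolves
// 'user' -> 'tags' -> '1' and returns 'b'; any unresolved segment yields undefined.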
/**
* Creates a copy of a given object.
* @param {object} obj The source object.
* @return {object} The object copy.
* @memberOf Utils
*/
cloneObject(obj) {
if (obj === null || typeof obj !== 'object') {
return obj;
}
// Give temp the original object's constructor
let temp = obj;
try {
temp = obj.constructor();
for (const key in obj) {
if (obj.hasOwnProperty(key)) {
temp[key] = this.cloneObject(obj[key]);
}
}
} catch (e) {
// TODO: should warn when clone is not possible
}
return temp;
},
// TODO: deprecate `hasPassiveEvents`
/**
* Returns true if browser supports passive events.
* @return {boolean} True if supported, otherwise false.
* @memberOf Utils
*/
hasPassiveEvents() {
let supportsPassive = false;
try {
const opts = Object.defineProperty({}, 'passive', {
get: function() {
supportsPassive = true;
}
});
window.addEventListener('testPassive', null, opts);
window.removeEventListener('testPassive', null, opts);
} catch (e) {}
return supportsPassive;
},
/**
* Converts the given string to `camelCase`
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
hyphensToCamelCase(s) {
return typeof s === 'string' ? s.replace(/--/g, ':').replace(/-([a-z0-9_$-])/g, function(g) {
return '_$-'.indexOf(g[1]) > -1 || (+g[1]).toString() === g[1] ?
'_' + g[1].replace('-', '_') : g[1].toUpperCase();
}).replace(/:/g, '-') : s;
},
/**
* Converts the given string to `kebab-case`.
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
camelCaseToHyphens(s) {
if (typeof s !== 'string') return s;
s = s.replace(/(^\w)|(\s+\w)/g, function(letter) {
return letter.toUpperCase();
}).replace(/\s/g, '');
return s.split(/(?=[A-Z])/).join('-').toLowerCase();
},
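// Example conversions (the attribute-style name is only an illustration):
// hyphensToCamelCase('data-ui-load') -> 'dataUiLoad'
// camelCaseToHyphens('dataUiLoad') -> 'data-ui-load'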
/**
* Normalizes controller code (ES5/ES6+).
* @param {string} javascriptCode The JS code to normalize.
* @return {string} Normalized JS controller code.
* @memberOf Utils
*/
normalizeControllerCode(javascriptCode) {
if (javascriptCode.indexOf('module.exports') >= 0) {
return '\'use strict\'; let module = {}; ' + javascriptCode + ';\nreturn module.exports;';
} else {
// TODO: improve code parsing
let code = javascriptCode;
const fni = javascriptCode.indexOf('function ');
const fnz = javascriptCode.indexOf('zuix.controller');
const fnc = javascriptCode.indexOf('class ');
if (fnc >= 0 && (fnc < fni || fni === -1) && (fnc < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fnc) + 'return ' + javascriptCode.substring(fnc);
} else if (fni >= 0 && (fni < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fni) + 'return ' + javascriptCode.substring(fni);
} else if (fnz !== -1) {
code = javascriptCode.substring(0, fnz) + 'return ' + javascriptCode.substring(fnz + 15);
}
return code;
}
},
/**
* Catches errors occurred in the specified component context.
* @param {ComponentContext} ctx The component context.
* @param {function} fn Function code to execute.
* @param {function} errorCallback Error callback.
* @memberOf Utils
*/
catchContextError(ctx, fn, errorCallback) {
try {
fn();
} catch (err) {
ctx._error = err;
if (errorCallback) errorCallback(err);
if (err && ctx.options().error) {
(ctx.options().error)
.call(ctx, err, ctx);
} else {
console.error(err);
}
}
},
/**
* @namespace dom
* @memberof Utils
*/
dom: {
/**
* Gets CSS query for matching the given value in a list of specified attributes.
* @param {string} name Comma separated list of attribute names.
* @param {string} value The value to match.
* @param {QuerySelectors} appendValue Additional CSS query to append
* @return {string} The query string.
* @memberof Utils.dom
*/
queryAttribute(name, value, appendValue) {
const fields = name.split(/[\s|,]+/g);
let selector = '';
fields.forEach(function(f, i) {
if (value != null) {
selector += '[' + CSS.escape(f) + '="' + value + '"]';
} else {
selector += '[' + CSS.escape(f) + ']';
}
if (appendValue) {
selector += appendValue.get(i);
}
if (i < fields.length - 1) selector += ',';
});
return selector;
},
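// Example (attribute names are placeholders): queryAttribute('z-load,data-ui-load', 'my/component')
// returns '[z-load="my/component"],[data-ui-load="my/component"]'.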
/**
* Gets the first non-null value for the given comma-separated list of element attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @return {string} The attribute value.
* @memberof Utils.dom
*/
getAttribute(element, name) {
let value;
if (typeof name === 'string' && name.indexOf(',') !== -1) {
const fields = name.split(/[\s|,]+/g);
for (let i = 0; i < fields.length; i++) {
const f = fields[i];
const a = element.getAttribute(f);
if (a != null) {
value = a;
break;
}
}
} else value = element.getAttribute(name);
return value;
},
/**
* Sets the value of the given comma-separated list of attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @param {object|string} value The value to set.
* @memberof Utils.dom
*/
setAttribute(element, name, value) {
if (typeof name === 'string' && name.indexOf(',') !== -1) {
const fields = name.split(/[\s|,]+/g);
const _t = this;
fields.forEach(function(f) {
_t.setAttribute(element, f, value);
});
} else if (value === null) {
element.removeAttribute(name);
} else if (element.getAttribute(name) !== value) {
element.setAttribute(name, value);
}
},
/**
* Gets the CSS `:not` selector for the given comma-separated list of attributes with the specified value (if any).
* @param {string} name Comma separated list of attributes.
* @param {string} value The value to match.
* @return {QuerySelectors} The query selectors.
* @memberof Utils.dom
*/
cssNot(name, value) {
const fields = name.split(',');
let selector = '';
fields.forEach(function(v, i) {
if (v.startsWith('.')) {
selector += ':not(' + v + ')';
} else if (value != null) {
selector += ':not([' + v + '="' + value + '"])';
} else {
selector += ':not([' + v + '])';
}
if (i < fields.length - 1) selector += ',';
});
return (function(s) {
/** @type {QuerySelectors} */
return {
get(i) {
const selectors = s.split(',');
return (i >= selectors.length || i == null) ? selectors[0] : selectors[i];
},
// eslint-disable-next-line no-unused-vars
getAll() {
const selectors = s.split(',');
return selectors.join('');
}
};
})(selector);
},
/**
* Returns the ShadowRoot node containing the given element, or false if the element is not in a shadow DOM.
* @param {Node} node The node element.
* @return {ShadowRoot|boolean} The ShadowRoot or false.
* @memberof Utils.dom
*/
| getShadowRoot | identifier_name |
|
Util.js | */
'use strict';
/**
* @typedef QuerySelectors
* @method {function(): string} getAll
* @method {function(i: number): string} get
*/
/**
* Common utility functions.
* @namespace Utils
*/
const Utils = {
/**
* Returns true only if the object is null or undefined.
* @param {object} obj The object to test.
* @return {boolean} True if null or undefined, otherwise false.
* @memberOf Utils
*/
isNoU(obj) {
return (typeof obj === 'undefined' || obj === null);
},
/**
* Gets an object's property given its name or path.
* @param {object} o The object to get property from.
* @param {string} s The property path (dotted/indexed form).
* @return {object|undefined} The property matching the given path.
* @memberOf Utils
*/
propertyFromPath(o, s) {
if (typeof s !== 'string' || o == null) {
return;
}
try {
if (typeof o[s] !== 'undefined') {
return o[s];
}
} catch (e) {
// TODO: "TypeError: Cannot create proxy with a non-object as target or handler"
console.log(e);
}
let ref = o; let path = '';
const parts = s.match(/\[(".*?"|'.*?'|(.*?))\]|".*?"|'.*?'|[0-9a-zA-Z_$]+/g);
for (let i = 0; i < parts.length; i++) {
let m = parts[i];
if (m.startsWith('[') && m.endsWith(']')) {
m = m.substring(1, m.length - 1).trim();
}
if (m.startsWith('"') && m.endsWith('"')) {
m = m.substring(1, m.length - 1);
} else if (m.startsWith('\'') && m.endsWith('\'')) {
m = m.substring(1, m.length - 1);
}
path = path + m;
let propertyReference;
try {
propertyReference = ref[m];
} catch (e) {
// TODO: proxy has been revoked
}
if (typeof propertyReference !== 'undefined') {
ref = propertyReference;
} else {
// TODO: maybe logging?
// throw new Error('Undefined property "' + path + '"');
return;
}
path = path + '->';
}
return ref;
},
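// Illustrative examples (editor's note, not part of the original source):
//   propertyFromPath({list: [{name: 'a'}]}, 'list[0].name')  // -> 'a'
//   propertyFromPath({list: []}, 'list[0].name')             // -> undefined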
/**
* Creates a copy of a given object.
* @param {object} obj The source object.
* @return {object} The object copy.
* @memberOf Utils
*/
cloneObject(obj) {
if (obj === null || typeof obj !== 'object') {
return obj;
}
// Give temp the original object's constructor
let temp = obj;
try {
temp = obj.constructor();
for (const key in obj) {
if (obj.hasOwnProperty(key)) {
temp[key] = this.cloneObject(obj[key]);
}
}
} catch (e) {
// TODO: should warn when clone is not possible
}
return temp;
},
// TODO: deprecate `hasPassiveEvents`
/**
* Returns true if browser supports passive events.
* @return {boolean} True if supported, otherwise false.
* @memberOf Utils
*/
hasPassiveEvents() {
let supportsPassive = false;
try {
const opts = Object.defineProperty({}, 'passive', {
get: function() {
supportsPassive = true;
}
});
window.addEventListener('testPassive', null, opts);
window.removeEventListener('testPassive', null, opts);
} catch (e) {}
return supportsPassive;
},
/**
* Converts the given string to `camelCase`
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
hyphensToCamelCase(s) {
return typeof s === 'string' ? s.replace(/--/g, ':').replace(/-([a-z0-9_$-])/g, function(g) {
return '_$-'.indexOf(g[1]) > -1 || (+g[1]).toString() === g[1] ?
'_' + g[1].replace('-', '_') : g[1].toUpperCase();
}).replace(/:/g, '-') : s;
},
/**
* Converts the given string to `kebab-case`.
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
camelCaseToHyphens(s) {
if (typeof s !== 'string') return s;
s = s.replace(/(^\w)|(\s+\w)/g, function(letter) {
return letter.toUpperCase();
}).replace(/\s/g, '');
return s.split(/(?=[A-Z])/).join('-').toLowerCase();
},
/**
* Normalizes controller code (ES5/ES6+).
* @param {string} javascriptCode The JS code to normalize.
* @return {string} Normalized JS controller code.
* @memberOf Utils
*/
normalizeControllerCode(javascriptCode) {
if (javascriptCode.indexOf('module.exports') >= 0) {
return '\'use strict\'; let module = {}; ' + javascriptCode + ';\nreturn module.exports;';
} else {
// TODO: improve code parsing
let code = javascriptCode;
const fni = javascriptCode.indexOf('function ');
const fnz = javascriptCode.indexOf('zuix.controller');
const fnc = javascriptCode.indexOf('class ');
if (fnc >= 0 && (fnc < fni || fni === -1) && (fnc < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fnc) + 'return ' + javascriptCode.substring(fnc);
} else if (fni >= 0 && (fni < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fni) + 'return ' + javascriptCode.substring(fni);
} else if (fnz !== -1) {
code = javascriptCode.substring(0, fnz) + 'return ' + javascriptCode.substring(fnz + 15);
}
return code;
}
},
/**
* Catches errors that occur in the specified component context.
* @param {ComponentContext} ctx The component context.
* @param {function} fn Function code to execute.
* @param {function} errorCallback Error callback.
* @memberOf Utils
*/
catchContextError(ctx, fn, errorCallback) | ,
/**
* @namespace dom
* @memberof Utils
*/
dom: {
/**
* Gets CSS query for matching the given value in a list of specified attributes.
* @param {string} name Comma separated list of attribute names.
* @param {string} value The value to match.
* @param {QuerySelectors} appendValue Additional CSS query to append
* @return {string} The query string.
* @memberof Utils.dom
*/
queryAttribute(name, value, appendValue) {
const fields = name.split(/[\s|,]+/g);
let selector = '';
fields.forEach(function(f, i) {
if (value != null) {
selector += '[' + CSS.escape(f) + '="' + value + '"]';
} else {
selector += '[' + CSS.escape(f) + ']';
}
if (appendValue) {
selector += appendValue.get(i);
}
if (i < fields.length - 1) selector += ',';
});
return selector;
},
/**
* Gets the first non-null value for the given comma-separated list of element attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @return {string} The attribute value.
* @memberof Utils.dom
*/
getAttribute(element, name) {
let value;
if (typeof name === 'string' && name.indexOf(',') !== -1) {
const fields = name.split(/[\s|,]+/g);
for (let i = 0; i < fields.length; i++) {
const f = fields[i];
const a = element.getAttribute(f);
if (a != null) {
value = a;
break;
}
}
} else value = element.getAttribute(name);
return value;
},
/**
* Sets the value of the given comma-separated list of attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @param {object|string} value The value to set.
* @memberof Utils.dom
*/
setAttribute(element, name, value) {
if (typeof name === 'string | {
try {
fn();
} catch (err) {
ctx._error = err;
if (errorCallback) errorCallback(err);
if (err && ctx.options().error) {
(ctx.options().error)
.call(ctx, err, ctx);
} else {
console.error(err);
}
}
} | identifier_body |
Util.js |
*/
'use strict';
/**
* @typedef QuerySelectors
* @method {function(): string} getAll
* @method {function(i: number): string} get
*/
/**
* Common utility functions.
* @namespace Utils
*/
const Utils = {
/**
* Returns true only if the object is null or undefined.
* @param {object} obj The object to test.
* @return {boolean} True if null or undefined, otherwise false.
* @memberOf Utils
*/
isNoU(obj) {
return (typeof obj === 'undefined' || obj === null);
},
/**
* Gets an object's property given its name or path.
* @param {object} o The object to get property from.
* @param {string} s The property path (dotted/indexed form).
* @return {object|undefined} The property matching the given path.
* @memberOf Utils
*/
propertyFromPath(o, s) {
if (typeof s !== 'string' || o == null) {
return;
}
try {
if (typeof o[s] !== 'undefined') {
return o[s];
}
} catch (e) {
// TODO: "TypeError: Cannot create proxy with a non-object as target or handler"
console.log(e);
}
let ref = o; let path = '';
const parts = s.match(/\[(".*?"|'.*?'|(.*?))\]|".*?"|'.*?'|[0-9a-zA-Z_$]+/g);
for (let i = 0; i < parts.length; i++) {
let m = parts[i];
if (m.startsWith('[') && m.endsWith(']')) {
m = m.substring(1, m.length - 1).trim();
}
if (m.startsWith('"') && m.endsWith('"')) {
m = m.substring(1, m.length - 1);
} else if (m.startsWith('\'') && m.endsWith('\'')) {
m = m.substring(1, m.length - 1);
}
path = path + m;
let propertyReference;
try {
propertyReference = ref[m];
} catch (e) {
// TODO: proxy has been revoked
}
if (typeof propertyReference !== 'undefined') {
ref = propertyReference;
} else {
// TODO: maybe logging?
// throw new Error('Undefined property "' + path + '"');
return;
}
path = path + '->';
}
return ref;
},
/**
* Creates a copy of a given object.
* @param {object} obj The source object.
* @return {object} The object copy.
* @memberOf Utils
*/
cloneObject(obj) {
if (obj === null || typeof obj !== 'object') {
return obj;
}
// Give temp the original object's constructor
let temp = obj;
try {
temp = obj.constructor();
for (const key in obj) {
if (obj.hasOwnProperty(key)) {
temp[key] = this.cloneObject(obj[key]);
}
}
} catch (e) { | // TODO: should warn when clone is not possible
}
return temp;
},
// TODO: deprecate `hasPassiveEvents`
/**
* Returns true if browser supports passive events.
* @return {boolean} True if supported, otherwise false.
* @memberOf Utils
*/
hasPassiveEvents() {
let supportsPassive = false;
try {
const opts = Object.defineProperty({}, 'passive', {
get: function() {
supportsPassive = true;
}
});
window.addEventListener('testPassive', null, opts);
window.removeEventListener('testPassive', null, opts);
} catch (e) {}
return supportsPassive;
},
/**
* Converts the given string to `camelCase`
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
hyphensToCamelCase(s) {
return typeof s === 'string' ? s.replace(/--/g, ':').replace(/-([a-z0-9_$-])/g, function(g) {
return '_$-'.indexOf(g[1]) > -1 || (+g[1]).toString() === g[1] ?
'_' + g[1].replace('-', '_') : g[1].toUpperCase();
}).replace(/:/g, '-') : s;
},
/**
* Converts the given string to `kebab-case`.
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
camelCaseToHyphens(s) {
if (typeof s !== 'string') return s;
s = s.replace(/(^\w)|(\s+\w)/g, function(letter) {
return letter.toUpperCase();
}).replace(/\s/g, '');
return s.split(/(?=[A-Z])/).join('-').toLowerCase();
},
/**
* Normalizes controller code (ES5/ES6+).
* @param {string} javascriptCode The JS code to normalize.
* @return {string} Normalized JS controller code.
* @memberOf Utils
*/
normalizeControllerCode(javascriptCode) {
if (javascriptCode.indexOf('module.exports') >= 0) {
return '\'use strict\'; let module = {}; ' + javascriptCode + ';\nreturn module.exports;';
} else {
// TODO: improve code parsing
let code = javascriptCode;
const fni = javascriptCode.indexOf('function ');
const fnz = javascriptCode.indexOf('zuix.controller');
const fnc = javascriptCode.indexOf('class ');
if (fnc >= 0 && (fnc < fni || fni === -1) && (fnc < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fnc) + 'return ' + javascriptCode.substring(fnc);
} else if (fni >= 0 && (fni < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fni) + 'return ' + javascriptCode.substring(fni);
} else if (fnz !== -1) {
code = javascriptCode.substring(0, fnz) + 'return ' + javascriptCode.substring(fnz + 15);
}
return code;
}
},
/**
* Catches errors that occur in the specified component context.
* @param {ComponentContext} ctx The component context.
* @param {function} fn Function code to execute.
* @param {function} errorCallback Error callback.
* @memberOf Utils
*/
catchContextError(ctx, fn, errorCallback) {
try {
fn();
} catch (err) {
ctx._error = err;
if (errorCallback) errorCallback(err);
if (err && ctx.options().error) {
(ctx.options().error)
.call(ctx, err, ctx);
} else {
console.error(err);
}
}
},
/**
* @namespace dom
* @memberof Utils
*/
dom: {
/**
* Gets CSS query for matching the given value in a list of specified attributes.
* @param {string} name Comma separated list of attribute names.
* @param {string} value The value to match.
* @param {QuerySelectors} appendValue Additional CSS query to append
* @return {string} The query string.
* @memberof Utils.dom
*/
queryAttribute(name, value, appendValue) {
const fields = name.split(/[\s|,]+/g);
let selector = '';
fields.forEach(function(f, i) {
if (value != null) {
selector += '[' + CSS.escape(f) + '="' + value + '"]';
} else {
selector += '[' + CSS.escape(f) + ']';
}
if (appendValue) {
selector += appendValue.get(i);
}
if (i < fields.length - 1) selector += ',';
});
return selector;
},
/**
* Gets the first non-null value for the given comma-separated list of element attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @return {string} The attribute value.
* @memberof Utils.dom
*/
getAttribute(element, name) {
let value;
if (typeof name === 'string' && name.indexOf(',') !== -1) {
const fields = name.split(/[\s|,]+/g);
for (let i = 0; i < fields.length; i++) {
const f = fields[i];
const a = element.getAttribute(f);
if (a != null) {
value = a;
break;
}
}
} else value = element.getAttribute(name);
return value;
},
/**
* Sets the value of the given comma-separated list of attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @param {object|string} value The value to set.
* @memberof Utils.dom
*/
setAttribute(element, name, value) {
if (typeof name === 'string' | random_line_split |
|
lib.rs | mounts().unwrap() {
/// println!("{}", mount.unwrap());
/// }
pub struct Mounts {
buf_reader: std::io::BufReader<std::fs::File>
}
impl Mounts {
/// Returns a new Mounts instance. You can also call [mounts()] for convenience.
pub fn new() -> std::result::Result<Mounts, std::io::Error> {
let file = std::fs::File::open("/proc/mounts")?;
Ok( Mounts { buf_reader: std::io::BufReader::new(file) } )
}
}
impl IntoIterator for Mounts {
type Item = std::result::Result<Mount, BoxError>;
type IntoIter = MountsIntoIterator;
/// Consuming iterator, used similarly to mutable iterator. See [Mounts::iter_mut()] for example.
fn into_iter(self) -> Self::IntoIter {
MountsIntoIterator { lines: self.buf_reader.lines() }
}
}
impl<'a> IntoIterator for &'a mut Mounts {
type Item = std::result::Result<Mount, BoxError>;
type IntoIter = MountsIteratorMut<'a>;
/// Mutable iterator, see [Mounts::iter_mut()].
fn into_iter(self) -> Self::IntoIter {
MountsIteratorMut { lines: self.buf_reader.by_ref().lines() }
}
}
/// Consuming iterator for [Mounts].
pub struct MountsIntoIterator {
lines: std::io::Lines<std::io::BufReader<std::fs::File>>
}
impl std::iter::Iterator for MountsIntoIterator {
type Item = std::result::Result<Mount, BoxError>;
/// Returns the next line in `/proc/mounts` as a [Mount]. If there is a problem reading or parsing `/proc/mounts`, an error is returned. In the case of a parsing error we use [nom::Err::to_owned()] to allow the returned error to outlive `line`. See [Mounts::iter_mut()] for an analogous example using a mutable iterator.
fn next(&mut self) -> std::option::Option<Self::Item> {
match self.lines.next() {
Some(line) => match line {
Ok(line) => match parsers::parse_line(&line[..]) {
Ok( (_, m) ) => Some(Ok(m)),
Err(e) => Some(Err(e.to_owned().into()))
},
Err(e) => Some(Err(e.into()))
},
None => None
}
}
}
/// Mutable iterator for `Mounts`.
pub struct MountsIteratorMut<'a> {
lines: std::io::Lines<&'a mut std::io::BufReader<std::fs::File>>
}
impl<'a> std::iter::Iterator for MountsIteratorMut<'a> {
type Item = std::result::Result<Mount, BoxError>;
// Returns the next line in `/proc/mounts` as a [Mount]. See [Mounts::iter_mut()] for an example.
fn next(&mut self) -> std::option::Option<Self::Item> {
match self.lines.next() {
Some(line) => match line {
Ok(line) => match parsers::parse_line(&line[..]) {
Ok( (_, m) ) => Some(Ok(m)),
Err(e) => Some(Err(e.to_owned().into()))
},
Err(e) => Some(Err(e.into()))
},
None => None
}
}
}
impl<'a> Mounts {
// There is no non-mutable iterator.
/// Mutable iterator.
/// # Examples
/// ```
/// # use nom_tutorial;
/// let mut iter = nom_tutorial::mounts().expect("Couldn't access /proc/mounts.").into_iter();
/// match iter.next() {
/// Some(m) => match m {
/// Ok(m) => eprintln!("Here is a mounted filesystem: {}", m),
/// Err(e) => eprintln!("There was an error parsing the next line in /proc/mounts: {}", e)
/// },
/// None => eprintln!("There are no mounted filesystems.")
/// }
/// ```
pub fn | (&'a mut self) -> MountsIteratorMut<'a> {
self.into_iter()
}
}
// Encapsulate individual nom parsers in a private submodule. The `pub(self)` keyword allows the inner method [parsers::parse_line()] to be called by code within this module, but not by users of our crate.
pub(self) mod parsers {
use super::Mount;
// Extract a string that does not contain whitespace (space or tab). Anything else goes.
fn not_whitespace(i: &str) -> nom::IResult<&str, &str> {
nom::bytes::complete::is_not(" \t")(i)
}
// Replace the sequence 040 with a space.
fn escaped_space(i: &str) -> nom::IResult<&str, &str> {
nom::combinator::value(" ", nom::bytes::complete::tag("040"))(i)
}
// Replace the escaped sequence \\ with a \. The inner parser `nom::character::complete::char` returns a `char` instead of a `&str`, so we wrap it in a `nom::combinator::recognize`, which returns that `char` as an `&str` if the inner parser succeeds, and returns an error if the inner parser fails.
fn escaped_backslash(i: &str) -> nom::IResult<&str, &str> {
nom::combinator::recognize(nom::character::complete::char('\\'))(i)
}
// Replace all instances of \040 in a string with a space.
// Replace \\ with a \.
fn transform_escaped(i: &str) -> nom::IResult<&str, std::string::String> {
nom::bytes::complete::escaped_transform(nom::bytes::complete::is_not("\\"), '\\', nom::branch::alt((escaped_backslash, escaped_space)))(i)
}
// Parse the options of a mount into a comma separated vector of strings. The options string is terminated by a whitespace.
// Here we use `nom::combinator::map_parser` to extract the full whitespace-terminated options string and then pass it in to `transform_escaped` to process escaped characters. Then the transformed string is split into a comma-delimited vector of strings by `nom::multi::separated_list`.
fn mount_opts(i: &str) -> nom::IResult<&str, std::vec::Vec<std::string::String>> {
nom::multi::separated_list(nom::character::complete::char(','), nom::combinator::map_parser(nom::bytes::complete::is_not(", \t"),transform_escaped))(i)
}
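// Illustrative example (editor's note, not part of the original tutorial):
// transform_escaped("a\\040b") yields Ok(("", String::from("a b"))), and
// mount_opts("rw,relatime,data\\040dir") yields Ok(("", vec!["rw", "relatime", "data dir"]))
// (with the options as owned Strings).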
// Parse a line from `/proc/mounts` into a Mount struct. This is perhaps the most complex looking parser, but it is actually one of the simplest because we build upon each of the parsers defined above. Let's break it down parser by parser:
// # `nom::combinator::all_consuming` generates an error if there is any leftover input. This will force nom to generate an error if there is unexpected input at the end of a line in `/proc/mounts`, for example:
// ```ignore
// /dev/sda1 /mnt/disk ext4 defaults 0 0 this_last_part_shouldn't_be_here
// ```
//
// `nom::sequence::tuple` generates a `Result<Ok(remaining_input: &str, output_tuple), Error>`. Although it looks complicated, we can very easily destructure that tuple. Each sub/inner parser we pass to `nom::sequence::tuple` generates its own element within the tuple. We can ignore the whitespace by matching it with `_` and destructure the other elements of the tuple as the variables we are interested in, such as `device`, `mount_point`, etc. If everything goes as planned we return a new instance of the `Mount` structure populated with the variables we destructured from the tuple.
// ```ignore
// let (device, _, mount_point /*, ...*/) = /* tuple returned by all_consuming(tuple()) --> */ ("/dev/sda1", " ", "/mnt/disk" /*, ...*/);
// let mount = Mount { device: device.to_string(), mount_point: mount_point.to_string() /*, ...*/ };
// ```
pub fn parse_line(i: &str) -> nom::IResult<&str, Mount> {
match nom::combinator::all_consuming(nom::sequence::tuple((
nom::combinator::map_parser(not_whitespace, transform_escaped), // device
nom::character::complete::space1,
nom::combinator::map_parser(not_whitespace, transform_escaped), // mount_point
nom::character::complete::space1,
not_whitespace, // file_system_type
nom::character::complete::space1,
mount_opts, // options
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space1,
nom | iter_mut | identifier_name |
lib.rs | defined above. Let's break it down parser by parser:
// # `nom::combinator::all_consuming` generates an error if there is any leftover input. This will force nom to generate an error if there is unexpected input at the end of a line in `/proc/mounts`, for example:
// ```ignore
// /dev/sda1 /mnt/disk ext4 defaults 0 0 this_last_part_shouldn't_be_here
// ```
//
// `nom::sequence::tuple` generates a `Result<Ok(remaining_input: &str, output_tuple), Error>`. Although it looks complicated, we can very easily destructure that tuple. Each sub/inner parser we pass to `nom::sequence::tuple` generates its own element within the tuple. We can ignore the whitespace by matching it with `_` and destructure the other elements of the tuple as the variables we are interested in, such as `device`, `mount_point`, etc. If everything goes as planned we return a new instance of the `Mount` structure populated with the variables we destructured from the tuple.
// ```ignore
// let (device, _, mount_point /*, ...*/) = /* tuple returned by all_consuming(tuple()) --> */ ("/dev/sda1", " ", "/mnt/disk" /*, ...*/);
// let mount = Mount { device: device.to_string(), mount_point: mount_point.to_string() /*, ...*/ };
// ```
pub fn parse_line(i: &str) -> nom::IResult<&str, Mount> {
match nom::combinator::all_consuming(nom::sequence::tuple((
nom::combinator::map_parser(not_whitespace, transform_escaped), // device
nom::character::complete::space1,
nom::combinator::map_parser(not_whitespace, transform_escaped), // mount_point
nom::character::complete::space1,
not_whitespace, // file_system_type
nom::character::complete::space1,
mount_opts, // options
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space0,
)))(i) {
Ok((remaining_input, (
device,
_, // whitespace
mount_point,
_, // whitespace
file_system_type,
_, // whitespace
options,
_, // whitespace
_, // 0
_, // whitespace
_, // 0
_, // optional whitespace
))) => {
Ok((remaining_input, Mount {
device: device,
mount_point: mount_point,
file_system_type: file_system_type.to_string(),
options: options
}))
}
Err(e) => Err(e)
}
}
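// Illustrative example (editor's note, not part of the original tutorial):
// parse_line("/dev/sda1 / ext4 rw,errors=remount-ro 0 0") returns a Mount with
// device "/dev/sda1", mount_point "/", file_system_type "ext4" and
// options ["rw", "errors=remount-ro"].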
// Alternative version of `parse_line()` above that performs the same
// function using a different style. Rather than parsing the entire line at
// once with one big `nom::sequence::tuple` we break the parsing up into
// multiple separate statements. Each statement runs a parser that returns
// an `Ok(remaining_input, value)`. At the end of each statement we have
// the `?` operator, which unwraps the result and returns early with an
// error if parsing failed. The remaining input from each parser is used as
// the input of each subsequent parser. Values are assigned to temporary
// variables that are used to construct a `Mount` object at the end of the
// function. Values that are not needed are discarded by assigning to `_`.
#[allow(unused)]
pub fn parse_line_alternate(i: &str) -> nom::IResult<&str, Mount> {
let (i, device) = nom::combinator::map_parser(not_whitespace, transform_escaped)(i)?; // device
let (i, _) = nom::character::complete::space1(i)?;
let (i, mount_point) = nom::combinator::map_parser(not_whitespace, transform_escaped)(i)?; // mount_point
let (i, _) = nom::character::complete::space1(i)?;
let (i, file_system_type) = not_whitespace(i)?; // file_system_type
let (i, _) = nom::character::complete::space1(i)?;
let (i, options) = mount_opts(i)?; // options
let (i, _) = nom::combinator::all_consuming(nom::sequence::tuple((
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space0
)))(i)?;
Ok((i, Mount {
device: device,
mount_point: mount_point,
file_system_type: file_system_type.to_string(),
options:options
}))
}
#[cfg(test)]
mod tests {
use super::*;
// Extracts a string that does not contain whitespace, i.e. space or tab.
#[test]
fn test_not_whitespace() {
assert_eq!(not_whitespace("abcd efg"), Ok((" efg", "abcd")));
assert_eq!(not_whitespace("abcd\tefg"), Ok(("\tefg", "abcd")));
assert_eq!(not_whitespace(" abcdefg"), Err(nom::Err::Error((" abcdefg", nom::error::ErrorKind::IsNot))));
}
// Converts 040 to a space. Does not actually recognize a literal space.
#[test]
fn test_escaped_space() {
assert_eq!(escaped_space("040"), Ok(("", " ")));
assert_eq!(escaped_space(" "), Err(nom::Err::Error((" ", nom::error::ErrorKind::Tag))));
}
// Converts `char` \ to `&str` \.
#[test]
fn test_escaped_backslash() {
assert_eq!(escaped_backslash("\\"), Ok(("", "\\")));
assert_eq!(escaped_backslash("not a backslash"), Err(nom::Err::Error(("not a backslash", nom::error::ErrorKind::Char))));
}
// Recognizes each escape sequence and transforms it into the escaped literal.
// For example, each \040 is transformed into a space.
#[test]
fn test_transform_escaped() {
assert_eq!(transform_escaped("abc\\040def\\\\g\\040h"), Ok(("", std::string::String::from("abc def\\g h"))));
assert_eq!(transform_escaped("\\bad"), Err(nom::Err::Error(("bad", nom::error::ErrorKind::Tag))));
}
// Parses a comma separated list of mount options, which might contain spaces.
#[test]
fn test_mount_opts() {
assert_eq!(mount_opts("a,bc,d\\040e"), Ok(("", vec!["a".to_string(), "bc".to_string(), "d e".to_string()])));
}
// Parses a line from /proc/mounts
#[test]
fn test_parse_line() {
let mount1 = Mount{
device: "device".to_string(),
mount_point: "mount_point".to_string(),
file_system_type: "file_system_type".to_string(),
options: vec!["options".to_string(), "a".to_string(), "b=c".to_string(), "d e".to_string()]
};
let (_, mount2) = parse_line("device mount_point file_system_type options,a,b=c,d\\040e 0 0").unwrap();
assert_eq!(mount1.device, mount2.device);
assert_eq!(mount1.mount_point, mount2.mount_point);
assert_eq!(mount1.file_system_type, mount2.file_system_type);
assert_eq!(mount1.options, mount2.options);
}
// Parses a line from /proc/mounts
#[test]
fn test_parse_line_alternate() {
let mount1 = Mount{
device: "device".to_string(),
mount_point: "mount_point".to_string(),
file_system_type: "file_system_type".to_string(),
options: vec!["options".to_string(), "a".to_string(), "b=c".to_string(), "d e".to_string()]
};
let (_, mount2) = parse_line_alternate("device mount_point file_system_type options,a,b=c,d\\040e 0 0").unwrap();
assert_eq!(mount1.device, mount2.device);
assert_eq!(mount1.mount_point, mount2.mount_point);
assert_eq!(mount1.file_system_type, mount2.file_system_type);
assert_eq!(mount1.options, mount2.options);
}
}
}
/// Convenience method equivalent to `Mounts::new()`.
pub fn mounts() -> std::result::Result<Mounts, std::io::Error> { | Mounts::new() | random_line_split |
|
lib.rs | . Let's break it down parser by parser:
// # `nom::combinator::all_consuming` generates an error if there is any leftover input. This will force nom to generate an error if there is unexpected input at the end of a line in `/proc/mounts`, for example:
// ```ignore
// /dev/sda1 /mnt/disk ext4 defaults 0 0 this_last_part_shouldn't_be_here
// ```
//
// `nom::sequence::tuple` generates a `Result<Ok(remaining_input: &str, output_tuple), Error>`. Although it looks complicated, we can very easily destructure that tuple. Each sub/inner parser we pass to `nom::sequence::tuple` generates its own element within the tuple. We can ignore the whitespace by matching it with `_` and destructure the other elements of the tuple as the variables we are interested in, such as `device`, `mount_point`, etc. If everything goes as planned we return a new instance of the `Mount` structure populated with the variables we destructured from the tuple.
// ```ignore
// let (device, _, mount_point /*, ...*/) = /* tuple returned by all_consuming(tuple()) --> */ ("/dev/sda1", " ", "/mnt/disk" /*, ...*/);
// let mount = Mount { device: device.to_string(), mount_point: mount_point.to_string() /*, ...*/ };
// ```
pub fn parse_line(i: &str) -> nom::IResult<&str, Mount> {
match nom::combinator::all_consuming(nom::sequence::tuple((
nom::combinator::map_parser(not_whitespace, transform_escaped), // device
nom::character::complete::space1,
nom::combinator::map_parser(not_whitespace, transform_escaped), // mount_point
nom::character::complete::space1,
not_whitespace, // file_system_type
nom::character::complete::space1,
mount_opts, // options
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space0,
)))(i) {
Ok((remaining_input, (
device,
_, // whitespace
mount_point,
_, // whitespace
file_system_type,
_, // whitespace
options,
_, // whitespace
_, // 0
_, // whitespace
_, // 0
_, // optional whitespace
))) => {
Ok((remaining_input, Mount {
device: device,
mount_point: mount_point,
file_system_type: file_system_type.to_string(),
options: options
}))
}
Err(e) => Err(e)
}
}
// Alternative version of `parse_line()` above that performs the same
// function using a different style. Rather than parsing the entire line at
// once with one big `nom::sequence::tuple` we break the parsing up into
// multiple separate statements. Each statement runs a parser that returns
// an `Ok(remaining_input, value)`. At the end of each statement we have
// the `?` operator, which unwraps the result and returns early with an
// error if parsing failed. The remaining input from each parser is used as
// the input of each subsequent parser. Values are assigned to temporary
// variables that are used to construct a `Mount` object at the end of the
// function. Values that are not needed are discarded by assigning to `_`.
#[allow(unused)]
pub fn parse_line_alternate(i: &str) -> nom::IResult<&str, Mount> {
let (i, device) = nom::combinator::map_parser(not_whitespace, transform_escaped)(i)?; // device
let (i, _) = nom::character::complete::space1(i)?;
let (i, mount_point) = nom::combinator::map_parser(not_whitespace, transform_escaped)(i)?; // mount_point
let (i, _) = nom::character::complete::space1(i)?;
let (i, file_system_type) = not_whitespace(i)?; // file_system_type
let (i, _) = nom::character::complete::space1(i)?;
let (i, options) = mount_opts(i)?; // options
let (i, _) = nom::combinator::all_consuming(nom::sequence::tuple((
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space0
)))(i)?;
Ok((i, Mount {
device: device,
mount_point: mount_point,
file_system_type: file_system_type.to_string(),
options:options
}))
}
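// Illustrative example (editor's note, not part of the original tutorial): on a line
// such as "tmpfs /tmp tmpfs rw,nosuid,size=1024k 0 0" this function and parse_line()
// produce the same Mount value (device "tmpfs", mount point "/tmp", type "tmpfs",
// options ["rw", "nosuid", "size=1024k"]).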
#[cfg(test)]
mod tests {
use super::*;
// Extracts a string that does not contain whitespace, i.e. space or tab.
#[test]
fn test_not_whitespace() {
assert_eq!(not_whitespace("abcd efg"), Ok((" efg", "abcd")));
assert_eq!(not_whitespace("abcd\tefg"), Ok(("\tefg", "abcd")));
assert_eq!(not_whitespace(" abcdefg"), Err(nom::Err::Error((" abcdefg", nom::error::ErrorKind::IsNot))));
}
// Converts 040 to a space. Does not actually recognize a literal space.
#[test]
fn test_escaped_space() {
assert_eq!(escaped_space("040"), Ok(("", " ")));
assert_eq!(escaped_space(" "), Err(nom::Err::Error((" ", nom::error::ErrorKind::Tag))));
}
// Converts `char` \ to `&str` \.
#[test]
fn test_escaped_backslash() {
assert_eq!(escaped_backslash("\\"), Ok(("", "\\")));
assert_eq!(escaped_backslash("not a backslash"), Err(nom::Err::Error(("not a backslash", nom::error::ErrorKind::Char))));
}
// Recognizes each escape sequence and transforms it into the escaped literal.
// For example, each \040 is transformed into a space.
#[test]
fn test_transform_escaped() {
assert_eq!(transform_escaped("abc\\040def\\\\g\\040h"), Ok(("", std::string::String::from("abc def\\g h"))));
assert_eq!(transform_escaped("\\bad"), Err(nom::Err::Error(("bad", nom::error::ErrorKind::Tag))));
}
// Parses a comma separated list of mount options, which might contain spaces.
#[test]
fn test_mount_opts() {
assert_eq!(mount_opts("a,bc,d\\040e"), Ok(("", vec!["a".to_string(), "bc".to_string(), "d e".to_string()])));
}
// Parses a line from /proc/mounts
#[test]
fn test_parse_line() {
let mount1 = Mount{
device: "device".to_string(),
mount_point: "mount_point".to_string(),
file_system_type: "file_system_type".to_string(),
options: vec!["options".to_string(), "a".to_string(), "b=c".to_string(), "d e".to_string()]
};
let (_, mount2) = parse_line("device mount_point file_system_type options,a,b=c,d\\040e 0 0").unwrap();
assert_eq!(mount1.device, mount2.device);
assert_eq!(mount1.mount_point, mount2.mount_point);
assert_eq!(mount1.file_system_type, mount2.file_system_type);
assert_eq!(mount1.options, mount2.options);
}
// Parses a line from /proc/mounts
#[test]
fn test_parse_line_alternate() {
let mount1 = Mount{
device: "device".to_string(),
mount_point: "mount_point".to_string(),
file_system_type: "file_system_type".to_string(),
options: vec!["options".to_string(), "a".to_string(), "b=c".to_string(), "d e".to_string()]
};
let (_, mount2) = parse_line_alternate("device mount_point file_system_type options,a,b=c,d\\040e 0 0").unwrap();
assert_eq!(mount1.device, mount2.device);
assert_eq!(mount1.mount_point, mount2.mount_point);
assert_eq!(mount1.file_system_type, mount2.file_system_type);
assert_eq!(mount1.options, mount2.options);
}
}
}
/// Convenience method equivalent to `Mounts::new()`.
pub fn mounts() -> std::result::Result<Mounts, std::io::Error> | {
Mounts::new()
} | identifier_body |
|
stringutils.go | "mns:eventdelivery": "string",
"mns:eventdescription": "string",
"mns:eventsubscription": "string",
"mns:embeddedserviceliveagent": "string",
"mns: flow": "string",
"https://developer.salesforce.com/docs/atlas.en-us.api_meta.meta/api_meta/meta_embeddedservicebranding.htm": "string",
"restricted picklist": "string",
"multipicklist": "string",
// boolean-ish types
"boolean": "bool",
"checkbox": "bool",
// numeric-ish types
"int": "int",
"currency": "float64",
"number": "int",
"double": "float64",
"long": "int64",
"int64": "int64",
"integer": "int",
"decimal": "float64",
"percent": "float64",
// complex types
"address": "types.Address",
"date": "types.Date",
"datetime": "types.Datetime",
"date/time": "types.Datetime",
"object": "json.RawMessage",
"queryresult": "types.QueryResult",
"anytype": "json.RawMessage",
"types.datetime": "types.Datetime",
"types.address": "types.Address",
"types.date": "types.Date",
"time.time": "time.Time",
"time": "types.Datetime",
"base64": "string",
"json.rawmessage": "json.RawMessage",
"any": "interface{}",
"types.queryresult": "types.QueryResult",
"symboltable": "json.RawMessage",
"apexcomponentmetadata": "json.RawMessage",
"entitydefinition": "json.RawMessage",
"fielddefinition": "json.RawMessage",
"apexresult": "json.RawMessage",
"heapdump": "json.RawMessage",
"soqlresult": "json.RawMessage",
"apexpagemetadata": "json.RawMessage",
"executeanonymousresult": "json.RawMessage",
"apextriggermetadata": "json.RawMessage",
"brandingset": "json.RawMessage",
"compactlayoutinfo": "json.RawMessage",
"deploydetails": "json.RawMessage",
"flexipage": "json.RawMessage",
"customfieldmetadata": "json.RawMessage",
"customfield": "json.RawMessage",
"customtabmetadata": "json.RawMessage",
"customlabel": "json.RawMessage",
"embeddedserviceconfig": "json.RawMessage",
"embeddedserviceflowconfig": "json.RawMessage",
"embeddedservicemenusettings": "json.RawMessage",
"publisher": "json.RawMessage",
"relationshipreferenceto": "json.RawMessage",
"flexipagemetadata": "json.RawMessage",
"flowdefinition": "json.RawMessage",
"flow": "json.RawMessage",
"customvalue[]": "json.RawMessage",
"array of typeextent": "json.RawMessage",
"mns:inboundnetworkconnection": "json.RawMessage",
"mns:keywordlist": "json.RawMessage",
"datacategorygroupreference": "json.RawMessage",
"mns:layout": "json.RawMessage",
"mns:lightningcomponentbundle": "json.RawMessage",
"user": "json.RawMessage",
"location": "json.RawMessage",
"lookupfilter": "json.RawMessage",
"mns: managedcontenttype": "json.RawMessage",
"mns:moderationrule": "json.RawMessage",
"operationparameters": "json.RawMessage",
"mns:outboundnetworkconnection": "json.RawMessage",
"subscriberpackageinstallerrors": "json.RawMessage",
"msn:pathassistant": "json.RawMessage",
"querylocator": "json.RawMessage",
"mns: recommendationstrategy": "json.RawMessage",
"mns:recordactiondeploymentchannel": "json.RawMessage",
"relationshipinfo": "json.RawMessage",
"queryresultmetadata": "json.RawMessage",
"searchlayoutbuttonsdisplayed": "json.RawMessage",
"standardvalue[]": "json.RawMessage",
"subscriberpackagecsptrustedsites": "json.RawMessage",
"properties": "json.RawMessage",
"array of constructor": "json.RawMessage",
"authprovider": "json.RawMessage",
"validationrule metadata": "json.RawMessage",
"any type": "json.RawMessage",
"recordtypessupported": "json.RawMessage",
"datatype": "json.RawMessage",
"mns:flowdefinition": "json.RawMessage",
"ratelimittimeperiod (enumeration of type string)": "json.RawMessage",
"subscriberpackageprofilemappings": "json.RawMessage",
"sobject": "json.RawMessage",
"mns:recordactiondeploymentcontext": "json.RawMessage",
"array of mapvalue": "json.RawMessage",
"searchlayoutfieldsdisplayed": "json.RawMessage",
"subscriberpackagedependencies": "json.RawMessage",
"array of externalreference": "json.RawMessage",
"userentityaccess": "json.RawMessage",
"moderationruletype (enumeration of type string)": "json.RawMessage",
"mns:recordactiondeployment": "json.RawMessage",
"raw": "json.RawMessage",
"array of symboltable": "json.RawMessage",
"mns:recordactionrecommendation": "json.RawMessage",
"subscriberpackageprofiles": "json.RawMessage",
"array of string": "json.RawMessage",
"mns:recordactionselectableitem": "json.RawMessage",
"subscriberpackageremotesitesettings": "json.RawMessage",
"array of method": "json.RawMessage",
"array of visibilitysymbol": "json.RawMessage",
"array of symbol": "json.RawMessage",
//
"[]*field": "[]*Field",
"[]int64": "[]int64",
"*tar": "*Tar",
"*zar": "*Zar",
"types.AlmostBool": "types.AlmostBool",
}
var nillableDataTypes = map[string]string{
"int": "types.NullableInt",
"bool": "types.NullableBool",
"string": "types.NullableString",
"float64": "types.NullableFloat64",
}
// prepareDatatype checks for property.Name specific overrides before calling convertType to
// produce a datatype for the given property
func prepareDatatype(propertyName, rawType string) (string, error) {
if rawType == "" {
potentialType, exists := edgecaseDatatypes[strings.ToLower(propertyName)]
if exists {
return potentialType, nil
}
}
return convertType(rawType)
}
// convertType attempts to convert a string representation of a salesforce datatype
// to a string representation of a golang datatype. Salesforce datatypes may be retrieved from 3
// locations:
//
// 1) the DataType field of a FieldDefinition from the tooling/query API
// 2) the HTML table within the object and tooling reference documentation pages
// 3) the HTML table within the field reference documentation page
//
// importantly these 3 sources use different string representations for the same type. for example,
// the object reference docs may call a foreign-key-like datatype a "reference" and the field reference docs may
// call it an "id". For that reason this method is prone to faults and should error hard, forcing users
// to ensure proper usage and outputs.
//
// Also, there is at least one instance of a typo that suggests much of the reference documentation
// was handwritten or ad-libbed.
func convertType(str string) (string, error) {
if str == "" {
return "", fmt.Errorf("convertingType '%s' (empty string)", str)
}
dataType, exists := dataTypes[strings.ToLower(strings.TrimSpace(str))]
if exists {
return dataType, nil
}
return str, fmt.Errorf("convertingType '%s'", strings.ToLower(str))
}
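// Illustrative example (editor's note, not part of the original source; assumes
// "mystery" is not a key in dataTypes):
//
//	goType, err := convertType("datetime") // "types.Datetime", nil
//	goType, err = convertType("checkbox")  // "bool", nil
//	goType, err = convertType("mystery")   // "mystery", error "convertingType 'mystery'"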
// convertNillableType returns a nillable type extending convertType
func convertNillableType(str string) (string, error) {
dataType, err := convertType(str)
if err != nil {
return "", err
}
nillableDataType, exists := nillableDataTypes[dataType]
if exists {
return nillableDataType, nil
}
return str, fmt.Errorf("convertingNillableType '%s'", dataType)
}
// commonInitialisms is a list of common initialisms.
// This list should contain a subset of `golang.org/x/lint/golint`.
var commonInitialisms = [][]string{
{"ACL", "Acl"},
{"API", "Api"},
{" | "mns:embeddedservicefieldservice": "string",
"mns: customobject": "string", | random_line_split |
|
stringutils.go | json.RawMessage",
"searchlayoutbuttonsdisplayed": "json.RawMessage",
"standardvalue[]": "json.RawMessage",
"subscriberpackagecsptrustedsites": "json.RawMessage",
"properties": "json.RawMessage",
"array of constructor": "json.RawMessage",
"authprovider": "json.RawMessage",
"validationrule metadata": "json.RawMessage",
"any type": "json.RawMessage",
"recordtypessupported": "json.RawMessage",
"datatype": "json.RawMessage",
"mns:flowdefinition": "json.RawMessage",
"ratelimittimeperiod (enumeration of type string)": "json.RawMessage",
"subscriberpackageprofilemappings": "json.RawMessage",
"sobject": "json.RawMessage",
"mns:recordactiondeploymentcontext": "json.RawMessage",
"array of mapvalue": "json.RawMessage",
"searchlayoutfieldsdisplayed": "json.RawMessage",
"subscriberpackagedependencies": "json.RawMessage",
"array of externalreference": "json.RawMessage",
"userentityaccess": "json.RawMessage",
"moderationruletype (enumeration of type string)": "json.RawMessage",
"mns:recordactiondeployment": "json.RawMessage",
"raw": "json.RawMessage",
"array of symboltable": "json.RawMessage",
"mns:recordactionrecommendation": "json.RawMessage",
"subscriberpackageprofiles": "json.RawMessage",
"array of string": "json.RawMessage",
"mns:recordactionselectableitem": "json.RawMessage",
"subscriberpackageremotesitesettings": "json.RawMessage",
"array of method": "json.RawMessage",
"array of visibilitysymbol": "json.RawMessage",
"array of symbol": "json.RawMessage",
//
"[]*field": "[]*Field",
"[]int64": "[]int64",
"*tar": "*Tar",
"*zar": "*Zar",
"types.AlmostBool": "types.AlmostBool",
}
var nillableDataTypes = map[string]string{
"int": "types.NullableInt",
"bool": "types.NullableBool",
"string": "types.NullableString",
"float64": "types.NullableFloat64",
}
// prepareDatatype checks for property.Name specific overrides before calling convertType to
// produce a datatype for the given property
func prepareDatatype(propertyName, rawType string) (string, error) {
if rawType == "" {
potentialType, exists := edgecaseDatatypes[strings.ToLower(propertyName)]
if exists {
return potentialType, nil
}
}
return convertType(rawType)
}
// convertType attempts to convert a string representation of a salesforce datatype
// to a string representation of a golang datatype. Salesforce datatypes may be retrieved from 3
// locations:
//
// 1) the DataType field of a FieldDefinition from the tooling/query API
// 2) the HTML table within the object and tooling reference documentation pages
// 3) the HTML table within the field reference documentation page
//
// importantly these 3 sources use different string representations for the same type. for example,
// the object reference docs may call a foreign-key-like datatype a "reference" and the field reference docs may
// call it an "id". For that reason this method is prone to faults and should error hard, forcing users
// to ensure proper usage and outputs.
//
// Also, there is at least one instance of a typo that suggests much of the reference documentation
// was handwritten or ad-libbed.
func convertType(str string) (string, error) {
if str == "" {
return "", fmt.Errorf("convertingType '%s' (empty string)", str)
}
dataType, exists := dataTypes[strings.ToLower(strings.TrimSpace(str))]
if exists {
return dataType, nil
}
return str, fmt.Errorf("convertingType '%s'", strings.ToLower(str))
}
// convertNillableType returns a nillable type extending convertType
func convertNillableType(str string) (string, error) {
dataType, err := convertType(str)
if err != nil {
return "", err
}
nillableDataType, exists := nillableDataTypes[dataType]
if exists {
return nillableDataType, nil
}
return str, fmt.Errorf("convertingNillableType '%s'", dataType)
}
// commonInitialisms is a list of common initialisms.
// This list should contain a subset of `golang.org/x/lint/golint`.
var commonInitialisms = [][]string{
{"ACL", "Acl"},
{"API", "Api"},
{"ASCII", "Ascii"},
{"CPU", "Cpu"},
{"CSS", "Css"},
{"DNS", "Dns"},
{"EOF", "Eof"},
{"GUID", "Guid"},
{"HTML", "Html"},
{"HTTP", "Http"},
{"HTTPS", "Https"},
{"ID", "Id"},
{"IP", "Ip"},
{"JSON", "Json"},
{"LHS", "Lhs"},
{"QPS", "Qps"},
{"RAM", "Ram"},
{"RHS", "Rhs"},
{"RPC", "Rpc"},
{"SLA", "Sla"},
{"SMTP", "Smtp"},
{"SObject", "Sobject"},
{"SObjects", "Sobjects"},
{"SQL", "Sql"},
{"SSH", "Ssh"},
{"TCP", "Tcp"},
{"TLS", "Tls"},
{"TTL", "Ttl"},
{"UDP", "Udp"},
{"UI", "Ui"},
{"UID", "Uid"},
{"UUID", "Uuid"},
{"URI", "Uri"},
{"URL", "Url"},
{"UTF8", "Utf8"},
{"VM", "Vm"},
{"XML", "Xml"},
{"XMPP", "Xmpp"},
{"XSRF", "Xsrf"},
{"XSS", "Xss"},
}
// convertInitialisms returns a string converted to Go case.
func convertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][1], commonInitialisms[i][0])
}
return s
}
// revertInitialisms returns a string converted from Go case to normal case.
func revertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][0], commonInitialisms[i][1])
}
return s
}
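// Illustrative example (editor's note, not part of the original source):
//
//	convertInitialisms("AccountId") // "AccountID"
//	revertInitialisms("AccountID")  // "AccountId"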
var keywords = map[string]string{
"break": "salesforce_break",
"default": "salesforce_default",
"func": "salesforce_func",
"interface": "salesforce_interface",
"select": "salesforce_select",
"case": "salesforce_case",
"defer": "salesforce_defer",
"go": "salesforce_go",
"map": "salesforce_map",
"struct": "salesforce_struct",
"chan": "salesforce_chan",
"else": "salesforce_else",
"goto": "salesforce_goto",
"package": "salesforce_package",
"switch": "salesforce_switch",
"const": "salesforce_const",
"fallthrough": "salesforce_fallthrough",
"if": "salesforce_if",
"range": "salesforce_range",
"type": "salesforce_type",
"continue": "salesforce_continue",
"for": "salesforce_for",
"import": "salesforce_import",
"return": "salesforce_return",
"var": "salesforce_var",
}
// stripReservedKeywords ensures no Golang keywords will be used in a package name
func stripReservedKeywords(str string) string {
if sanitizedValue, exists := keywords[str]; exists {
return sanitizedValue
}
return str
}
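// Illustrative example (editor's note, not part of the original source):
//
//	stripReservedKeywords("type")    // "salesforce_type"
//	stripReservedKeywords("account") // "account" (unchanged)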
func toCamelInitCase(s string, initCase bool) string {
s = strings.TrimSpace(s)
if s == "" {
return s
}
n := strings.Builder{}
n.Grow(len(s))
capNext := initCase
for i, v := range []byte(s) {
vIsCap := v >= 'A' && v <= 'Z'
vIsLow := v >= 'a' && v <= 'z'
if capNext {
if vIsLow {
v += 'A'
v -= 'a'
}
} else if i == 0 {
if vIsCap {
v += 'a'
v -= 'A'
}
}
if vIsCap || vIsLow {
n.WriteByte(v)
capNext = false
} else if vIsNum := v >= '0' && v <= '9'; vIsNum {
n.WriteByte(v)
capNext = true
} else {
capNext = v == '_' || v == ' ' || v == '-' || v == '.'
}
}
return n.String()
}
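// Illustrative example (editor's note, not part of the original source):
//
//	toCamelInitCase("hello_world", true)  // "HelloWorld"
//	toCamelInitCase("hello world", false) // "helloWorld"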
// toCamelCase converts a string to CamelCase.
func | toCamelCase | identifier_name |
|
stringutils.go | ",
"mns: managedcontenttype": "json.RawMessage",
"mns:moderationrule": "json.RawMessage",
"operationparameters": "json.RawMessage",
"mns:outboundnetworkconnection": "json.RawMessage",
"subscriberpackageinstallerrors": "json.RawMessage",
"msn:pathassistant": "json.RawMessage",
"querylocator": "json.RawMessage",
"mns: recommendationstrategy": "json.RawMessage",
"mns:recordactiondeploymentchannel": "json.RawMessage",
"relationshipinfo": "json.RawMessage",
"queryresultmetadata": "json.RawMessage",
"searchlayoutbuttonsdisplayed": "json.RawMessage",
"standardvalue[]": "json.RawMessage",
"subscriberpackagecsptrustedsites": "json.RawMessage",
"properties": "json.RawMessage",
"array of constructor": "json.RawMessage",
"authprovider": "json.RawMessage",
"validationrule metadata": "json.RawMessage",
"any type": "json.RawMessage",
"recordtypessupported": "json.RawMessage",
"datatype": "json.RawMessage",
"mns:flowdefinition": "json.RawMessage",
"ratelimittimeperiod (enumeration of type string)": "json.RawMessage",
"subscriberpackageprofilemappings": "json.RawMessage",
"sobject": "json.RawMessage",
"mns:recordactiondeploymentcontext": "json.RawMessage",
"array of mapvalue": "json.RawMessage",
"searchlayoutfieldsdisplayed": "json.RawMessage",
"subscriberpackagedependencies": "json.RawMessage",
"array of externalreference": "json.RawMessage",
"userentityaccess": "json.RawMessage",
"moderationruletype (enumeration of type string)": "json.RawMessage",
"mns:recordactiondeployment": "json.RawMessage",
"raw": "json.RawMessage",
"array of symboltable": "json.RawMessage",
"mns:recordactionrecommendation": "json.RawMessage",
"subscriberpackageprofiles": "json.RawMessage",
"array of string": "json.RawMessage",
"mns:recordactionselectableitem": "json.RawMessage",
"subscriberpackageremotesitesettings": "json.RawMessage",
"array of method": "json.RawMessage",
"array of visibilitysymbol": "json.RawMessage",
"array of symbol": "json.RawMessage",
//
"[]*field": "[]*Field",
"[]int64": "[]int64",
"*tar": "*Tar",
"*zar": "*Zar",
"types.AlmostBool": "types.AlmostBool",
}
var nillableDataTypes = map[string]string{
"int": "types.NullableInt",
"bool": "types.NullableBool",
"string": "types.NullableString",
"float64": "types.NullableFloat64",
}
// prepareDatatype checks for property.Name specific overrides before calling convertType to
// produce a datatype for the given property
func prepareDatatype(propertyName, rawType string) (string, error) {
if rawType == "" {
potentialType, exists := edgecaseDatatypes[strings.ToLower(propertyName)]
if exists {
return potentialType, nil
}
}
return convertType(rawType)
}
// convertType attempts to convert a string representation of a salesforce datatype
// to a string representation of a golang datatype. Salesforce datatypes may be retrieved from 3
// locations:
//
// 1) the DataType field of a FieldDefinition from the tooling/query API
// 2) the HTML table within the object and tooling reference documentation pages
// 3) the HTML table within the field reference documentation page
//
// importantly these 3 sources use different string representations for the same type. for example,
// the object reference docs may call a foreign-key-like datatype a "reference" and the field reference docs may
// call it an "id". For that reason this method is prone to faults and should error hard, forcing users
// to ensure proper usage and outputs.
//
// Also, there is at least one instance of a typo that suggests much of the reference documentation
// was handwritten or ad-libbed.
func convertType(str string) (string, error) {
if str == "" {
return "", fmt.Errorf("convertingType '%s' (empty string)", str)
}
dataType, exists := dataTypes[strings.ToLower(strings.TrimSpace(str))]
if exists {
return dataType, nil
}
return str, fmt.Errorf("convertingType '%s'", strings.ToLower(str))
}
// convertNillableType returns a nillable type extending convertType
func convertNillableType(str string) (string, error) {
dataType, err := convertType(str)
if err != nil {
return "", err
}
nillableDataType, exists := nillableDataTypes[dataType]
if exists {
return nillableDataType, nil
}
return str, fmt.Errorf("convertingNillableType '%s'", dataType)
}
// commonInitialisms is a list of common initialisms.
// This list should contain a subset of `golang.org/x/lint/golint`.
var commonInitialisms = [][]string{
{"ACL", "Acl"},
{"API", "Api"},
{"ASCII", "Ascii"},
{"CPU", "Cpu"},
{"CSS", "Css"},
{"DNS", "Dns"},
{"EOF", "Eof"},
{"GUID", "Guid"},
{"HTML", "Html"},
{"HTTP", "Http"},
{"HTTPS", "Https"},
{"ID", "Id"},
{"IP", "Ip"},
{"JSON", "Json"},
{"LHS", "Lhs"},
{"QPS", "Qps"},
{"RAM", "Ram"},
{"RHS", "Rhs"},
{"RPC", "Rpc"},
{"SLA", "Sla"},
{"SMTP", "Smtp"},
{"SObject", "Sobject"},
{"SObjects", "Sobjects"},
{"SQL", "Sql"},
{"SSH", "Ssh"},
{"TCP", "Tcp"},
{"TLS", "Tls"},
{"TTL", "Ttl"},
{"UDP", "Udp"},
{"UI", "Ui"},
{"UID", "Uid"},
{"UUID", "Uuid"},
{"URI", "Uri"},
{"URL", "Url"},
{"UTF8", "Utf8"},
{"VM", "Vm"},
{"XML", "Xml"},
{"XMPP", "Xmpp"},
{"XSRF", "Xsrf"},
{"XSS", "Xss"},
}
// convertInitialisms returns a string converted to Go case.
func convertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][1], commonInitialisms[i][0])
}
return s
}
// revertInitialisms returns a string converted from Go case to normal case.
func revertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][0], commonInitialisms[i][1])
}
return s
}
var keywords = map[string]string{
"break": "salesforce_break",
"default": "salesforce_default",
"func": "salesforce_func",
"interface": "salesforce_interface",
"select": "salesforce_select",
"case": "salesforce_case",
"defer": "salesforce_defer",
"go": "salesforce_go",
"map": "salesforce_map",
"struct": "salesforce_struct",
"chan": "salesforce_chan",
"else": "salesforce_else",
"goto": "salesforce_goto",
"package": "salesforce_package",
"switch": "salesforce_switch",
"const": "salesforce_const",
"fallthrough": "salesforce_fallthrough",
"if": "salesforce_if",
"range": "salesforce_range",
"type": "salesforce_type",
"continue": "salesforce_continue",
"for": "salesforce_for",
"import": "salesforce_import",
"return": "salesforce_return",
"var": "salesforce_var",
}
// stripReservedKeywords ensures no Golang keywords will be used in a package name
func stripReservedKeywords(str string) string {
if sanitizedValue, exists := keywords[str]; exists {
return sanitizedValue
}
return str
}
func toCamelInitCase(s string, initCase bool) string {
s = strings.TrimSpace(s)
if s == "" {
return s
}
n := strings.Builder{}
n.Grow(len(s))
capNext := initCase
for i, v := range []byte(s) {
vIsCap := v >= 'A' && v <= 'Z'
vIsLow := v >= 'a' && v <= 'z'
if capNext | {
if vIsLow {
v += 'A'
v -= 'a'
}
} | conditional_block |
|
stringutils.go | apexcomponentmetadata": "json.RawMessage",
"entitydefinition": "json.RawMessage",
"fielddefinition": "json.RawMessage",
"apexresult": "json.RawMessage",
"heapdump": "json.RawMessage",
"soqlresult": "json.RawMessage",
"apexpagemetadata": "json.RawMessage",
"executeanonymousresult": "json.RawMessage",
"apextriggermetadata": "json.RawMessage",
"brandingset": "json.RawMessage",
"compactlayoutinfo": "json.RawMessage",
"deploydetails": "json.RawMessage",
"flexipage": "json.RawMessage",
"customfieldmetadata": "json.RawMessage",
"customfield": "json.RawMessage",
"customtabmetadata": "json.RawMessage",
"customlabel": "json.RawMessage",
"embeddedserviceconfig": "json.RawMessage",
"embeddedserviceflowconfig": "json.RawMessage",
"embeddedservicemenusettings": "json.RawMessage",
"publisher": "json.RawMessage",
"relationshipreferenceto": "json.RawMessage",
"flexipagemetadata": "json.RawMessage",
"flowdefinition": "json.RawMessage",
"flow": "json.RawMessage",
"customvalue[]": "json.RawMessage",
"array of typeextent": "json.RawMessage",
"mns:inboundnetworkconnection": "json.RawMessage",
"mns:keywordlist": "json.RawMessage",
"datacategorygroupreference": "json.RawMessage",
"mns:layout": "json.RawMessage",
"mns:lightningcomponentbundle": "json.RawMessage",
"user": "json.RawMessage",
"location": "json.RawMessage",
"lookupfilter": "json.RawMessage",
"mns: managedcontenttype": "json.RawMessage",
"mns:moderationrule": "json.RawMessage",
"operationparameters": "json.RawMessage",
"mns:outboundnetworkconnection": "json.RawMessage",
"subscriberpackageinstallerrors": "json.RawMessage",
"msn:pathassistant": "json.RawMessage",
"querylocator": "json.RawMessage",
"mns: recommendationstrategy": "json.RawMessage",
"mns:recordactiondeploymentchannel": "json.RawMessage",
"relationshipinfo": "json.RawMessage",
"queryresultmetadata": "json.RawMessage",
"searchlayoutbuttonsdisplayed": "json.RawMessage",
"standardvalue[]": "json.RawMessage",
"subscriberpackagecsptrustedsites": "json.RawMessage",
"properties": "json.RawMessage",
"array of constructor": "json.RawMessage",
"authprovider": "json.RawMessage",
"validationrule metadata": "json.RawMessage",
"any type": "json.RawMessage",
"recordtypessupported": "json.RawMessage",
"datatype": "json.RawMessage",
"mns:flowdefinition": "json.RawMessage",
"ratelimittimeperiod (enumeration of type string)": "json.RawMessage",
"subscriberpackageprofilemappings": "json.RawMessage",
"sobject": "json.RawMessage",
"mns:recordactiondeploymentcontext": "json.RawMessage",
"array of mapvalue": "json.RawMessage",
"searchlayoutfieldsdisplayed": "json.RawMessage",
"subscriberpackagedependencies": "json.RawMessage",
"array of externalreference": "json.RawMessage",
"userentityaccess": "json.RawMessage",
"moderationruletype (enumeration of type string)": "json.RawMessage",
"mns:recordactiondeployment": "json.RawMessage",
"raw": "json.RawMessage",
"array of symboltable": "json.RawMessage",
"mns:recordactionrecommendation": "json.RawMessage",
"subscriberpackageprofiles": "json.RawMessage",
"array of string": "json.RawMessage",
"mns:recordactionselectableitem": "json.RawMessage",
"subscriberpackageremotesitesettings": "json.RawMessage",
"array of method": "json.RawMessage",
"array of visibilitysymbol": "json.RawMessage",
"array of symbol": "json.RawMessage",
//
"[]*field": "[]*Field",
"[]int64": "[]int64",
"*tar": "*Tar",
"*zar": "*Zar",
"types.AlmostBool": "types.AlmostBool",
}
var nillableDataTypes = map[string]string{
"int": "types.NullableInt",
"bool": "types.NullableBool",
"string": "types.NullableString",
"float64": "types.NullableFloat64",
}
// prepareDatatype checks for property.Name-specific overrides before calling convertType to
// produce a datatype for the given property
func prepareDatatype(propertyName, rawType string) (string, error) {
if rawType == "" {
potentialType, exists := edgecaseDatatypes[strings.ToLower(propertyName)]
if exists {
return potentialType, nil
}
}
return convertType(rawType)
}
// convertType attempts to convert a string representation of a salesforce datatype
// to a string representation of a golang datatype. Salesforce datatypes may be retrieved from 3
// locations:
//
// 1) the DataType field of a FieldDefinition from the tooling/query API
// 2) the HTML table within the object and tooling reference documentation pages
// 3) the HTML table within the field reference documentation page
//
// Importantly, these 3 sources use different string representations for the same type. For example,
// the object reference docs may call a foreign-key-like datatype a "reference" while the field reference docs may
// call it an "id". For that reason this method is prone to faults and should error hard, forcing users
// to ensure proper usage and outputs.
//
// Also, there is at least one typo suggesting that much of the reference documentation
// was handwritten or ad-libbed.
func convertType(str string) (string, error) {
if str == "" {
return "", fmt.Errorf("convertingType '%s' (empty string)", str)
}
dataType, exists := dataTypes[strings.ToLower(strings.TrimSpace(str))]
if exists {
return dataType, nil
}
return str, fmt.Errorf("convertingType '%s'", strings.ToLower(str))
}
// convertNillableType returns a nillable Go type, extending the mapping done by convertType
func convertNillableType(str string) (string, error) {
dataType, err := convertType(str)
if err != nil {
return "", err
}
nillableDataType, exists := nillableDataTypes[dataType]
if exists {
return nillableDataType, nil
}
return str, fmt.Errorf("convertingNillableType '%s'", dataType)
}
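// Illustrative sketch (added; not part of the original file) of how the helpers above
// compose. The property name "IsDeleted" is hypothetical, and the raw type "boolean"
// mapping to "bool" is an assumption about an entry in the full dataTypes map, which
// is truncated in this excerpt.
//
//   goType, err := prepareDatatype("IsDeleted", "boolean")   // "bool", nil (falls through to convertType)
//   nillableType, err := convertNillableType("boolean")      // "types.NullableBool", nil
//   _, err = convertType("not-a-real-type")                  // non-nil error; callers are expected to fail hard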
// commonInitialisms is a list of common initialisms.
// This list is a subset of the initialisms recognized by `golang.org/x/lint/golint`.
var commonInitialisms = [][]string{
{"ACL", "Acl"},
{"API", "Api"},
{"ASCII", "Ascii"},
{"CPU", "Cpu"},
{"CSS", "Css"},
{"DNS", "Dns"},
{"EOF", "Eof"},
{"GUID", "Guid"},
{"HTML", "Html"},
{"HTTP", "Http"},
{"HTTPS", "Https"},
{"ID", "Id"},
{"IP", "Ip"},
{"JSON", "Json"},
{"LHS", "Lhs"},
{"QPS", "Qps"},
{"RAM", "Ram"},
{"RHS", "Rhs"},
{"RPC", "Rpc"},
{"SLA", "Sla"},
{"SMTP", "Smtp"},
{"SObject", "Sobject"},
{"SObjects", "Sobjects"},
{"SQL", "Sql"},
{"SSH", "Ssh"},
{"TCP", "Tcp"},
{"TLS", "Tls"},
{"TTL", "Ttl"},
{"UDP", "Udp"},
{"UI", "Ui"},
{"UID", "Uid"},
{"UUID", "Uuid"},
{"URI", "Uri"},
{"URL", "Url"},
{"UTF8", "Utf8"},
{"VM", "Vm"},
{"XML", "Xml"},
{"XMPP", "Xmpp"},
{"XSRF", "Xsrf"},
{"XSS", "Xss"},
}
// convertInitialisms returns a string converted to Go case (e.g. "Id" becomes "ID").
func convertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][1], commonInitialisms[i][0])
}
return s
}
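// Worked example (added for illustration; not part of the original file):
//
//   convertInitialisms("AccountId") // "AccountID"
//   revertInitialisms("AccountID")  // "AccountId"
//
// Replacements are applied in array order, so overlapping initialisms such as
// HTTP/HTTPS depend on that ordering.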
// revertInitialisms returns a string converted from Go case back to normal case (e.g. "ID" becomes "Id").
func revertInitialisms(s string) string | {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][0], commonInitialisms[i][1])
}
return s
} | identifier_body |
|
deepracer_racetrack_env.py | ING_JOB_ARN')
self.target_number_of_episodes = rospy.get_param('NUMBER_OF_EPISODES')
self.target_reward_score = rospy.get_param('TARGET_REWARD_SCORE')
else:
from markov.defaults import reward_function
self.reward_function = reward_function
self.number_of_trials = 0
self.target_number_of_trials = rospy.get_param('NUMBER_OF_TRIALS')
# Read in the waypoints
BUNDLE_CURRENT_PREFIX = os.environ.get("BUNDLE_CURRENT_PREFIX", None)
if not BUNDLE_CURRENT_PREFIX:
raise ValueError("Cannot get BUNDLE_CURRENT_PREFIX")
route_file_name = os.path.join(BUNDLE_CURRENT_PREFIX,
'install', 'deepracer_simulation', 'share',
'deepracer_simulation', 'routes', '{}.npy'.format(self.world_name))
waypoints = np.load(route_file_name)
self.is_loop = np.all(waypoints[0,:] == waypoints[-1,:])
if self.is_loop:
self.center_line = LinearRing(waypoints[:,0:2])
self.inner_border = LinearRing(waypoints[:,2:4])
self.outer_border = LinearRing(waypoints[:,4:6])
self.road_poly = Polygon(self.outer_border, [self.inner_border])
else:
self.center_line = LineString(waypoints[:,0:2])
self.inner_border = LineString(waypoints[:,2:4])
self.outer_border = LineString(waypoints[:,4:6])
self.road_poly = Polygon(np.vstack((self.outer_border, np.flipud(self.inner_border))))
self.center_dists = [self.center_line.project(Point(p), normalized=True) for p in self.center_line.coords[:-1]] + [1.0]
self.track_length = self.center_line.length
# Initialize state data
self.episodes = 0
self.start_dist = 0.0
self.round_robin = (self.job_type == TRAINING_JOB)
self.is_simulation_done = False
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
self.steps = 0
self.simulation_start_time = 0
self.reverse_dir = False
def reset(self):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample()
# Simulation is done - so RoboMaker will start to shut down the app.
# Until RoboMaker shuts down the app, do nothing further, otherwise metrics may show unexpected data.
if (node_type == SIMULATION_WORKER) and self.is_simulation_done:
while True:
time.sleep(1)
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
# Reset the car and record the simulation start time
self.send_action(0, 0)
self.racecar_reset()
time.sleep(SLEEP_AFTER_RESET_TIME_IN_SECOND)
self.steps = 0
self.simulation_start_time = time.time()
# Compute the initial state
self.infer_reward_state(0, 0)
return self.next_state
def racecar_reset(self):
rospy.wait_for_service('/gazebo/set_model_state')
# Compute the starting position and heading
next_point_index = bisect.bisect(self.center_dists, self.start_dist)
start_point = self.center_line.interpolate(self.start_dist, normalized=True)
start_yaw = math.atan2(
self.center_line.coords[next_point_index][1] - start_point.y,
self.center_line.coords[next_point_index][0] - start_point.x)
start_quaternion = Rotation.from_euler('zyx', [start_yaw, 0, 0]).as_quat()
# Construct the model state and send to Gazebo
modelState = ModelState()
modelState.model_name = 'racecar'
modelState.pose.position.x = start_point.x
modelState.pose.position.y = start_point.y
modelState.pose.position.z = 0
modelState.pose.orientation.x = start_quaternion[0]
modelState.pose.orientation.y = start_quaternion[1]
modelState.pose.orientation.z = start_quaternion[2]
modelState.pose.orientation.w = start_quaternion[3]
modelState.twist.linear.x = 0
modelState.twist.linear.y = 0
modelState.twist.linear.z = 0
modelState.twist.angular.x = 0
modelState.twist.angular.y = 0
modelState.twist.angular.z = 0
self.set_model_state(modelState)
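# Added note (not in the original file): the yaw above is the heading of the track
# tangent at the spawn point. For example, if the next waypoint lies directly "north"
# of the start point (dx=0, dy=1), math.atan2(1, 0) == pi/2 and
# Rotation.from_euler('zyx', [pi/2, 0, 0]).as_quat() is approximately
# [0, 0, 0.7071, 0.7071] (x, y, z, w), i.e. a 90-degree rotation about the z axis.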
def | (self, action):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample(), 0, False, {}
# Initialize next state, reward, done flag
self.next_state = None
self.reward = None
self.done = False
# Send this action to Gazebo and increment the step count
self.steering_angle = float(action[0])
self.speed = float(action[1])
self.send_action(self.steering_angle, self.speed)
time.sleep(SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND)
self.steps += 1
# Compute the next state and reward
self.infer_reward_state(self.steering_angle, self.speed)
return self.next_state, self.reward, self.done, {}
def callback_image(self, data):
self.image = data
def send_action(self, steering_angle, speed):
ack_msg = AckermannDriveStamped()
ack_msg.header.stamp = rospy.Time.now()
ack_msg.drive.steering_angle = steering_angle
ack_msg.drive.speed = speed
self.ack_publisher.publish(ack_msg)
def infer_reward_state(self, steering_angle, speed):
rospy.wait_for_service('/gazebo/get_model_state')
rospy.wait_for_service('/gazebo/get_link_state')
# Wait until we have an image from the camera
# btown TODO: Incorporate feedback from callejae@ here (CR-6434645 rev1)
while not self.image:
time.sleep(SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND)
# Read model state from Gazebo
model_state = self.get_model_state('racecar', '')
model_orientation = Rotation.from_quat([
model_state.pose.orientation.x,
model_state.pose.orientation.y,
model_state.pose.orientation.z,
model_state.pose.orientation.w])
model_location = np.array([
model_state.pose.position.x,
model_state.pose.position.y,
model_state.pose.position.z]) + \
model_orientation.apply(RELATIVE_POSITION_OF_FRONT_OF_CAR)
model_point = Point(model_location[0], model_location[1])
model_heading = model_orientation.as_euler('zyx')[0]
# Read the wheel locations from Gazebo
left_rear_wheel_state = self.get_link_state('racecar::left_rear_wheel', '')
left_front_wheel_state = self.get_link_state('racecar::left_front_wheel', '')
right_rear_wheel_state = self.get_link_state('racecar::right_rear_wheel', '')
right_front_wheel_state = self.get_link_state('racecar::right_front_wheel', '')
wheel_points = [
Point(left_rear_wheel_state.link_state.pose.position.x,
left_rear_wheel_state.link_state.pose.position.y),
Point(left_front_wheel_state.link_state.pose.position.x,
left_front_wheel_state.link_state.pose.position.y),
Point(right_rear_wheel_state.link_state.pose.position.x,
right_rear_wheel_state.link_state.pose.position.y),
Point(right_front_wheel_state.link_state.pose.position.x,
right_front_wheel_state.link_state.pose.position.y)
]
# Project the current location onto the center line and find nearest points
current_dist = self.center_line.project(model_point, normalized=True)
next_waypoint_index = max(0, min(bisect.bisect(self.center_dists, current_dist), len(self.center_dists) - 1))
prev_waypoint_index = next_waypoint_index - 1
distance_from_next = model_point.distance(Point(self.center_line.coords[next_waypoint_index]))
distance_from_prev = model_point.distance(Point(self.center_line.coords[prev_waypoint_index]))
closest_waypoint_index = (prev_waypoint_index, next_waypoint_index)[distance_from_next < distance_from_prev]
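# Added clarification: the tuple indexing above selects next_waypoint_index when the
# next waypoint is strictly closer (a True condition picks element 1 of the tuple),
# and prev_waypoint_index otherwise.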
# Compute distance from center and road width
nearest_point_center = self.center_line.interpolate(current_dist, normalized=True)
nearest_point_inner = self.inner_border.interpolate(self.inner_border.project(nearest_point_center))
nearest_point_outer = self.outer_border.interpolate(self.outer_border.project(nearest_point_center))
distance_from_center = nearest_point_center.distance(model_point)
distance_from_inner = nearest_point_inner.distance(model_point)
distance_from_outer = nearest_point_outer.distance(model_point)
track_width = nearest_point_inner.distance(ne | step | identifier_name |
deepracer_racetrack_env.py | _state.pose.orientation.y,
model_state.pose.orientation.z,
model_state.pose.orientation.w])
model_location = np.array([
model_state.pose.position.x,
model_state.pose.position.y,
model_state.pose.position.z]) + \
model_orientation.apply(RELATIVE_POSITION_OF_FRONT_OF_CAR)
model_point = Point(model_location[0], model_location[1])
model_heading = model_orientation.as_euler('zyx')[0]
# Read the wheel locations from Gazebo
left_rear_wheel_state = self.get_link_state('racecar::left_rear_wheel', '')
left_front_wheel_state = self.get_link_state('racecar::left_front_wheel', '')
right_rear_wheel_state = self.get_link_state('racecar::right_rear_wheel', '')
right_front_wheel_state = self.get_link_state('racecar::right_front_wheel', '')
wheel_points = [
Point(left_rear_wheel_state.link_state.pose.position.x,
left_rear_wheel_state.link_state.pose.position.y),
Point(left_front_wheel_state.link_state.pose.position.x,
left_front_wheel_state.link_state.pose.position.y),
Point(right_rear_wheel_state.link_state.pose.position.x,
right_rear_wheel_state.link_state.pose.position.y),
Point(right_front_wheel_state.link_state.pose.position.x,
right_front_wheel_state.link_state.pose.position.y)
]
# Project the current location onto the center line and find nearest points
current_dist = self.center_line.project(model_point, normalized=True)
next_waypoint_index = max(0, min(bisect.bisect(self.center_dists, current_dist), len(self.center_dists) - 1))
prev_waypoint_index = next_waypoint_index - 1
distance_from_next = model_point.distance(Point(self.center_line.coords[next_waypoint_index]))
distance_from_prev = model_point.distance(Point(self.center_line.coords[prev_waypoint_index]))
closest_waypoint_index = (prev_waypoint_index, next_waypoint_index)[distance_from_next < distance_from_prev]
# Compute distance from center and road width
nearest_point_center = self.center_line.interpolate(current_dist, normalized=True)
nearest_point_inner = self.inner_border.interpolate(self.inner_border.project(nearest_point_center))
nearest_point_outer = self.outer_border.interpolate(self.outer_border.project(nearest_point_center))
distance_from_center = nearest_point_center.distance(model_point)
distance_from_inner = nearest_point_inner.distance(model_point)
distance_from_outer = nearest_point_outer.distance(model_point)
track_width = nearest_point_inner.distance(nearest_point_outer)
is_left_of_center = (distance_from_outer < distance_from_inner) if self.reverse_dir \
else (distance_from_inner < distance_from_outer)
# Convert current progress to be [0,100] starting at the initial waypoint
current_progress = current_dist - self.start_dist
if current_progress < 0.0: current_progress = current_progress + 1.0
current_progress = 100 * current_progress
if current_progress < self.prev_progress:
# Either: (1) we wrapped around and have finished the track,
delta1 = current_progress + 100 - self.prev_progress
# or (2) for some reason the car went backwards (this should be rare)
delta2 = self.prev_progress - current_progress
current_progress = (self.prev_progress, 100)[delta1 < delta2]
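# Added worked example (not in the original file): if prev_progress was 99.5 and the
# car crosses the start line so current_progress computes to 0.2, then
# delta1 = 0.2 + 100 - 99.5 = 0.7 (wrap-around) and delta2 = 99.5 - 0.2 = 99.3
# (reversing), so delta1 < delta2 and progress is treated as 100, i.e. lap complete.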
# Car is off track if all wheels are outside the borders
wheel_on_track = [self.road_poly.contains(p) for p in wheel_points]
all_wheels_on_track = all(wheel_on_track)
any_wheels_on_track = any(wheel_on_track)
# Compute the reward
if any_wheels_on_track:
done = False
params = {
'all_wheels_on_track': all_wheels_on_track,
'x': model_point.x,
'y': model_point.y,
'heading': model_heading * 180.0 / math.pi,
'distance_from_center': distance_from_center,
'progress': current_progress,
'steps': self.steps,
'speed': speed,
'steering_angle': steering_angle * 180.0 / math.pi,
'track_width': track_width,
'waypoints': list(self.center_line.coords),
'closest_waypoints': [prev_waypoint_index, next_waypoint_index],
'is_left_of_center': is_left_of_center,
'is_reversed': self.reverse_dir
}
reward = self.reward_function(params)
else:
done = True
reward = CRASHED
# Reset if the car position hasn't changed in the last 2 steps
if min(model_point.distance(self.prev_point), model_point.distance(self.prev_point_2)) <= 0.0001:
done = True
reward = CRASHED # stuck
# Simulation jobs are done when progress reaches 100
if current_progress >= 100:
done = True
# Keep data from the previous step around
self.prev_point_2 = self.prev_point
self.prev_point = model_point
self.prev_progress = current_progress
# Read the image and resize to get the state
image = Image.frombytes('RGB', (self.image.width, self.image.height), self.image.data, 'raw', 'RGB', 0, 1)
image = image.resize(TRAINING_IMAGE_SIZE, resample=2)
state = np.array(image)
# Set the next state, reward, and done flag
self.next_state = state
self.reward = reward
self.reward_in_episode += reward
self.done = done
# Trace logs to help us debug and visualize the training runs
# btown TODO: This should be written to S3, not to CWL.
stdout_ = 'SIM_TRACE_LOG:%d,%d,%.4f,%.4f,%.4f,%.2f,%.2f,%d,%.4f,%s,%s,%.4f,%d,%.2f,%s\n' % (
self.episodes, self.steps, model_location[0], model_location[1], model_heading,
self.steering_angle,
self.speed,
self.action_taken,
self.reward,
self.done,
all_wheels_on_track,
current_progress,
closest_waypoint_index,
self.track_length,
time.time())
print(stdout_)
# Terminate this episode when ready
if self.done and node_type == SIMULATION_WORKER:
self.finish_episode(current_progress)
def finish_episode(self, progress):
# Stop the car from moving
self.send_action(0, 0)
# Increment episode count, update start dist for round robin
self.episodes += 1
if self.round_robin:
self.start_dist = (self.start_dist + ROUND_ROBIN_ADVANCE_DIST) % 1.0
# Update metrics based on job type
if self.job_type == TRAINING_JOB:
self.send_reward_to_cloudwatch(self.reward_in_episode)
self.update_training_metrics()
self.write_metrics_to_s3()
if self.is_training_done():
self.cancel_simulation_job()
elif self.job_type == EVALUATION_JOB:
self.number_of_trials += 1
self.update_eval_metrics(progress)
self.write_metrics_to_s3()
if self.is_evaluation_done():
self.cancel_simulation_job()
def update_eval_metrics(self, progress):
eval_metric = {}
eval_metric['completion_percentage'] = int(progress)
eval_metric['metric_time'] = int(round(time.time() * 1000))
eval_metric['start_time'] = int(round(self.simulation_start_time * 1000))
eval_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
eval_metric['trial'] = int(self.number_of_trials)
self.metrics.append(eval_metric)
def update_training_metrics(self):
training_metric = {}
training_metric['reward_score'] = int(round(self.reward_in_episode))
training_metric['metric_time'] = int(round(time.time() * 1000))
training_metric['start_time'] = int(round(self.simulation_start_time * 1000))
training_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
training_metric['episode'] = int(self.episodes)
self.metrics.append(training_metric)
def write_metrics_to_s3(self):
session = boto3.session.Session()
s3_url = os.environ.get('S3_ENDPOINT_URL')
s3_client = session.client('s3', region_name=self.aws_region, endpoint_url=s3_url)
metrics_body = json.dumps({'metrics': self.metrics})
s3_client.put_object(
Bucket=self.metrics_s3_bucket,
Key=self.metrics_s3_object_key,
Body=bytes(metrics_body, encoding='utf-8')
)
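# Added example (not in the original file): for a training job the uploaded JSON body
# looks roughly like this, with one entry appended per completed episode:
#
#   {"metrics": [{"reward_score": 123, "metric_time": 1554000060000,
#                 "start_time": 1554000000000, "elapsed_time_in_milliseconds": 60000,
#                 "episode": 1}]}
#
# The numeric values are made up; the keys mirror update_training_metrics above.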
def is_evaluation_done(self):
if ((self.target_number_of_trials > 0) and (self.target_number_of_trials == self.number_of_trials)):
self.is_simulation_done = True
return self.is_simulation_done
def is_training_done(self):
if ((self.target_number_of_episodes > 0) and (self.target_number_of_episodes == self.episodes)) or \
((self.is_number(self.target_reward_score)) and (self.target_reward_score <= self.reward_in_episode)):
| self.is_simulation_done = True | conditional_block |
|
deepracer_racetrack_env.py | TRAINING_JOB_ARN')
self.target_number_of_episodes = rospy.get_param('NUMBER_OF_EPISODES')
self.target_reward_score = rospy.get_param('TARGET_REWARD_SCORE')
else:
from markov.defaults import reward_function
self.reward_function = reward_function
self.number_of_trials = 0
self.target_number_of_trials = rospy.get_param('NUMBER_OF_TRIALS')
# Read in the waypoints
BUNDLE_CURRENT_PREFIX = os.environ.get("BUNDLE_CURRENT_PREFIX", None)
if not BUNDLE_CURRENT_PREFIX:
raise ValueError("Cannot get BUNDLE_CURRENT_PREFIX")
route_file_name = os.path.join(BUNDLE_CURRENT_PREFIX,
'install', 'deepracer_simulation', 'share',
'deepracer_simulation', 'routes', '{}.npy'.format(self.world_name))
waypoints = np.load(route_file_name)
self.is_loop = np.all(waypoints[0,:] == waypoints[-1,:])
if self.is_loop:
self.center_line = LinearRing(waypoints[:,0:2])
self.inner_border = LinearRing(waypoints[:,2:4])
self.outer_border = LinearRing(waypoints[:,4:6])
self.road_poly = Polygon(self.outer_border, [self.inner_border])
else:
self.center_line = LineString(waypoints[:,0:2])
self.inner_border = LineString(waypoints[:,2:4])
self.outer_border = LineString(waypoints[:,4:6])
self.road_poly = Polygon(np.vstack((self.outer_border, np.flipud(self.inner_border))))
self.center_dists = [self.center_line.project(Point(p), normalized=True) for p in self.center_line.coords[:-1]] + [1.0]
self.track_length = self.center_line.length
# Initialize state data
self.episodes = 0
self.start_dist = 0.0
self.round_robin = (self.job_type == TRAINING_JOB)
self.is_simulation_done = False
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
self.steps = 0
self.simulation_start_time = 0
self.reverse_dir = False
def reset(self):
|
# Reset the car and record the simulation start time
self.send_action(0, 0)
self.racecar_reset()
time.sleep(SLEEP_AFTER_RESET_TIME_IN_SECOND)
self.steps = 0
self.simulation_start_time = time.time()
# Compute the initial state
self.infer_reward_state(0, 0)
return self.next_state
def racecar_reset(self):
rospy.wait_for_service('/gazebo/set_model_state')
# Compute the starting position and heading
next_point_index = bisect.bisect(self.center_dists, self.start_dist)
start_point = self.center_line.interpolate(self.start_dist, normalized=True)
start_yaw = math.atan2(
self.center_line.coords[next_point_index][1] - start_point.y,
self.center_line.coords[next_point_index][0] - start_point.x)
start_quaternion = Rotation.from_euler('zyx', [start_yaw, 0, 0]).as_quat()
# Construct the model state and send to Gazebo
modelState = ModelState()
modelState.model_name = 'racecar'
modelState.pose.position.x = start_point.x
modelState.pose.position.y = start_point.y
modelState.pose.position.z = 0
modelState.pose.orientation.x = start_quaternion[0]
modelState.pose.orientation.y = start_quaternion[1]
modelState.pose.orientation.z = start_quaternion[2]
modelState.pose.orientation.w = start_quaternion[3]
modelState.twist.linear.x = 0
modelState.twist.linear.y = 0
modelState.twist.linear.z = 0
modelState.twist.angular.x = 0
modelState.twist.angular.y = 0
modelState.twist.angular.z = 0
self.set_model_state(modelState)
def step(self, action):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample(), 0, False, {}
# Initialize next state, reward, done flag
self.next_state = None
self.reward = None
self.done = False
# Send this action to Gazebo and increment the step count
self.steering_angle = float(action[0])
self.speed = float(action[1])
self.send_action(self.steering_angle, self.speed)
time.sleep(SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND)
self.steps += 1
# Compute the next state and reward
self.infer_reward_state(self.steering_angle, self.speed)
return self.next_state, self.reward, self.done, {}
def callback_image(self, data):
self.image = data
def send_action(self, steering_angle, speed):
ack_msg = AckermannDriveStamped()
ack_msg.header.stamp = rospy.Time.now()
ack_msg.drive.steering_angle = steering_angle
ack_msg.drive.speed = speed
self.ack_publisher.publish(ack_msg)
def infer_reward_state(self, steering_angle, speed):
rospy.wait_for_service('/gazebo/get_model_state')
rospy.wait_for_service('/gazebo/get_link_state')
# Wait until we have an image from the camera
# btown TODO: Incorporate feedback from callejae@ here (CR-6434645 rev1)
while not self.image:
time.sleep(SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND)
# Read model state from Gazebo
model_state = self.get_model_state('racecar', '')
model_orientation = Rotation.from_quat([
model_state.pose.orientation.x,
model_state.pose.orientation.y,
model_state.pose.orientation.z,
model_state.pose.orientation.w])
model_location = np.array([
model_state.pose.position.x,
model_state.pose.position.y,
model_state.pose.position.z]) + \
model_orientation.apply(RELATIVE_POSITION_OF_FRONT_OF_CAR)
model_point = Point(model_location[0], model_location[1])
model_heading = model_orientation.as_euler('zyx')[0]
# Read the wheel locations from Gazebo
left_rear_wheel_state = self.get_link_state('racecar::left_rear_wheel', '')
left_front_wheel_state = self.get_link_state('racecar::left_front_wheel', '')
right_rear_wheel_state = self.get_link_state('racecar::right_rear_wheel', '')
right_front_wheel_state = self.get_link_state('racecar::right_front_wheel', '')
wheel_points = [
Point(left_rear_wheel_state.link_state.pose.position.x,
left_rear_wheel_state.link_state.pose.position.y),
Point(left_front_wheel_state.link_state.pose.position.x,
left_front_wheel_state.link_state.pose.position.y),
Point(right_rear_wheel_state.link_state.pose.position.x,
right_rear_wheel_state.link_state.pose.position.y),
Point(right_front_wheel_state.link_state.pose.position.x,
right_front_wheel_state.link_state.pose.position.y)
]
# Project the current location onto the center line and find nearest points
current_dist = self.center_line.project(model_point, normalized=True)
next_waypoint_index = max(0, min(bisect.bisect(self.center_dists, current_dist), len(self.center_dists) - 1))
prev_waypoint_index = next_waypoint_index - 1
distance_from_next = model_point.distance(Point(self.center_line.coords[next_waypoint_index]))
distance_from_prev = model_point.distance(Point(self.center_line.coords[prev_waypoint_index]))
closest_waypoint_index = (prev_waypoint_index, next_waypoint_index)[distance_from_next < distance_from_prev]
# Compute distance from center and road width
nearest_point_center = self.center_line.interpolate(current_dist, normalized=True)
nearest_point_inner = self.inner_border.interpolate(self.inner_border.project(nearest_point_center))
nearest_point_outer = self.outer_border.interpolate(self.outer_border.project(nearest_point_center))
distance_from_center = nearest_point_center.distance(model_point)
distance_from_inner = nearest_point_inner.distance(model_point)
distance_from_outer = nearest_point_outer.distance(model_point)
track_width = nearest_point_inner.distance(ne | if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample()
# Simulation is done - so RoboMaker will start to shut down the app.
# Until RoboMaker shuts down the app, do nothing further, otherwise metrics may show unexpected data.
if (node_type == SIMULATION_WORKER) and self.is_simulation_done:
while True:
time.sleep(1)
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False | identifier_body |
deepracer_racetrack_env.py | _wheel_state.link_state.pose.position.y),
Point(right_rear_wheel_state.link_state.pose.position.x,
right_rear_wheel_state.link_state.pose.position.y),
Point(right_front_wheel_state.link_state.pose.position.x,
right_front_wheel_state.link_state.pose.position.y)
]
# Project the current location onto the center line and find nearest points
current_dist = self.center_line.project(model_point, normalized=True)
next_waypoint_index = max(0, min(bisect.bisect(self.center_dists, current_dist), len(self.center_dists) - 1))
prev_waypoint_index = next_waypoint_index - 1
distance_from_next = model_point.distance(Point(self.center_line.coords[next_waypoint_index]))
distance_from_prev = model_point.distance(Point(self.center_line.coords[prev_waypoint_index]))
closest_waypoint_index = (prev_waypoint_index, next_waypoint_index)[distance_from_next < distance_from_prev]
# Compute distance from center and road width
nearest_point_center = self.center_line.interpolate(current_dist, normalized=True)
nearest_point_inner = self.inner_border.interpolate(self.inner_border.project(nearest_point_center))
nearest_point_outer = self.outer_border.interpolate(self.outer_border.project(nearest_point_center))
distance_from_center = nearest_point_center.distance(model_point)
distance_from_inner = nearest_point_inner.distance(model_point)
distance_from_outer = nearest_point_outer.distance(model_point)
track_width = nearest_point_inner.distance(nearest_point_outer)
is_left_of_center = (distance_from_outer < distance_from_inner) if self.reverse_dir \
else (distance_from_inner < distance_from_outer)
# Convert current progress to be [0,100] starting at the initial waypoint
current_progress = current_dist - self.start_dist
if current_progress < 0.0: current_progress = current_progress + 1.0
current_progress = 100 * current_progress
if current_progress < self.prev_progress:
# Either: (1) we wrapped around and have finished the track,
delta1 = current_progress + 100 - self.prev_progress
# or (2) for some reason the car went backwards (this should be rare)
delta2 = self.prev_progress - current_progress
current_progress = (self.prev_progress, 100)[delta1 < delta2]
# Car is off track if all wheels are outside the borders
wheel_on_track = [self.road_poly.contains(p) for p in wheel_points]
all_wheels_on_track = all(wheel_on_track)
any_wheels_on_track = any(wheel_on_track)
# Compute the reward
if any_wheels_on_track:
done = False
params = {
'all_wheels_on_track': all_wheels_on_track,
'x': model_point.x,
'y': model_point.y,
'heading': model_heading * 180.0 / math.pi,
'distance_from_center': distance_from_center,
'progress': current_progress,
'steps': self.steps,
'speed': speed,
'steering_angle': steering_angle * 180.0 / math.pi,
'track_width': track_width,
'waypoints': list(self.center_line.coords),
'closest_waypoints': [prev_waypoint_index, next_waypoint_index],
'is_left_of_center': is_left_of_center,
'is_reversed': self.reverse_dir
}
reward = self.reward_function(params)
else:
done = True
reward = CRASHED
# Reset if the car position hasn't changed in the last 2 steps
if min(model_point.distance(self.prev_point), model_point.distance(self.prev_point_2)) <= 0.0001:
done = True
reward = CRASHED # stuck
# Simulation jobs are done when progress reaches 100
if current_progress >= 100:
done = True
# Keep data from the previous step around
self.prev_point_2 = self.prev_point
self.prev_point = model_point
self.prev_progress = current_progress
# Read the image and resize to get the state
image = Image.frombytes('RGB', (self.image.width, self.image.height), self.image.data, 'raw', 'RGB', 0, 1)
image = image.resize(TRAINING_IMAGE_SIZE, resample=2)
state = np.array(image)
# Set the next state, reward, and done flag
self.next_state = state
self.reward = reward
self.reward_in_episode += reward
self.done = done
# Trace logs to help us debug and visualize the training runs
# btown TODO: This should be written to S3, not to CWL.
stdout_ = 'SIM_TRACE_LOG:%d,%d,%.4f,%.4f,%.4f,%.2f,%.2f,%d,%.4f,%s,%s,%.4f,%d,%.2f,%s\n' % (
self.episodes, self.steps, model_location[0], model_location[1], model_heading,
self.steering_angle,
self.speed,
self.action_taken,
self.reward,
self.done,
all_wheels_on_track,
current_progress,
closest_waypoint_index,
self.track_length,
time.time())
print(stdout_)
# Terminate this episode when ready
if self.done and node_type == SIMULATION_WORKER:
self.finish_episode(current_progress)
def finish_episode(self, progress):
# Stop the car from moving
self.send_action(0, 0)
# Increment episode count, update start dist for round robin
self.episodes += 1
if self.round_robin:
self.start_dist = (self.start_dist + ROUND_ROBIN_ADVANCE_DIST) % 1.0
# Update metrics based on job type
if self.job_type == TRAINING_JOB:
self.send_reward_to_cloudwatch(self.reward_in_episode)
self.update_training_metrics()
self.write_metrics_to_s3()
if self.is_training_done():
self.cancel_simulation_job()
elif self.job_type == EVALUATION_JOB:
self.number_of_trials += 1
self.update_eval_metrics(progress)
self.write_metrics_to_s3()
if self.is_evaluation_done():
self.cancel_simulation_job()
def update_eval_metrics(self, progress):
eval_metric = {}
eval_metric['completion_percentage'] = int(progress)
eval_metric['metric_time'] = int(round(time.time() * 1000))
eval_metric['start_time'] = int(round(self.simulation_start_time * 1000))
eval_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
eval_metric['trial'] = int(self.number_of_trials)
self.metrics.append(eval_metric)
def update_training_metrics(self):
training_metric = {}
training_metric['reward_score'] = int(round(self.reward_in_episode))
training_metric['metric_time'] = int(round(time.time() * 1000))
training_metric['start_time'] = int(round(self.simulation_start_time * 1000))
training_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
training_metric['episode'] = int(self.episodes)
self.metrics.append(training_metric)
def write_metrics_to_s3(self):
session = boto3.session.Session()
s3_url = os.environ.get('S3_ENDPOINT_URL')
s3_client = session.client('s3', region_name=self.aws_region, endpoint_url=s3_url)
metrics_body = json.dumps({'metrics': self.metrics})
s3_client.put_object(
Bucket=self.metrics_s3_bucket,
Key=self.metrics_s3_object_key,
Body=bytes(metrics_body, encoding='utf-8')
)
def is_evaluation_done(self):
if ((self.target_number_of_trials > 0) and (self.target_number_of_trials == self.number_of_trials)):
self.is_simulation_done = True
return self.is_simulation_done
def is_training_done(self):
if ((self.target_number_of_episodes > 0) and (self.target_number_of_episodes == self.episodes)) or \
((self.is_number(self.target_reward_score)) and (self.target_reward_score <= self.reward_in_episode)):
self.is_simulation_done = True
return self.is_simulation_done
def is_number(self, value_to_check):
try:
float(value_to_check)
return True
except ValueError:
return False
def cancel_simulation_job(self):
self.send_action(0, 0)
session = boto3.session.Session()
robomaker_client = session.client('robomaker', region_name=self.aws_region)
robomaker_client.cancel_simulation_job(
job=self.simulation_job_arn
)
def send_reward_to_cloudwatch(self, reward):
isLocal = os.environ.get("LOCAL")
if isLocal == None:
session = boto3.session.Session()
cloudwatch_client = session.client('cloudwatch', region_name=self.aws_region)
cloudwatch_client.put_metric_data(
MetricData=[
{
'MetricName': self.metric_name,
'Dimensions': [
{
'Name': 'TRAINING_JOB_ARN',
'Value': self.training_job_arn
},
],
'Unit': 'None',
'Value': reward
}, | ], | random_line_split |
|
rabbitmq_server_relations.py | deal with data being
# removed from peer storage
relation_clear(relation_id)
# Each unit needs to set the db information; otherwise, if the unit
# with the info dies, the settings die with it. Bug# 1355848
exc_list = ['hostname', 'private-address']
for rel_id in relation_ids('amqp'):
peerdb_settings = peer_retrieve_by_prefix(rel_id,
exc_list=exc_list)
peerdb_settings['hostname'] = host_addr
peerdb_settings['private-address'] = host_addr
if 'password' in peerdb_settings:
relation_set(relation_id=rel_id, **peerdb_settings)
log('amqp_changed(): Deferring amqp_changed'
' to the leader.')
# NOTE: active/active case
if config('prefer-ipv6'):
relation_settings = {'private-address': host_addr}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
return
# Bail if not completely ready
if not rabbit.leader_node_is_ready():
return
relation_settings = {}
settings = relation_get(rid=relation_id, unit=remote_unit)
singleset = set(['username', 'vhost'])
if singleset.issubset(settings):
if None in [settings['username'], settings['vhost']]:
log('amqp_changed(): Relation not ready.')
return
relation_settings['password'] = configure_amqp(
username=settings['username'],
vhost=settings['vhost'],
admin=settings.get('admin', False))
else:
queues = {}
for k, v in settings.iteritems():
amqp = k.split('_')[0]
x = '_'.join(k.split('_')[1:])
if amqp not in queues:
queues[amqp] = {}
queues[amqp][x] = v
for amqp in queues:
if singleset.issubset(queues[amqp]):
relation_settings[
'_'.join([amqp, 'password'])] = configure_amqp(
queues[amqp]['username'],
queues[amqp]['vhost'])
relation_settings['hostname'] = \
relation_settings['private-address'] = \
rabbit.get_unit_ip()
ssl_utils.configure_client_ssl(relation_settings)
if is_clustered():
relation_settings['clustered'] = 'true'
if is_relation_made('ha'):
# active/passive settings
relation_settings['vip'] = config('vip')
# or ha-vip-only to support active/active, but
# accessed via a VIP for older clients.
if config('ha-vip-only') is True:
relation_settings['ha-vip-only'] = 'true'
# set if need HA queues or not
if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
relation_settings['ha_queues'] = True
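# Added clarification: cmp_pkgrevno() returns a negative value when the installed
# rabbitmq-server is older than 3.0.1. Only those pre-3.0 releases rely on the
# client-side x-ha-policy queue argument, so the ha_queues flag is only advertised
# to clients for old servers; newer servers use server-side policies instead (see
# set_ha_mode in configure_amqp).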
peer_store_and_set(relation_id=relation_id,
relation_settings=relation_settings)
@hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None):
relation_settings = {
'hostname': rabbit.get_unit_hostname(),
'private-address':
rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE),
}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
try:
if not is_leader():
log('Not the leader, deferring cookie propagation to leader')
return
except NotImplementedError:
if is_newer():
log('cluster_joined: Relation greater.')
return
if not os.path.isfile(rabbit.COOKIE_PATH):
|
if is_leader():
log('Leader peer_storing cookie', level=INFO)
cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
peer_store('cookie', cookie)
peer_store('leader_node_ip', unit_private_ip())
peer_store('leader_node_hostname', rabbit.get_unit_hostname())
@hooks.hook('cluster-relation-changed')
def cluster_changed(relation_id=None, remote_unit=None):
# Future travelers beware: ordering is significant
rdata = relation_get(rid=relation_id, unit=remote_unit)
# sync passwords
blacklist = ['hostname', 'private-address', 'public-address']
whitelist = [a for a in rdata.keys() if a not in blacklist]
peer_echo(includes=whitelist)
cookie = peer_retrieve('cookie')
if not cookie:
log('cluster_changed: cookie not yet set.', level=INFO)
return
if rdata:
hostname = rdata.get('hostname', None)
private_address = rdata.get('private-address', None)
if hostname and private_address:
rabbit.update_hosts_file({private_address: hostname})
# sync the cookie with peers if necessary
update_cookie()
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.', level=INFO)
return
# cluster with node?
if not is_leader():
rabbit.cluster_with()
update_nrpe_checks()
def update_cookie(leaders_cookie=None):
# sync cookie
if leaders_cookie:
cookie = leaders_cookie
else:
cookie = peer_retrieve('cookie')
cookie_local = None
with open(rabbit.COOKIE_PATH, 'r') as f:
cookie_local = f.read().strip()
if cookie_local == cookie:
log('Cookie already synchronized with peer.')
return
service_stop('rabbitmq-server')
with open(rabbit.COOKIE_PATH, 'wb') as out:
out.write(cookie)
if not is_unit_paused_set():
service_restart('rabbitmq-server')
rabbit.wait_app()
@hooks.hook('ha-relation-joined')
def ha_joined():
corosync_bindiface = config('ha-bindiface')
corosync_mcastport = config('ha-mcastport')
vip = config('vip')
vip_iface = config('vip_iface')
vip_cidr = config('vip_cidr')
rbd_name = config('rbd-name')
vip_only = config('ha-vip-only')
if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr, rbd_name] and vip_only is False:
log('Insufficient configuration data to configure hacluster.',
level=ERROR)
sys.exit(1)
elif None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr] and vip_only is True:
log('Insufficient configuration data to configure VIP-only hacluster.',
level=ERROR)
sys.exit(1)
if not is_relation_made('ceph', 'auth') and vip_only is False:
log('ha_joined: No ceph relation yet, deferring.')
return
name = '%s@localhost' % SERVICE_NAME
if rabbit.get_node_name() != name and vip_only is False:
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
rabbit.update_rmq_env_conf(hostname='%s@localhost' % SERVICE_NAME,
ipv6=config('prefer-ipv6'))
else:
log('Node name already set to %s.' % name)
relation_settings = {}
relation_settings['corosync_bindiface'] = corosync_bindiface
relation_settings['corosync_mcastport'] = corosync_mcastport
if vip_only is True:
relation_settings['resources'] = {
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
}
relation_settings['resource_params'] = {
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
}
else:
relation_settings['resources'] = {
'res_rabbitmq_rbd': 'ocf:ceph:rbd',
'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
'res_rabbitmq-server': 'lsb:rabbitmq-server',
}
relation_settings['resource_params'] = {
'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
'secret="%s"' %
(rbd_name, POOL_NAME,
SERVICE_NAME, ceph._keyfile_path(
SERVICE_NAME)),
'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
'fstype="ext4" op start start-delay="10s"' %
(POOL_NAME, rbd_name, RABBIT_DIR),
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
'res_rabbitmq-server': 'op start start-delay="5s | log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
level=ERROR)
return | conditional_block |
rabbitmq_server_relations.py |
def update_clients():
"""Update amqp client relation hooks
IFF leader node is ready. Client nodes are considered ready once the leader
has already run amqp_changed.
"""
if rabbit.leader_node_is_ready() or rabbit.client_node_is_ready():
for rid in relation_ids('amqp'):
for unit in related_units(rid):
amqp_changed(relation_id=rid, remote_unit=unit)
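# Added sketch (not part of the charm): leader_node_is_ready()/client_node_is_ready()
# gate the fan-out above, so a typical call sequence looks like:
#
#   update_clients()
#     -> amqp_changed(relation_id='amqp:0', remote_unit='nova-cloud-controller/0')
#     -> amqp_changed(relation_id='amqp:1', remote_unit='cinder/0')
#
# The relation ids and unit names are hypothetical examples.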
@hooks.hook('amqp-relation-changed')
def amqp_changed(relation_id=None, remote_unit=None):
host_addr = rabbit.get_unit_ip()
# TODO: Simplify what the non-leader needs to do
if not is_leader() and rabbit.client_node_is_ready():
# NOTE(jamespage) clear relation to deal with data being
# removed from peer storage
relation_clear(relation_id)
# Each unit needs to set the db information; otherwise, if the unit
# with the info dies, the settings die with it. Bug# 1355848
exc_list = ['hostname', 'private-address']
for rel_id in relation_ids('amqp'):
peerdb_settings = peer_retrieve_by_prefix(rel_id,
exc_list=exc_list)
peerdb_settings['hostname'] = host_addr
peerdb_settings['private-address'] = host_addr
if 'password' in peerdb_settings:
relation_set(relation_id=rel_id, **peerdb_settings)
log('amqp_changed(): Deferring amqp_changed'
' to the leader.')
# NOTE: active/active case
if config('prefer-ipv6'):
relation_settings = {'private-address': host_addr}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
return
# Bail if not completely ready
if not rabbit.leader_node_is_ready():
return
relation_settings = {}
settings = relation_get(rid=relation_id, unit=remote_unit)
singleset = set(['username', 'vhost'])
if singleset.issubset(settings):
if None in [settings['username'], settings['vhost']]:
log('amqp_changed(): Relation not ready.')
return
relation_settings['password'] = configure_amqp(
username=settings['username'],
vhost=settings['vhost'],
admin=settings.get('admin', False))
else:
queues = {}
for k, v in settings.iteritems():
amqp = k.split('_')[0]
x = '_'.join(k.split('_')[1:])
if amqp not in queues:
queues[amqp] = {}
queues[amqp][x] = v
for amqp in queues:
if singleset.issubset(queues[amqp]):
relation_settings[
'_'.join([amqp, 'password'])] = configure_amqp(
queues[amqp]['username'],
queues[amqp]['vhost'])
relation_settings['hostname'] = \
relation_settings['private-address'] = \
rabbit.get_unit_ip()
ssl_utils.configure_client_ssl(relation_settings)
if is_clustered():
relation_settings['clustered'] = 'true'
if is_relation_made('ha'):
# active/passive settings
relation_settings['vip'] = config('vip')
# or ha-vip-only to support active/active, but
# accessed via a VIP for older clients.
if config('ha-vip-only') is True:
relation_settings['ha-vip-only'] = 'true'
# set if need HA queues or not
if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
relation_settings['ha_queues'] = True
peer_store_and_set(relation_id=relation_id,
relation_settings=relation_settings)
@hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None):
relation_settings = {
'hostname': rabbit.get_unit_hostname(),
'private-address':
rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE),
}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
try:
if not is_leader():
log('Not the leader, deferring cookie propagation to leader')
return
except NotImplementedError:
if is_newer():
log('cluster_joined: Relation greater.')
return
if not os.path.isfile(rabbit.COOKIE_PATH):
log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
level=ERROR)
return
if is_leader():
log('Leader peer_storing cookie', level=INFO)
cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
peer_store('cookie', cookie)
peer_store('leader_node_ip', unit_private_ip())
peer_store('leader_node_hostname', rabbit.get_unit_hostname())
@hooks.hook('cluster-relation-changed')
def cluster_changed(relation_id=None, remote_unit=None):
# Future travelers beware: ordering is significant
rdata = relation_get(rid=relation_id, unit=remote_unit)
# sync passwords
blacklist = ['hostname', 'private-address', 'public-address']
whitelist = [a for a in rdata.keys() if a not in blacklist]
peer_echo(includes=whitelist)
cookie = peer_retrieve('cookie')
if not cookie:
log('cluster_changed: cookie not yet set.', level=INFO)
return
if rdata:
hostname = rdata.get('hostname', None)
private_address = rdata.get('private-address', None)
if hostname and private_address:
rabbit.update_hosts_file({private_address: hostname})
# sync the cookie with peers if necessary
update_cookie()
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.', level=INFO)
return
# cluster with node?
if not is_leader():
rabbit.cluster_with()
update_nrpe_checks()
def update_cookie(leaders_cookie=None):
# sync cookie
if leaders_cookie:
cookie = leaders_cookie
else:
cookie = peer_retrieve('cookie')
cookie_local = None
with open(rabbit.COOKIE_PATH, 'r') as f:
cookie_local = f.read().strip()
if cookie_local == cookie:
log('Cookie already synchronized with peer.')
return
service_stop('rabbitmq-server')
with open(rabbit.COOKIE_PATH, 'wb') as out:
out.write(cookie)
if not is_unit_paused_set():
service_restart('rabbitmq-server')
rabbit.wait_app()
@hooks.hook('ha-relation-joined')
def ha_joined():
corosync_bindiface = config('ha-bindiface')
corosync_mcastport = config('ha-mcastport')
vip = config('vip')
vip_iface = config('vip_iface')
vip_cidr = config('vip_cidr')
rbd_name = config('rbd-name')
vip_only = config('ha-vip-only')
if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr, rbd_name] and vip_only is False:
log('Insufficient configuration data to configure hacluster.',
level=ERROR)
sys.exit(1)
elif None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr] and vip_only is True:
log('Insufficient configuration data to configure VIP-only hacluster.',
level=ERROR)
sys.exit(1)
if not is_relation_made('ceph', 'auth') and vip_only is False:
log('ha_joined: No ceph relation yet, deferring.')
return
name = '%s@localhost' % SERVICE_NAME
if rabbit.get_node_name() != name and vip_only is False:
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
rabbit.update_rmq_env_conf(hostname='%s@localhost' % SERVICE_NAME,
ipv6=config('prefer-ipv6'))
else:
log('Node name already set to %s.' % name)
relation_settings = {}
relation_settings['corosync_bindiface'] = corosync_bindiface
relation_settings['corosync_mcastport'] = corosync_mcastport
if vip_only is True:
relation_settings['resources'] = {
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
}
relation_settings['resource_params'] = {
'res_rabbit | password = rabbit.get_rabbit_password(username)
# update vhost
rabbit.create_vhost(vhost)
rabbit.create_user(username, password, admin)
rabbit.grant_permissions(username, vhost)
# NOTE(freyes): after rabbitmq-server 3.0 the method to define HA in the
# queues is different
# http://www.rabbitmq.com/blog/2012/11/19/breaking-things-with-rabbitmq-3-0
if config('mirroring-queues'):
rabbit.set_ha_mode(vhost, 'all')
return password | identifier_body |
|
rabbitmq_server_relations.py | rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE),
}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
try:
if not is_leader():
log('Not the leader, deferring cookie propagation to leader')
return
except NotImplementedError:
if is_newer():
log('cluster_joined: Relation greater.')
return
if not os.path.isfile(rabbit.COOKIE_PATH):
log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
level=ERROR)
return
if is_leader():
log('Leader peer_storing cookie', level=INFO)
cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
peer_store('cookie', cookie)
peer_store('leader_node_ip', unit_private_ip())
peer_store('leader_node_hostname', rabbit.get_unit_hostname())
@hooks.hook('cluster-relation-changed')
def cluster_changed(relation_id=None, remote_unit=None):
# Future travelers beware: ordering is significant
rdata = relation_get(rid=relation_id, unit=remote_unit)
# sync passwords
blacklist = ['hostname', 'private-address', 'public-address']
whitelist = [a for a in rdata.keys() if a not in blacklist]
peer_echo(includes=whitelist)
cookie = peer_retrieve('cookie')
if not cookie:
log('cluster_changed: cookie not yet set.', level=INFO)
return
if rdata:
hostname = rdata.get('hostname', None)
private_address = rdata.get('private-address', None)
if hostname and private_address:
rabbit.update_hosts_file({private_address: hostname})
# sync the cookie with peers if necessary
update_cookie()
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.', level=INFO)
return
# cluster with node?
if not is_leader():
rabbit.cluster_with()
update_nrpe_checks()
def update_cookie(leaders_cookie=None):
# sync cookie
if leaders_cookie:
cookie = leaders_cookie
else:
cookie = peer_retrieve('cookie')
cookie_local = None
with open(rabbit.COOKIE_PATH, 'r') as f:
cookie_local = f.read().strip()
if cookie_local == cookie:
log('Cookie already synchronized with peer.')
return
service_stop('rabbitmq-server')
with open(rabbit.COOKIE_PATH, 'wb') as out:
out.write(cookie)
if not is_unit_paused_set():
service_restart('rabbitmq-server')
rabbit.wait_app()
@hooks.hook('ha-relation-joined')
def ha_joined():
corosync_bindiface = config('ha-bindiface')
corosync_mcastport = config('ha-mcastport')
vip = config('vip')
vip_iface = config('vip_iface')
vip_cidr = config('vip_cidr')
rbd_name = config('rbd-name')
vip_only = config('ha-vip-only')
if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr, rbd_name] and vip_only is False:
log('Insufficient configuration data to configure hacluster.',
level=ERROR)
sys.exit(1)
elif None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr] and vip_only is True:
log('Insufficient configuration data to configure VIP-only hacluster.',
level=ERROR)
sys.exit(1)
if not is_relation_made('ceph', 'auth') and vip_only is False:
log('ha_joined: No ceph relation yet, deferring.')
return
name = '%s@localhost' % SERVICE_NAME
if rabbit.get_node_name() != name and vip_only is False:
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
rabbit.update_rmq_env_conf(hostname='%s@localhost' % SERVICE_NAME,
ipv6=config('prefer-ipv6'))
else:
log('Node name already set to %s.' % name)
relation_settings = {}
relation_settings['corosync_bindiface'] = corosync_bindiface
relation_settings['corosync_mcastport'] = corosync_mcastport
if vip_only is True:
relation_settings['resources'] = {
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
}
relation_settings['resource_params'] = {
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
}
else:
relation_settings['resources'] = {
'res_rabbitmq_rbd': 'ocf:ceph:rbd',
'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
'res_rabbitmq-server': 'lsb:rabbitmq-server',
}
relation_settings['resource_params'] = {
'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
'secret="%s"' %
(rbd_name, POOL_NAME,
SERVICE_NAME, ceph._keyfile_path(
SERVICE_NAME)),
'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
'fstype="ext4" op start start-delay="10s"' %
(POOL_NAME, rbd_name, RABBIT_DIR),
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
'res_rabbitmq-server': 'op start start-delay="5s" '
'op monitor interval="5s"',
}
relation_settings['groups'] = {
'grp_rabbitmq':
'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
'res_rabbitmq-server',
}
for rel_id in relation_ids('ha'):
relation_set(relation_id=rel_id, relation_settings=relation_settings)
env_vars = {
'OPENSTACK_PORT_EPMD': 4369,
'OPENSTACK_PORT_MCASTPORT': config('ha-mcastport'),
}
save_script_rc(**env_vars)
@hooks.hook('ha-relation-changed')
def ha_changed():
if not is_clustered():
return
vip = config('vip')
log('ha_changed(): We are now HA clustered. '
'Advertising our VIP (%s) to all AMQP clients.' %
vip)
@hooks.hook('ceph-relation-joined')
def ceph_joined():
log('Start Ceph Relation Joined')
# NOTE fixup
# utils.configure_source()
ceph.install()
log('Finish Ceph Relation Joined')
@hooks.hook('ceph-relation-changed')
def ceph_changed():
log('Start Ceph Relation Changed')
auth = relation_get('auth')
key = relation_get('key')
use_syslog = str(config('use-syslog')).lower()
if None in [auth, key]:
log('Missing key or auth in relation')
sys.exit(0)
ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
use_syslog=use_syslog)
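    # Only the elected leader provisions the shared RBD-backed storage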
if is_elected_leader('res_rabbitmq_vip'):
rbd_img = config('rbd-name')
rbd_size = config('rbd-size')
sizemb = int(rbd_size.split('G')[0]) * 1024
blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
ceph.create_pool(service=SERVICE_NAME, name=POOL_NAME,
replicas=int(config('ceph-osd-replication-count')))
ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
rbd_img=rbd_img, sizemb=sizemb,
fstype='ext4', mount_point=RABBIT_DIR,
blk_device=blk_device,
system_services=['rabbitmq-server'])
subprocess.check_call(['chown', '-R', '%s:%s' %
(RABBIT_USER, RABBIT_GROUP), RABBIT_DIR])
else:
log('This is not the peer leader. Not configuring RBD.')
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
# If 'ha' relation has been made before the 'ceph' relation
# it is important to make sure the ha-relation data is being
# sent.
if is_relation_made('ha'):
log('*ha* relation exists. Triggering ha_joined()')
ha_joined()
else:
log('*ha* relation does not exist.')
log('Finish Ceph Relation Changed')
@hooks.hook('nrpe-external-master-relation-changed')
def | update_nrpe_checks | identifier_name |
|
rabbitmq_server_relations.py | ceph_changed():
log('Start Ceph Relation Changed')
auth = relation_get('auth')
key = relation_get('key')
use_syslog = str(config('use-syslog')).lower()
if None in [auth, key]:
log('Missing key or auth in relation')
sys.exit(0)
ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
use_syslog=use_syslog)
if is_elected_leader('res_rabbitmq_vip'):
rbd_img = config('rbd-name')
rbd_size = config('rbd-size')
sizemb = int(rbd_size.split('G')[0]) * 1024
blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
ceph.create_pool(service=SERVICE_NAME, name=POOL_NAME,
replicas=int(config('ceph-osd-replication-count')))
ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
rbd_img=rbd_img, sizemb=sizemb,
fstype='ext4', mount_point=RABBIT_DIR,
blk_device=blk_device,
system_services=['rabbitmq-server'])
subprocess.check_call(['chown', '-R', '%s:%s' %
(RABBIT_USER, RABBIT_GROUP), RABBIT_DIR])
else:
log('This is not the peer leader. Not configuring RBD.')
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
# If 'ha' relation has been made before the 'ceph' relation
# it is important to make sure the ha-relation data is being
# sent.
if is_relation_made('ha'):
log('*ha* relation exists. Triggering ha_joined()')
ha_joined()
else:
log('*ha* relation does not exist.')
log('Finish Ceph Relation Changed')
@hooks.hook('nrpe-external-master-relation-changed')
def update_nrpe_checks():
if os.path.isdir(NAGIOS_PLUGINS):
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
'check_rabbitmq.py'),
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
'check_rabbitmq_queues.py'),
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
if config('stats_cron_schedule'):
script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
cronjob = "{} root {}\n".format(config('stats_cron_schedule'), script)
rsync(os.path.join(charm_dir(), 'scripts',
'collect_rabbitmq_stats.sh'), script)
write_file(STATS_CRONFILE, cronjob)
elif os.path.isfile(STATS_CRONFILE):
os.remove(STATS_CRONFILE)
# Find out if nrpe set nagios_hostname
hostname = nrpe.get_nagios_hostname()
myunit = nrpe.get_nagios_unit_name()
# create unique user and vhost for each unit
current_unit = local_unit().replace('/', '-')
user = 'nagios-%s' % current_unit
vhost = 'nagios-%s' % current_unit
password = rabbit.get_rabbit_password(user, local=True)
rabbit.create_vhost(vhost)
rabbit.create_user(user, password)
rabbit.grant_permissions(user, vhost)
nrpe_compat = nrpe.NRPE(hostname=hostname)
nrpe_compat.add_check(
shortname=rabbit.RABBIT_USER,
description='Check RabbitMQ {%s}' % myunit,
check_cmd='{}/check_rabbitmq.py --user {} --password {} --vhost {}'
''.format(NAGIOS_PLUGINS, user, password, vhost)
)
if config('queue_thresholds'):
cmd = ""
# If value of queue_thresholds is incorrect we want the hook to fail
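        # each threshold entry is passed as a '-c' argument group to
        # check_rabbitmq_queues.py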
for item in yaml.safe_load(config('queue_thresholds')):
cmd += ' -c "{}" "{}" {} {}'.format(*item)
nrpe_compat.add_check(
shortname=rabbit.RABBIT_USER + '_queue',
description='Check RabbitMQ Queues',
check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
NAGIOS_PLUGINS, cmd, STATS_DATAFILE)
)
nrpe_compat.write()
@hooks.hook('upgrade-charm')
@harden()
def upgrade_charm():
pre_install_hooks()
add_source(config('source'), config('key'))
apt_update(fatal=True)
    # Ensure older passwd files in /var/lib/juju are moved to
    # /var/lib/charm/<service> so they can be migrated to the peer relation
for f in [f for f in os.listdir('/var/lib/juju')
if os.path.isfile(os.path.join('/var/lib/juju', f))]:
if f.endswith('.passwd'):
s = os.path.join('/var/lib/juju', f)
d = os.path.join('/var/lib/charm/{}'.format(service_name()), f)
log('upgrade_charm: Migrating stored passwd'
' from %s to %s.' % (s, d))
shutil.move(s, d)
if is_elected_leader('res_rabbitmq_vip'):
rabbit.migrate_passwords_to_peer_relation()
# explicitly update buggy file name naigos.passwd
old = os.path.join('var/lib/rabbitmq', 'naigos.passwd')
if os.path.isfile(old):
new = os.path.join('var/lib/rabbitmq', 'nagios.passwd')
shutil.move(old, new)
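# RabbitMQ management plugin, toggled by the 'management_plugin' config option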
MAN_PLUGIN = 'rabbitmq_management'
@hooks.hook('config-changed')
@rabbit.restart_on_change(rabbit.restart_map())
@harden()
def config_changed():
# Update hosts with this unit's information
rabbit.update_hosts_file(
{rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE):
rabbit.get_unit_hostname()})
# Add archive source if provided
add_source(config('source'), config('key'))
apt_update(fatal=True)
# Copy in defaults file for updated ulimits
shutil.copyfile(
'templates/rabbitmq-server',
'/etc/default/rabbitmq-server')
# Install packages to ensure any changes to source
# result in an upgrade if applicable.
status_set('maintenance', 'Installing/upgrading RabbitMQ packages')
apt_install(rabbit.PACKAGES, fatal=True)
open_port(5672)
chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
chmod(RABBIT_DIR, 0o775)
if config('management_plugin') is True:
rabbit.enable_plugin(MAN_PLUGIN)
open_port(rabbit.get_managment_port())
else:
rabbit.disable_plugin(MAN_PLUGIN)
close_port(rabbit.get_managment_port())
    # LY: Close the old management port since it may have been opened in a
# previous version of the charm. close_port is a noop if the port
# is not open
close_port(55672)
rabbit.ConfigRenderer(
rabbit.CONFIG_FILES).write_all()
# Only set values if this is the leader
if not is_leader():
return
rabbit.set_all_mirroring_queues(config('mirroring-queues'))
if is_relation_made("ha"):
ha_is_active_active = config("ha-vip-only")
if ha_is_active_active:
update_nrpe_checks()
else:
if is_elected_leader('res_rabbitmq_vip'):
update_nrpe_checks()
else:
log("hacluster relation is present but this node is not active"
" skipping update nrpe checks")
else:
update_nrpe_checks()
# Update cluster in case min-cluster-size has changed
for rid in relation_ids('cluster'):
for unit in related_units(rid):
cluster_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('leader-elected')
def leader_elected():
status_set("maintenance", "{} is the elected leader".format(local_unit()))
@hooks.hook('leader-settings-changed')
def leader_settings_changed():
if not os.path.exists(rabbit.RABBITMQ_CTL):
log('Deferring cookie configuration, RabbitMQ not yet installed')
return
# Get cookie from leader, update cookie locally and
# force cluster-relation-changed hooks to run on peers
cookie = leader_get(attribute='cookie')
if cookie:
update_cookie(leaders_cookie=cookie)
# Force cluster-relation-changed hooks to run on peers
# This will precipitate peer clustering
# Without this a chicken and egg scenario prevails when
# using LE and peerstorage
for rid in relation_ids('cluster'):
relation_set(relation_id=rid, relation_settings={'cookie': cookie})
def pre_install_hooks():
for f in glob.glob('exec.d/*/charm-pre-install'):
if os.path.isfile(f) and os.access(f, os.X_OK):
subprocess.check_call(['sh', '-c', f])
@hooks.hook('update-status')
@harden()
def update_status():
log('Updating status.')
if __name__ == '__main__':
try: | hooks.execute(sys.argv)
except UnregisteredHookError as e: | random_line_split |
|
preference_aggregation_featureless_online.py | 0), key=key)(None)
def set_subtract(self, key='loss'):
self.to_subtract = self.current_loss(key=key)['metrics']
def set_minibatch(self, mb):
for key, val in mb.items():
assert isinstance(key, str), key
assert isinstance(val, np.ndarray), val
self.minibatch = mb
def set_model_tensor(self, mt):
assert isinstance(mt, np.ndarray), mt
self.model_tensor = mt
def get_value(self, indices):
"""Get one from the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
return self.model_tensor[indices[0], indices[1], indices[2]]
def set_value(self, indices, val):
"""Set value to the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
self.model_tensor[indices[0], indices[1], indices[2]] = val
return val
def get_closure_loss(self, indices, key='loss'):
"""Return a function that computes the loss given model_tensor[indices]."""
def f(x=None, self=self, indices=indices[:], key=str(key)):
if self.minibatch is None:
raise ValueError("Please set the minibatch first")
if self.model_tensor is None:
raise ValueError("Please set the tensor")
if self.hypers is None:
raise ValueError("Please use gin to configure hyperparameters")
if x is not None:
self.set_value(indices=indices, val=x)
result = loss_fcn_np(model_tensor=self.model_tensor,
**self.hypers, **self.minibatch)
return {
'loss': result[key] - self.to_subtract.get(key, 0.0),
'metrics': {**result, 'param': x}
}
return f
def best_value_many_indices(self, indices_list, **kwargs):
"""Given a list of indices (with possible repeats), run optimization and return stats."""
indices_list = [tuple(x) for x in indices_list]
stats = {indices: [] for indices in set(indices_list)}
for indices in indices_list:
stats[indices].append(self.best_value_indices(indices=indices, **kwargs))
return stats
def best_value_indices(self, indices, key='loss', assign_at_end=False,
give_history=True):
"""Given indices, use Golden Ratio search to compute the best model_tensor[indices]."""
# remember the original value
orig_value = self.get_value(indices=indices)
# function to compute the loss
loss_closure = self.get_closure_loss(indices=indices, key=key)
orig_loss = loss_closure(orig_value)['loss']
# history of metrics
history = []
def closure_with_history(x, give_history=give_history):
"""Given a value, compute the loss, store the value (optionally) and return by key."""
result = loss_closure(x)
if give_history:
history.append(result['metrics'])
return result['loss']
params = {x: y for x, y in self.golden_params.items()}
if 'smartbracket' in self.golden_params:
del params['smartbracket']
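            # 'smartbracket' holds offsets from the current value; convert them
            # into an absolute bracketing interval for scipy's golden search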
params['brack'] = tuple([orig_value + t for t in self.golden_params['smartbracket']])
# running optimization
best_value, best_loss, iterations = golden(closure_with_history,
full_output=True, **params)
# restoring the value if requested
if not assign_at_end:
self.set_value(indices=indices, val=orig_value)
else:
if best_loss < orig_loss:
self.set_value(indices=indices, val=best_value)
else:
if not self.silent:
logging.warning(f"No improvement {indices} {key} {assign_at_end}")
self.set_value(indices=indices, val=orig_value)
best_value = orig_value
best_loss = orig_loss
return {
'assigned': assign_at_end,
'history': history,
'orig_value_param': orig_value,
'best_value': best_value,
'best_loss': best_loss,
'iterations': iterations
}
# Plotting functions
def get_history(result, ind, only_last=True):
if only_last:
res = [z['history'][-1] for z in result[ind]]
else:
res = [x for z in result[ind] for x in z['history']]
return res
def visualize_result_loss(result, indices_lst):
values_byloss = {'loss': []}
ind_offset = {ind: 0 for ind in set(indices_lst)}
for ind in indices_lst:
val = result[ind][ind_offset[ind]]['best_loss']
ind_offset[ind] += 1
values_byloss['loss'].append(val)
# for key in val:
# if key.startswith('loss'):
# if key not in values_byloss:
# values_byloss[key] = []
# values_byloss[key].append(val[key])
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(values_byloss.keys()), 1):
plt.subplot(1, len(values_byloss), i)
plt.title(key)
plt.plot(values_byloss[key])
# plt.axhline(online.to_subtract[key])
plt.show()
def visualize_byindex(result, indices_lst, initial_value):
for ind in set(indices_lst):
res = get_history(result, ind)
res_dct = lstdct2dctlst(res)
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(res_dct.keys()), 1):
hst = res_dct[key]
plt.subplot(1, len(res_dct), i)
plt.title(key + ' ' + str(ind))
if key in initial_value[ind]['metrics']:
| hst = [initial] + hst
plt.axhline(initial)
if np.min(hst) > 0:
plt.yscale('log')
plt.plot(hst)
plt.show()
def lstdct2dctlst(lst):
"""List of dictionaries -> dictionary of lists."""
keys = lst[0].keys()
res = {key: [x[key] for x in lst] for key in keys}
return res
def compute_online_update(rating_value, mb_np_orig,
model_tensor_orig,
idx_set,
users_get_value=None,
n_repeats=1,
hotfix_update_hypers=None,
plot_charts=False,
pbars=None,
cancel_updates=False,
**kwargs):
"""Compute an online update given a pre-computed minibatch of data, and a model tensor.
Args:
rating_value: value of the rating in (-1, 1) to set
mb_np_orig: minibatch for the loss function
model_tensor_orig: current scores (users + aggregate)
idx_set: which rating ID in the minibatch to update?
users_get_value: if pbar is set, this user's comparison will be used
n_repeats: number of repetitions of the golden ratio search
hotfix_update_hypers: updates for hyperparameters
plot_charts: show visualization
cancel_updates: do a dummy run without optimization (return current values)
pbars: if not None, set progress bar to the value of comparison (0-100) for user_get_value
**kwargs: hotfix updates for the golden ratio algorithm
Returns:
if pbar is set, returns None (only sets the value of the progress bar)
otherwise, returns a dictionary with a response.
"""
# internal object IDs to update
obj1 = mb_np_orig['objects_rating_v1'][idx_set]
obj2 = mb_np_orig['objects_rating_v2'][idx_set]
# copying the minibatch and the model tensor (will be updated)
mb_np_copy = deepcopy(mb_np_orig)
model_tensor_copy = deepcopy(model_tensor_orig)
if not cancel_updates:
# SETTING THE RATING VALUE
mb_np_copy['cmp'][idx_set, 0] = rating_value
# creating the updater
online = FeaturelessOnlineUpdater()
online.hypers['aggregate_index'] = -1
# hotfix parameter updates
if hotfix_update_hypers is not None:
for key, value in hotfix_update_hypers.items():
online.hypers[key] = value
for key, value in kwargs.items():
online.golden_params[key] = value
# setting data
online.set_minibatch(mb_np_copy)
online.set_model_tensor(model_tensor_copy)
online.set_subtract()
online.silent = True
# CONFIGURATION FOR INDICES
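    # optimize the scores of both compared objects for every model (users + aggregate)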
indices_lst = []
for i in range(model_tensor_orig.shape[0]):
indices_lst.append((i, obj1, 0))
indices_lst.append((i, obj2, 0))
indices_lst *= n_repeats
# initial value for the loss/index
initial_value = {ind: online.get_closure_loss(ind)(online.get_value(ind)) for ind in
set(indices_lst)}
if not cancel_updates:
# RUNNING | initial = initial_value[ind]['metrics'][key]
| random_line_split |
preference_aggregation_featureless_online.py | 0), key=key)(None)
def set_subtract(self, key='loss'):
self.to_subtract = self.current_loss(key=key)['metrics']
def set_minibatch(self, mb):
for key, val in mb.items():
assert isinstance(key, str), key
assert isinstance(val, np.ndarray), val
self.minibatch = mb
def set_model_tensor(self, mt):
assert isinstance(mt, np.ndarray), mt
self.model_tensor = mt
def get_value(self, indices):
"""Get one from the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
return self.model_tensor[indices[0], indices[1], indices[2]]
def set_value(self, indices, val):
"""Set value to the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
self.model_tensor[indices[0], indices[1], indices[2]] = val
return val
def get_closure_loss(self, indices, key='loss'):
"""Return a function that computes the loss given model_tensor[indices]."""
def f(x=None, self=self, indices=indices[:], key=str(key)):
if self.minibatch is None:
raise ValueError("Please set the minibatch first")
if self.model_tensor is None:
raise ValueError("Please set the tensor")
if self.hypers is None:
raise ValueError("Please use gin to configure hyperparameters")
if x is not None:
self.set_value(indices=indices, val=x)
result = loss_fcn_np(model_tensor=self.model_tensor,
**self.hypers, **self.minibatch)
return {
'loss': result[key] - self.to_subtract.get(key, 0.0),
'metrics': {**result, 'param': x}
}
return f
def best_value_many_indices(self, indices_list, **kwargs):
"""Given a list of indices (with possible repeats), run optimization and return stats."""
indices_list = [tuple(x) for x in indices_list]
stats = {indices: [] for indices in set(indices_list)}
for indices in indices_list:
stats[indices].append(self.best_value_indices(indices=indices, **kwargs))
return stats
def best_value_indices(self, indices, key='loss', assign_at_end=False,
give_history=True):
"""Given indices, use Golden Ratio search to compute the best model_tensor[indices]."""
# remember the original value
orig_value = self.get_value(indices=indices)
# function to compute the loss
loss_closure = self.get_closure_loss(indices=indices, key=key)
orig_loss = loss_closure(orig_value)['loss']
# history of metrics
history = []
def closure_with_history(x, give_history=give_history):
"""Given a value, compute the loss, store the value (optionally) and return by key."""
result = loss_closure(x)
if give_history:
history.append(result['metrics'])
return result['loss']
params = {x: y for x, y in self.golden_params.items()}
if 'smartbracket' in self.golden_params:
del params['smartbracket']
params['brack'] = tuple([orig_value + t for t in self.golden_params['smartbracket']])
# running optimization
best_value, best_loss, iterations = golden(closure_with_history,
full_output=True, **params)
# restoring the value if requested
if not assign_at_end:
self.set_value(indices=indices, val=orig_value)
else:
if best_loss < orig_loss:
self.set_value(indices=indices, val=best_value)
else:
if not self.silent:
logging.warning(f"No improvement {indices} {key} {assign_at_end}")
self.set_value(indices=indices, val=orig_value)
best_value = orig_value
best_loss = orig_loss
return {
'assigned': assign_at_end,
'history': history,
'orig_value_param': orig_value,
'best_value': best_value,
'best_loss': best_loss,
'iterations': iterations
}
# Plotting functions
def get_history(result, ind, only_last=True):
if only_last:
res = [z['history'][-1] for z in result[ind]]
else:
res = [x for z in result[ind] for x in z['history']]
return res
def visualize_result_loss(result, indices_lst):
values_byloss = {'loss': []}
ind_offset = {ind: 0 for ind in set(indices_lst)}
for ind in indices_lst:
val = result[ind][ind_offset[ind]]['best_loss']
ind_offset[ind] += 1
values_byloss['loss'].append(val)
# for key in val:
# if key.startswith('loss'):
# if key not in values_byloss:
# values_byloss[key] = []
# values_byloss[key].append(val[key])
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(values_byloss.keys()), 1):
plt.subplot(1, len(values_byloss), i)
plt.title(key)
plt.plot(values_byloss[key])
# plt.axhline(online.to_subtract[key])
plt.show()
def visualize_byindex(result, indices_lst, initial_value):
for ind in set(indices_lst):
res = get_history(result, ind)
res_dct = lstdct2dctlst(res)
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(res_dct.keys()), 1):
hst = res_dct[key]
plt.subplot(1, len(res_dct), i)
plt.title(key + ' ' + str(ind))
if key in initial_value[ind]['metrics']:
initial = initial_value[ind]['metrics'][key]
hst = [initial] + hst
plt.axhline(initial)
if np.min(hst) > 0:
plt.yscale('log')
plt.plot(hst)
plt.show()
def lstdct2dctlst(lst):
"""List of dictionaries -> dictionary of lists."""
keys = lst[0].keys()
res = {key: [x[key] for x in lst] for key in keys}
return res
def compute_online_update(rating_value, mb_np_orig,
model_tensor_orig,
idx_set,
users_get_value=None,
n_repeats=1,
hotfix_update_hypers=None,
plot_charts=False,
pbars=None,
cancel_updates=False,
**kwargs):
"""Compute an online update given a pre-computed minibatch of data, and a model tensor.
Args:
rating_value: value of the rating in (-1, 1) to set
mb_np_orig: minibatch for the loss function
model_tensor_orig: current scores (users + aggregate)
idx_set: which rating ID in the minibatch to update?
users_get_value: if pbar is set, this user's comparison will be used
n_repeats: number of repetitions of the golden ratio search
hotfix_update_hypers: updates for hyperparameters
plot_charts: show visualization
cancel_updates: do a dummy run without optimization (return current values)
pbars: if not None, set progress bar to the value of comparison (0-100) for user_get_value
**kwargs: hotfix updates for the golden ratio algorithm
Returns:
if pbar is set, returns None (only sets the value of the progress bar)
otherwise, returns a dictionary with a response.
"""
# internal object IDs to update
obj1 = mb_np_orig['objects_rating_v1'][idx_set]
obj2 = mb_np_orig['objects_rating_v2'][idx_set]
# copying the minibatch and the model tensor (will be updated)
mb_np_copy = deepcopy(mb_np_orig)
model_tensor_copy = deepcopy(model_tensor_orig)
if not cancel_updates:
# SETTING THE RATING VALUE
mb_np_copy['cmp'][idx_set, 0] = rating_value
# creating the updater
online = FeaturelessOnlineUpdater()
online.hypers['aggregate_index'] = -1
# hotfix parameter updates
if hotfix_update_hypers is not None:
for key, value in hotfix_update_hypers.items():
online.hypers[key] = value
for key, value in kwargs.items():
|
# setting data
online.set_minibatch(mb_np_copy)
online.set_model_tensor(model_tensor_copy)
online.set_subtract()
online.silent = True
# CONFIGURATION FOR INDICES
indices_lst = []
for i in range(model_tensor_orig.shape[0]):
indices_lst.append((i, obj1, 0))
indices_lst.append((i, obj2, 0))
indices_lst *= n_repeats
# initial value for the loss/index
initial_value = {ind: online.get_closure_loss(ind)(online.get_value(ind)) for ind in
set(indices_lst)}
if not cancel_updates:
# RUNNING OPT | online.golden_params[key] = value | conditional_block |
preference_aggregation_featureless_online.py | (object):
"""Update weights online."""
def __init__(self, hypers=None, golden_params=None):
if golden_params is None:
golden_params = {}
self.golden_params = golden_params
self.hypers = hypers
self.model_tensor = None
self.minibatch = None
self.to_subtract = {}
self.silent = False
def current_loss(self, key='loss'):
return self.get_closure_loss((0, 0, 0), key=key)(None)
def set_subtract(self, key='loss'):
self.to_subtract = self.current_loss(key=key)['metrics']
def set_minibatch(self, mb):
for key, val in mb.items():
assert isinstance(key, str), key
assert isinstance(val, np.ndarray), val
self.minibatch = mb
def set_model_tensor(self, mt):
assert isinstance(mt, np.ndarray), mt
self.model_tensor = mt
def get_value(self, indices):
"""Get one from the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
return self.model_tensor[indices[0], indices[1], indices[2]]
def set_value(self, indices, val):
"""Set value to the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
self.model_tensor[indices[0], indices[1], indices[2]] = val
return val
def get_closure_loss(self, indices, key='loss'):
"""Return a function that computes the loss given model_tensor[indices]."""
def f(x=None, self=self, indices=indices[:], key=str(key)):
if self.minibatch is None:
raise ValueError("Please set the minibatch first")
if self.model_tensor is None:
raise ValueError("Please set the tensor")
if self.hypers is None:
raise ValueError("Please use gin to configure hyperparameters")
if x is not None:
self.set_value(indices=indices, val=x)
result = loss_fcn_np(model_tensor=self.model_tensor,
**self.hypers, **self.minibatch)
return {
'loss': result[key] - self.to_subtract.get(key, 0.0),
'metrics': {**result, 'param': x}
}
return f
def best_value_many_indices(self, indices_list, **kwargs):
"""Given a list of indices (with possible repeats), run optimization and return stats."""
indices_list = [tuple(x) for x in indices_list]
stats = {indices: [] for indices in set(indices_list)}
for indices in indices_list:
stats[indices].append(self.best_value_indices(indices=indices, **kwargs))
return stats
def best_value_indices(self, indices, key='loss', assign_at_end=False,
give_history=True):
"""Given indices, use Golden Ratio search to compute the best model_tensor[indices]."""
# remember the original value
orig_value = self.get_value(indices=indices)
# function to compute the loss
loss_closure = self.get_closure_loss(indices=indices, key=key)
orig_loss = loss_closure(orig_value)['loss']
# history of metrics
history = []
def closure_with_history(x, give_history=give_history):
"""Given a value, compute the loss, store the value (optionally) and return by key."""
result = loss_closure(x)
if give_history:
history.append(result['metrics'])
return result['loss']
params = {x: y for x, y in self.golden_params.items()}
if 'smartbracket' in self.golden_params:
del params['smartbracket']
params['brack'] = tuple([orig_value + t for t in self.golden_params['smartbracket']])
# running optimization
best_value, best_loss, iterations = golden(closure_with_history,
full_output=True, **params)
# restoring the value if requested
if not assign_at_end:
self.set_value(indices=indices, val=orig_value)
else:
if best_loss < orig_loss:
self.set_value(indices=indices, val=best_value)
else:
if not self.silent:
logging.warning(f"No improvement {indices} {key} {assign_at_end}")
self.set_value(indices=indices, val=orig_value)
best_value = orig_value
best_loss = orig_loss
return {
'assigned': assign_at_end,
'history': history,
'orig_value_param': orig_value,
'best_value': best_value,
'best_loss': best_loss,
'iterations': iterations
}
# Plotting functions
def get_history(result, ind, only_last=True):
if only_last:
res = [z['history'][-1] for z in result[ind]]
else:
res = [x for z in result[ind] for x in z['history']]
return res
def visualize_result_loss(result, indices_lst):
values_byloss = {'loss': []}
ind_offset = {ind: 0 for ind in set(indices_lst)}
for ind in indices_lst:
val = result[ind][ind_offset[ind]]['best_loss']
ind_offset[ind] += 1
values_byloss['loss'].append(val)
# for key in val:
# if key.startswith('loss'):
# if key not in values_byloss:
# values_byloss[key] = []
# values_byloss[key].append(val[key])
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(values_byloss.keys()), 1):
plt.subplot(1, len(values_byloss), i)
plt.title(key)
plt.plot(values_byloss[key])
# plt.axhline(online.to_subtract[key])
plt.show()
def visualize_byindex(result, indices_lst, initial_value):
for ind in set(indices_lst):
res = get_history(result, ind)
res_dct = lstdct2dctlst(res)
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(res_dct.keys()), 1):
hst = res_dct[key]
plt.subplot(1, len(res_dct), i)
plt.title(key + ' ' + str(ind))
if key in initial_value[ind]['metrics']:
initial = initial_value[ind]['metrics'][key]
hst = [initial] + hst
plt.axhline(initial)
if np.min(hst) > 0:
plt.yscale('log')
plt.plot(hst)
plt.show()
def lstdct2dctlst(lst):
"""List of dictionaries -> dictionary of lists."""
keys = lst[0].keys()
res = {key: [x[key] for x in lst] for key in keys}
return res
def compute_online_update(rating_value, mb_np_orig,
model_tensor_orig,
idx_set,
users_get_value=None,
n_repeats=1,
hotfix_update_hypers=None,
plot_charts=False,
pbars=None,
cancel_updates=False,
**kwargs):
"""Compute an online update given a pre-computed minibatch of data, and a model tensor.
Args:
rating_value: value of the rating in (-1, 1) to set
mb_np_orig: minibatch for the loss function
model_tensor_orig: current scores (users + aggregate)
idx_set: which rating ID in the minibatch to update?
users_get_value: if pbar is set, this user's comparison will be used
n_repeats: number of repetitions of the golden ratio search
hotfix_update_hypers: updates for hyperparameters
plot_charts: show visualization
cancel_updates: do a dummy run without optimization (return current values)
pbars: if not None, set progress bar to the value of comparison (0-100) for user_get_value
**kwargs: hotfix updates for the golden ratio algorithm
Returns:
if pbar is set, returns None (only sets the value of the progress bar)
otherwise, returns a dictionary with a response.
"""
# internal object IDs to update
obj1 = mb_np_orig['objects_rating_v1'][idx_set]
obj2 = mb_np_orig['objects_rating_v2'][idx_set]
# copying the minibatch and the model tensor (will be updated)
mb_np_copy = deepcopy(mb_np_orig)
model_tensor_copy = deepcopy(model_tensor_orig)
if not cancel_updates:
# SETTING THE RATING VALUE
mb_np_copy['cmp'][idx_set, 0] = rating_value
# creating the updater
online = FeaturelessOnlineUpdater()
online.hypers['aggregate_index'] = -1
# hotfix parameter updates
if hotfix_update_hypers is not None:
for key, value in hotfix_update_hypers.items():
online.hypers[key] = value
for key, value in kwargs.items():
online.golden_params[key] = value
# setting data
online.set_minibatch(mb_np_copy)
online.set_model_tensor(model_tensor_copy)
online.set_subtract()
online.silent = | FeaturelessOnlineUpdater | identifier_name |
|
preference_aggregation_featureless_online.py | 0), key=key)(None)
def set_subtract(self, key='loss'):
self.to_subtract = self.current_loss(key=key)['metrics']
def set_minibatch(self, mb):
for key, val in mb.items():
assert isinstance(key, str), key
assert isinstance(val, np.ndarray), val
self.minibatch = mb
def set_model_tensor(self, mt):
assert isinstance(mt, np.ndarray), mt
self.model_tensor = mt
def get_value(self, indices):
"""Get one from the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
return self.model_tensor[indices[0], indices[1], indices[2]]
def set_value(self, indices, val):
"""Set value to the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
self.model_tensor[indices[0], indices[1], indices[2]] = val
return val
def get_closure_loss(self, indices, key='loss'):
"""Return a function that computes the loss given model_tensor[indices]."""
def f(x=None, self=self, indices=indices[:], key=str(key)):
if self.minibatch is None:
raise ValueError("Please set the minibatch first")
if self.model_tensor is None:
raise ValueError("Please set the tensor")
if self.hypers is None:
raise ValueError("Please use gin to configure hyperparameters")
if x is not None:
self.set_value(indices=indices, val=x)
result = loss_fcn_np(model_tensor=self.model_tensor,
**self.hypers, **self.minibatch)
return {
'loss': result[key] - self.to_subtract.get(key, 0.0),
'metrics': {**result, 'param': x}
}
return f
def best_value_many_indices(self, indices_list, **kwargs):
"""Given a list of indices (with possible repeats), run optimization and return stats."""
indices_list = [tuple(x) for x in indices_list]
stats = {indices: [] for indices in set(indices_list)}
for indices in indices_list:
stats[indices].append(self.best_value_indices(indices=indices, **kwargs))
return stats
def best_value_indices(self, indices, key='loss', assign_at_end=False,
give_history=True):
"""Given indices, use Golden Ratio search to compute the best model_tensor[indices]."""
# remember the original value
orig_value = self.get_value(indices=indices)
# function to compute the loss
loss_closure = self.get_closure_loss(indices=indices, key=key)
orig_loss = loss_closure(orig_value)['loss']
# history of metrics
history = []
def closure_with_history(x, give_history=give_history):
"""Given a value, compute the loss, store the value (optionally) and return by key."""
result = loss_closure(x)
if give_history:
history.append(result['metrics'])
return result['loss']
params = {x: y for x, y in self.golden_params.items()}
if 'smartbracket' in self.golden_params:
del params['smartbracket']
params['brack'] = tuple([orig_value + t for t in self.golden_params['smartbracket']])
# running optimization
best_value, best_loss, iterations = golden(closure_with_history,
full_output=True, **params)
# restoring the value if requested
if not assign_at_end:
self.set_value(indices=indices, val=orig_value)
else:
if best_loss < orig_loss:
self.set_value(indices=indices, val=best_value)
else:
if not self.silent:
logging.warning(f"No improvement {indices} {key} {assign_at_end}")
self.set_value(indices=indices, val=orig_value)
best_value = orig_value
best_loss = orig_loss
return {
'assigned': assign_at_end,
'history': history,
'orig_value_param': orig_value,
'best_value': best_value,
'best_loss': best_loss,
'iterations': iterations
}
# Plotting functions
def get_history(result, ind, only_last=True):
if only_last:
res = [z['history'][-1] for z in result[ind]]
else:
res = [x for z in result[ind] for x in z['history']]
return res
def visualize_result_loss(result, indices_lst):
values_byloss = {'loss': []}
ind_offset = {ind: 0 for ind in set(indices_lst)}
for ind in indices_lst:
val = result[ind][ind_offset[ind]]['best_loss']
ind_offset[ind] += 1
values_byloss['loss'].append(val)
# for key in val:
# if key.startswith('loss'):
# if key not in values_byloss:
# values_byloss[key] = []
# values_byloss[key].append(val[key])
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(values_byloss.keys()), 1):
plt.subplot(1, len(values_byloss), i)
plt.title(key)
plt.plot(values_byloss[key])
# plt.axhline(online.to_subtract[key])
plt.show()
def visualize_byindex(result, indices_lst, initial_value):
| plt.show()
def lstdct2dctlst(lst):
"""List of dictionaries -> dictionary of lists."""
keys = lst[0].keys()
res = {key: [x[key] for x in lst] for key in keys}
return res
def compute_online_update(rating_value, mb_np_orig,
model_tensor_orig,
idx_set,
users_get_value=None,
n_repeats=1,
hotfix_update_hypers=None,
plot_charts=False,
pbars=None,
cancel_updates=False,
**kwargs):
"""Compute an online update given a pre-computed minibatch of data, and a model tensor.
Args:
rating_value: value of the rating in (-1, 1) to set
mb_np_orig: minibatch for the loss function
model_tensor_orig: current scores (users + aggregate)
idx_set: which rating ID in the minibatch to update?
users_get_value: if pbar is set, this user's comparison will be used
n_repeats: number of repetitions of the golden ratio search
hotfix_update_hypers: updates for hyperparameters
plot_charts: show visualization
cancel_updates: do a dummy run without optimization (return current values)
pbars: if not None, set progress bar to the value of comparison (0-100) for user_get_value
**kwargs: hotfix updates for the golden ratio algorithm
Returns:
if pbar is set, returns None (only sets the value of the progress bar)
otherwise, returns a dictionary with a response.
"""
# internal object IDs to update
obj1 = mb_np_orig['objects_rating_v1'][idx_set]
obj2 = mb_np_orig['objects_rating_v2'][idx_set]
# copying the minibatch and the model tensor (will be updated)
mb_np_copy = deepcopy(mb_np_orig)
model_tensor_copy = deepcopy(model_tensor_orig)
if not cancel_updates:
# SETTING THE RATING VALUE
mb_np_copy['cmp'][idx_set, 0] = rating_value
# creating the updater
online = FeaturelessOnlineUpdater()
online.hypers['aggregate_index'] = -1
# hotfix parameter updates
if hotfix_update_hypers is not None:
for key, value in hotfix_update_hypers.items():
online.hypers[key] = value
for key, value in kwargs.items():
online.golden_params[key] = value
# setting data
online.set_minibatch(mb_np_copy)
online.set_model_tensor(model_tensor_copy)
online.set_subtract()
online.silent = True
# CONFIGURATION FOR INDICES
indices_lst = []
for i in range(model_tensor_orig.shape[0]):
indices_lst.append((i, obj1, 0))
indices_lst.append((i, obj2, 0))
indices_lst *= n_repeats
# initial value for the loss/index
initial_value = {ind: online.get_closure_loss(ind)(online.get_value(ind)) for ind in
set(indices_lst)}
if not cancel_updates:
# | for ind in set(indices_lst):
res = get_history(result, ind)
res_dct = lstdct2dctlst(res)
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(res_dct.keys()), 1):
hst = res_dct[key]
plt.subplot(1, len(res_dct), i)
plt.title(key + ' ' + str(ind))
if key in initial_value[ind]['metrics']:
initial = initial_value[ind]['metrics'][key]
hst = [initial] + hst
plt.axhline(initial)
if np.min(hst) > 0:
plt.yscale('log')
plt.plot(hst)
| identifier_body |
smd.rs | ::open(&fbx).unwrap());
let fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
let fbx_tree = ObjectTreeNode::from_simple(&fbx);
// Go over all FBX root nodes and turn them into SMD data
let mut smd = Smd::new();
process_fbx_node(
&fbx,
&fbx_tree, &mut smd,
&Matrix4::identity(),
None,
flip_fix_list
)?;
Ok(smd)
}
pub fn create_animation_smd(
ref_smd: &Smd, fbx: &PathBuf, flip_fix_list: &Vec<String>,
) -> Result<Smd, Error> {
// Read in the fbx we got told to convert
let file = BufReader::new(File::open(&fbx).unwrap());
let mut fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
// Read in the animation data itself
let animation = Animation::from_simple(&fbx).unwrap();
// Count and log frames
let frame_count = animation.frame_count(&fbx);
task_log(format!("Animation has {} frames", frame_count));
// Copy over every bone to the new animation SMD
let mut smd = Smd::new();
for bone in &ref_smd.bones {
smd.bones.push(bone.clone());
}
// Finally, turn the animation data into bone positions in the SMD
for frame in 0..frame_count {
// First transform the FBX for this frame
animation.transform_fbx_to_frame(&mut fbx, frame);
// Now go over all models
for (_, model) in fbx.objects.iter().filter(|&(_, o)| o.class.type_name() == "Model") {
// For this model, look up the matching BoneId in the reference SMD
if let Some(bone_id) = ref_smd.id_of_bone(&id_name(&model.name).unwrap()) {
// Now that we have a model and a bone, we need the current translation and rotation
// for the model
let (translation, rotation) = calculate_animation_transforms_for(&fbx, model, flip_fix_list);
// And now that we have those, finally add the bone data to the animation SMD
smd.set_animation(frame, bone_id, SmdAnimationFrameBone {
translation: translation.into(),
rotation: rotation.into(),
});
}
}
}
Ok(smd)
}
fn process_fbx_node(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, mut smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
// Perform node type specific information
match fbx_node.object.class {
ObjectType::Geometry(ref geometry) =>
process_geometry(smd, geometry, matrix, current_bone.unwrap()),
ObjectType::Model(ref _model) =>
process_model(fbx, fbx_node, smd, matrix, current_bone, flip_fix_list)?,
_ => {
// Just go straight to the children
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, matrix, current_bone, flip_fix_list)?;
}
}
}
Ok(())
}
fn process_geometry(smd: &mut Smd, geometry: &Geometry, matrix: &Matrix4<f32>, current_bone: &SmdBone) {
// Add triangles to parent node
let tris = geometry.triangles();
for tri in tris {
// Turn the vertices in this triangle to SMD vertices
let mut smd_verts: [SmdVertex; 3] = Default::default();
for (i, vert) in tri.iter().enumerate() {
            // Transform the position as a point (w = 1) and the normal as a direction (w = 0)
let pos = matrix * Vector4::new(vert.0[0], vert.0[1], vert.0[2], 1.0);
let norm = matrix * Vector4::new(vert.1[0], vert.1[1], vert.1[2], 0.0);
smd_verts[i] = SmdVertex {
parent_bone: current_bone.id, // This is overwritten by links
position: pos.truncate().into(),
normal: norm.truncate().into(),
uv: vert.2,
links: vec!(
                    /*Not needed, we aren't using weights anyway, so this is handled by parent_bone
SmdLink {
bone: bone_id,
weight: 1.0,
}*/
)
};
}
// Add the actual SMD triangle
smd.triangles.push(SmdTriangle {
material: "layl_test_texture".into(),
vertices: smd_verts,
});
}
}
fn process_model(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
task_log(format!("Adding model \"{}\" to SMD data", friendly_name(&fbx_node.object.name)));
let properties = ModelProperties::from_generic(&fbx_node.object.properties);
// Create a new transformation matrix
let local_matrix = local_matrices(&properties);
// Create a new bone
let new_bone = smd.new_bone(
&id_name(&fbx_node.object.name).unwrap(),
current_bone.map(|b| b.id)
)
.ok_or_else(|| Error::Task(format!(
"Bone \"{}\" exists multiple times in the FBX",
&fbx_node.object.name
)))?
.clone(); // Clone needed to avoid a borrow since we need to mut borrow SMD later
// Set the transformations on this bone
let (translation, rotation) = calculate_animation_transforms_for(&fbx, &fbx_node.object, flip_fix_list);
let first_frame = SmdAnimationFrameBone {
// This needs to be derived from the matrix to get the right location
translation: translation.into(),
// This can just be directly copied over
rotation: rotation.into(),
};
smd.set_animation(0, new_bone.id, first_frame);
// Make new matrices for children
let matrix = matrix * local_matrix;
// Make sure the child nodes will receive this new bone
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, &matrix, Some(&new_bone), flip_fix_list)?;
}
Ok(())
}
/// Returns (Translation, Rotation)
fn calculate_animation_transforms_for(
fbx: &SimpleFbx, obj: &Object, flip_fix_list: &Vec<String>,
) -> (Vector3<f32>, Vector3<f32>) {
let properties = ModelProperties::from_generic(&obj.properties);
// Get the bone's translation
let parent_after_rot_translation = calculate_parent_after_rot_translation(fbx, obj);
let prop_translation: Vector3<_> = properties.translation.into();
let prop_rot_offset: Vector3<_> = properties.rotation_offset.into();
let prop_rot_pivot: Vector3<_> = properties.rotation_pivot.into();
let translation = parent_after_rot_translation + prop_translation + prop_rot_offset + prop_rot_pivot;
// Check if this bone's in the flip fix list
// TODO: Get an actual fix instead of this dirty manual hack
let flip = flip_fix_list.iter().any(|n| n == &id_name(&obj.name).unwrap());
// We want the rotation, but we've got multiple rotations, so combine them
let pre_rotation = Quaternion::from(Euler::new(
Deg(properties.pre_rotation[0]), Deg(properties.pre_rotation[1]), Deg(properties.pre_rotation[2])
));
let rotation = Quaternion::from(Euler::new(
Deg(properties.rotation[0]), Deg(properties.rotation[1]), Deg(properties.rotation[2])
));
let post_rotation = Quaternion::from(Euler::new(
Deg(properties.post_rotation[0]), Deg(properties.post_rotation[1]), Deg(properties.post_rotation[2])
));
let total_rotation = if !flip {
Euler::from(post_rotation.invert() * rotation * pre_rotation)
} else {
Euler::from(post_rotation.invert() * rotation.invert() * pre_rotation)
};
let rotation = Vector3::new(
total_rotation.x.0,
total_rotation.y.0,
total_rotation.z.0,
);
| }
fn calculate_parent_after_rot_translation(fbx: &SimpleFbx, obj: &Object) -> Vector3<f32> {
// First actually get the parent's model data
let parent_obj = if let Some(v) = fbx.parent_of(obj.id) {
if v == 0 {
// At root, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
}
v
} else {
// No parent, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
};
let | (translation, rotation) | random_line_split |
smd.rs | filter(|&(_, o)| o.class.type_name() == "Model") {
// For this model, look up the matching BoneId in the reference SMD
if let Some(bone_id) = ref_smd.id_of_bone(&id_name(&model.name).unwrap()) {
// Now that we have a model and a bone, we need the current translation and rotation
// for the model
let (translation, rotation) = calculate_animation_transforms_for(&fbx, model, flip_fix_list);
// And now that we have those, finally add the bone data to the animation SMD
smd.set_animation(frame, bone_id, SmdAnimationFrameBone {
translation: translation.into(),
rotation: rotation.into(),
});
}
}
}
Ok(smd)
}
fn process_fbx_node(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, mut smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
// Perform node type specific information
match fbx_node.object.class {
ObjectType::Geometry(ref geometry) =>
process_geometry(smd, geometry, matrix, current_bone.unwrap()),
ObjectType::Model(ref _model) =>
process_model(fbx, fbx_node, smd, matrix, current_bone, flip_fix_list)?,
_ => {
// Just go straight to the children
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, matrix, current_bone, flip_fix_list)?;
}
}
}
Ok(())
}
fn process_geometry(smd: &mut Smd, geometry: &Geometry, matrix: &Matrix4<f32>, current_bone: &SmdBone) {
// Add triangles to parent node
let tris = geometry.triangles();
for tri in tris {
// Turn the vertices in this triangle to SMD vertices
let mut smd_verts: [SmdVertex; 3] = Default::default();
for (i, vert) in tri.iter().enumerate() {
            // Transform the position as a point (w = 1) and the normal as a direction (w = 0)
let pos = matrix * Vector4::new(vert.0[0], vert.0[1], vert.0[2], 1.0);
let norm = matrix * Vector4::new(vert.1[0], vert.1[1], vert.1[2], 0.0);
smd_verts[i] = SmdVertex {
parent_bone: current_bone.id, // This is overwritten by links
position: pos.truncate().into(),
normal: norm.truncate().into(),
uv: vert.2,
links: vec!(
                    /*Not needed, we aren't using weights anyway, so this is handled by parent_bone
SmdLink {
bone: bone_id,
weight: 1.0,
}*/
)
};
}
// Add the actual SMD triangle
smd.triangles.push(SmdTriangle {
material: "layl_test_texture".into(),
vertices: smd_verts,
});
}
}
fn process_model(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
task_log(format!("Adding model \"{}\" to SMD data", friendly_name(&fbx_node.object.name)));
let properties = ModelProperties::from_generic(&fbx_node.object.properties);
// Create a new transformation matrix
let local_matrix = local_matrices(&properties);
// Create a new bone
let new_bone = smd.new_bone(
&id_name(&fbx_node.object.name).unwrap(),
current_bone.map(|b| b.id)
)
.ok_or_else(|| Error::Task(format!(
"Bone \"{}\" exists multiple times in the FBX",
&fbx_node.object.name
)))?
.clone(); // Clone needed to avoid a borrow since we need to mut borrow SMD later
// Set the transformations on this bone
let (translation, rotation) = calculate_animation_transforms_for(&fbx, &fbx_node.object, flip_fix_list);
let first_frame = SmdAnimationFrameBone {
// This needs to be derived from the matrix to get the right location
translation: translation.into(),
// This can just be directly copied over
rotation: rotation.into(),
};
smd.set_animation(0, new_bone.id, first_frame);
// Make new matrices for children
let matrix = matrix * local_matrix;
// Make sure the child nodes will receive this new bone
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, &matrix, Some(&new_bone), flip_fix_list)?;
}
Ok(())
}
/// Returns (Translation, Rotation)
fn calculate_animation_transforms_for(
fbx: &SimpleFbx, obj: &Object, flip_fix_list: &Vec<String>,
) -> (Vector3<f32>, Vector3<f32>) {
let properties = ModelProperties::from_generic(&obj.properties);
// Get the bone's translation
let parent_after_rot_translation = calculate_parent_after_rot_translation(fbx, obj);
let prop_translation: Vector3<_> = properties.translation.into();
let prop_rot_offset: Vector3<_> = properties.rotation_offset.into();
let prop_rot_pivot: Vector3<_> = properties.rotation_pivot.into();
let translation = parent_after_rot_translation + prop_translation + prop_rot_offset + prop_rot_pivot;
// Check if this bone's in the flip fix list
// TODO: Get an actual fix instead of this dirty manual hack
let flip = flip_fix_list.iter().any(|n| n == &id_name(&obj.name).unwrap());
// We want the rotation, but we've got multiple rotations, so combine them
let pre_rotation = Quaternion::from(Euler::new(
Deg(properties.pre_rotation[0]), Deg(properties.pre_rotation[1]), Deg(properties.pre_rotation[2])
));
let rotation = Quaternion::from(Euler::new(
Deg(properties.rotation[0]), Deg(properties.rotation[1]), Deg(properties.rotation[2])
));
let post_rotation = Quaternion::from(Euler::new(
Deg(properties.post_rotation[0]), Deg(properties.post_rotation[1]), Deg(properties.post_rotation[2])
));
let total_rotation = if !flip {
Euler::from(post_rotation.invert() * rotation * pre_rotation)
} else {
Euler::from(post_rotation.invert() * rotation.invert() * pre_rotation)
};
let rotation = Vector3::new(
total_rotation.x.0,
total_rotation.y.0,
total_rotation.z.0,
);
(translation, rotation)
}
fn calculate_parent_after_rot_translation(fbx: &SimpleFbx, obj: &Object) -> Vector3<f32> {
// First actually get the parent's model data
let parent_obj = if let Some(v) = fbx.parent_of(obj.id) {
if v == 0 {
// At root, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
}
v
} else {
// No parent, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
};
let props = ModelProperties::from_generic(&fbx.objects[&parent_obj].properties);
// Now add up all the translations applied after rotation
let rotation_pivot: Vector3<_> = props.rotation_pivot.into();
let scale_offset: Vector3<_> = props.scale_offset.into();
let translation = -rotation_pivot + scale_offset;
translation
}
fn local_matrices(properties: &ModelProperties) -> Matrix4<f32> {
// Create various matrices
let rotation_offset = properties.rotation_offset.into();
let rotation_offset_mat = Matrix4::from_translation(rotation_offset);
let rotation_pivot: Vector3<_> = properties.rotation_pivot.into();
let rotation_pivot_mat = Matrix4::from_translation(rotation_pivot);
let pre_rotation = euler_rotation_to_matrix(properties.pre_rotation);
let rotation = euler_rotation_to_matrix(properties.rotation);
let post_rotation = euler_rotation_to_matrix(properties.post_rotation);
let scale_offset = properties.scale_offset.into();
let scale_offset_mat = Matrix4::from_translation(scale_offset);
let scale_pivot: Vector3<_> = properties.scale_pivot.into();
let scale_pivot_mat = Matrix4::from_translation(scale_pivot);
let scale = Matrix4::from_nonuniform_scale(
properties.scale[0],
properties.scale[1],
properties.scale[2]
);
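    // Compose the local transform in FBX order:
    // T * Roff * Rp * Rpre * R * Rpost^-1 * Rp^-1 * Soff * Sp * S * Sp^-1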
let local_matrix_for_vertices =
Matrix4::from_translation(properties.translation.into()) *
// Rotation
rotation_offset_mat *
rotation_pivot_mat *
pre_rotation *
rotation *
post_rotation.invert().unwrap() *
rotation_pivot_mat.invert().unwrap() *
// Scale
scale_offset_mat *
scale_pivot_mat *
scale *
scale_pivot_mat.invert().unwrap();
local_matrix_for_vertices
}
fn | euler_rotation_to_matrix | identifier_name |
|
smd.rs | ::open(&fbx).unwrap());
let fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
let fbx_tree = ObjectTreeNode::from_simple(&fbx);
// Go over all FBX root nodes and turn them into SMD data
let mut smd = Smd::new();
process_fbx_node(
&fbx,
&fbx_tree, &mut smd,
&Matrix4::identity(),
None,
flip_fix_list
)?;
Ok(smd)
}
pub fn create_animation_smd(
ref_smd: &Smd, fbx: &PathBuf, flip_fix_list: &Vec<String>,
) -> Result<Smd, Error> {
// Read in the fbx we got told to convert
let file = BufReader::new(File::open(&fbx).unwrap());
let mut fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
// Read in the animation data itself
let animation = Animation::from_simple(&fbx).unwrap();
// Count and log frames
let frame_count = animation.frame_count(&fbx);
task_log(format!("Animation has {} frames", frame_count));
// Copy over every bone to the new animation SMD
let mut smd = Smd::new();
for bone in &ref_smd.bones {
smd.bones.push(bone.clone());
}
// Finally, turn the animation data into bone positions in the SMD
for frame in 0..frame_count {
// First transform the FBX for this frame
animation.transform_fbx_to_frame(&mut fbx, frame);
// Now go over all models
for (_, model) in fbx.objects.iter().filter(|&(_, o)| o.class.type_name() == "Model") {
// For this model, look up the matching BoneId in the reference SMD
if let Some(bone_id) = ref_smd.id_of_bone(&id_name(&model.name).unwrap()) {
// Now that we have a model and a bone, we need the current translation and rotation
// for the model
let (translation, rotation) = calculate_animation_transforms_for(&fbx, model, flip_fix_list);
// And now that we have those, finally add the bone data to the animation SMD
smd.set_animation(frame, bone_id, SmdAnimationFrameBone {
translation: translation.into(),
rotation: rotation.into(),
});
}
}
}
Ok(smd)
}
fn process_fbx_node(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, mut smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
// Perform node type specific information
match fbx_node.object.class {
ObjectType::Geometry(ref geometry) =>
process_geometry(smd, geometry, matrix, current_bone.unwrap()),
ObjectType::Model(ref _model) =>
process_model(fbx, fbx_node, smd, matrix, current_bone, flip_fix_list)?,
_ => {
// Just go straight to the children
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, matrix, current_bone, flip_fix_list)?;
}
}
}
Ok(())
}
fn process_geometry(smd: &mut Smd, geometry: &Geometry, matrix: &Matrix4<f32>, current_bone: &SmdBone) {
// Add triangles to parent node
let tris = geometry.triangles();
for tri in tris {
// Turn the vertices in this triangle to SMD vertices
let mut smd_verts: [SmdVertex; 3] = Default::default();
for (i, vert) in tri.iter().enumerate() {
            // Transform the position as a point (w = 1) and the normal as a direction (w = 0)
let pos = matrix * Vector4::new(vert.0[0], vert.0[1], vert.0[2], 1.0);
let norm = matrix * Vector4::new(vert.1[0], vert.1[1], vert.1[2], 0.0);
smd_verts[i] = SmdVertex {
parent_bone: current_bone.id, // This is overwritten by links
position: pos.truncate().into(),
normal: norm.truncate().into(),
uv: vert.2,
links: vec!(
                    /*Not needed, we aren't using weights anyway, so this is handled by parent_bone
SmdLink {
bone: bone_id,
weight: 1.0,
}*/
)
};
}
// Add the actual SMD triangle
smd.triangles.push(SmdTriangle {
material: "layl_test_texture".into(),
vertices: smd_verts,
});
}
}
fn process_model(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
task_log(format!("Adding model \"{}\" to SMD data", friendly_name(&fbx_node.object.name)));
let properties = ModelProperties::from_generic(&fbx_node.object.properties);
// Create a new transformation matrix
let local_matrix = local_matrices(&properties);
// Create a new bone
let new_bone = smd.new_bone(
&id_name(&fbx_node.object.name).unwrap(),
current_bone.map(|b| b.id)
)
.ok_or_else(|| Error::Task(format!(
"Bone \"{}\" exists multiple times in the FBX",
&fbx_node.object.name
)))?
.clone(); // Clone needed to avoid a borrow since we need to mut borrow SMD later
// Set the transformations on this bone
let (translation, rotation) = calculate_animation_transforms_for(&fbx, &fbx_node.object, flip_fix_list);
let first_frame = SmdAnimationFrameBone {
// This needs to be derived from the matrix to get the right location
translation: translation.into(),
// This can just be directly copied over
rotation: rotation.into(),
};
smd.set_animation(0, new_bone.id, first_frame);
// Make new matrices for children
let matrix = matrix * local_matrix;
// Make sure the child nodes will receive this new bone
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, &matrix, Some(&new_bone), flip_fix_list)?;
}
Ok(())
}
/// Returns (Translation, Rotation)
fn calculate_animation_transforms_for(
fbx: &SimpleFbx, obj: &Object, flip_fix_list: &Vec<String>,
) -> (Vector3<f32>, Vector3<f32>) | ));
let post_rotation = Quaternion::from(Euler::new(
Deg(properties.post_rotation[0]), Deg(properties.post_rotation[1]), Deg(properties.post_rotation[2])
));
let total_rotation = if !flip {
Euler::from(post_rotation.invert() * rotation * pre_rotation)
} else {
Euler::from(post_rotation.invert() * rotation.invert() * pre_rotation)
};
let rotation = Vector3::new(
total_rotation.x.0,
total_rotation.y.0,
total_rotation.z.0,
);
(translation, rotation)
}
fn calculate_parent_after_rot_translation(fbx: &SimpleFbx, obj: &Object) -> Vector3<f32> {
// First actually get the parent's model data
let parent_obj = if let Some(v) = fbx.parent_of(obj.id) {
if v == 0 {
// At root, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
}
v
} else {
// No parent, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
};
| {
let properties = ModelProperties::from_generic(&obj.properties);
// Get the bone's translation
let parent_after_rot_translation = calculate_parent_after_rot_translation(fbx, obj);
let prop_translation: Vector3<_> = properties.translation.into();
let prop_rot_offset: Vector3<_> = properties.rotation_offset.into();
let prop_rot_pivot: Vector3<_> = properties.rotation_pivot.into();
let translation = parent_after_rot_translation + prop_translation + prop_rot_offset + prop_rot_pivot;
// Check if this bone's in the flip fix list
// TODO: Get an actual fix instead of this dirty manual hack
let flip = flip_fix_list.iter().any(|n| n == &id_name(&obj.name).unwrap());
// We want the rotation, but we've got multiple rotations, so combine them
let pre_rotation = Quaternion::from(Euler::new(
Deg(properties.pre_rotation[0]), Deg(properties.pre_rotation[1]), Deg(properties.pre_rotation[2])
));
let rotation = Quaternion::from(Euler::new(
Deg(properties.rotation[0]), Deg(properties.rotation[1]), Deg(properties.rotation[2]) | identifier_body |
smd.rs | ::open(&fbx).unwrap());
let fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
let fbx_tree = ObjectTreeNode::from_simple(&fbx);
// Go over all FBX root nodes and turn them into SMD data
let mut smd = Smd::new();
process_fbx_node(
&fbx,
&fbx_tree, &mut smd,
&Matrix4::identity(),
None,
flip_fix_list
)?;
Ok(smd)
}
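/// Builds an animation SMD: the bone list is copied from the reference SMD, then every
/// frame of the FBX animation is baked into per-bone translation/rotation keyframes.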
pub fn create_animation_smd(
ref_smd: &Smd, fbx: &PathBuf, flip_fix_list: &Vec<String>,
) -> Result<Smd, Error> {
// Read in the fbx we got told to convert
let file = BufReader::new(File::open(&fbx).unwrap());
let mut fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
// Read in the animation data itself
let animation = Animation::from_simple(&fbx).unwrap();
// Count and log frames
let frame_count = animation.frame_count(&fbx);
task_log(format!("Animation has {} frames", frame_count));
// Copy over every bone to the new animation SMD
let mut smd = Smd::new();
for bone in &ref_smd.bones {
smd.bones.push(bone.clone());
}
// Finally, turn the animation data into bone positions in the SMD
for frame in 0..frame_count {
// First transform the FBX for this frame
animation.transform_fbx_to_frame(&mut fbx, frame);
// Now go over all models
for (_, model) in fbx.objects.iter().filter(|&(_, o)| o.class.type_name() == "Model") {
// For this model, look up the matching BoneId in the reference SMD
if let Some(bone_id) = ref_smd.id_of_bone(&id_name(&model.name).unwrap()) {
// Now that we have a model and a bone, we need the current translation and rotation
// for the model
let (translation, rotation) = calculate_animation_transforms_for(&fbx, model, flip_fix_list);
// And now that we have those, finally add the bone data to the animation SMD
smd.set_animation(frame, bone_id, SmdAnimationFrameBone {
translation: translation.into(),
rotation: rotation.into(),
});
}
}
}
Ok(smd)
}
fn process_fbx_node(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, mut smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
// Perform node type specific processing
match fbx_node.object.class {
ObjectType::Geometry(ref geometry) =>
process_geometry(smd, geometry, matrix, current_bone.unwrap()),
ObjectType::Model(ref _model) =>
process_model(fbx, fbx_node, smd, matrix, current_bone, flip_fix_list)?,
_ => {
// Just go straight to the children
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, matrix, current_bone, flip_fix_list)?;
}
}
}
Ok(())
}
fn process_geometry(smd: &mut Smd, geometry: &Geometry, matrix: &Matrix4<f32>, current_bone: &SmdBone) {
// Add triangles to parent node
let tris = geometry.triangles();
for tri in tris {
// Turn the vertices in this triangle to SMD vertices
let mut smd_verts: [SmdVertex; 3] = Default::default();
for (i, vert) in tri.iter().enumerate() {
// Multiply the vectors that need to be multiplied
let pos = matrix * Vector4::new(vert.0[0], vert.0[1], vert.0[2], 1.0);
let norm = matrix * Vector4::new(vert.1[0], vert.1[1], vert.1[2], 0.0);
smd_verts[i] = SmdVertex {
parent_bone: current_bone.id, // This is overwritten by links
position: pos.truncate().into(),
normal: norm.truncate().into(),
uv: vert.2,
links: vec!(
/*Not needed, we aren't using weights anyway so this is done by parent_bone
SmdLink {
bone: bone_id,
weight: 1.0,
}*/
)
};
}
// Add the actual SMD triangle
smd.triangles.push(SmdTriangle {
material: "layl_test_texture".into(),
vertices: smd_verts,
});
}
}
fn process_model(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
task_log(format!("Adding model \"{}\" to SMD data", friendly_name(&fbx_node.object.name)));
let properties = ModelProperties::from_generic(&fbx_node.object.properties);
// Create a new transformation matrix
let local_matrix = local_matrices(&properties);
// Create a new bone
let new_bone = smd.new_bone(
&id_name(&fbx_node.object.name).unwrap(),
current_bone.map(|b| b.id)
)
.ok_or_else(|| Error::Task(format!(
"Bone \"{}\" exists multiple times in the FBX",
&fbx_node.object.name
)))?
.clone(); // Clone needed to avoid a borrow since we need to mut borrow SMD later
// Set the transformations on this bone
let (translation, rotation) = calculate_animation_transforms_for(&fbx, &fbx_node.object, flip_fix_list);
let first_frame = SmdAnimationFrameBone {
// This needs to be derived from the matrix to get the right location
translation: translation.into(),
// This can just be directly copied over
rotation: rotation.into(),
};
smd.set_animation(0, new_bone.id, first_frame);
// Make new matrices for children
let matrix = matrix * local_matrix;
// Make sure the child nodes will receive this new bone
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, &matrix, Some(&new_bone), flip_fix_list)?;
}
Ok(())
}
/// Returns (Translation, Rotation)
fn calculate_animation_transforms_for(
fbx: &SimpleFbx, obj: &Object, flip_fix_list: &Vec<String>,
) -> (Vector3<f32>, Vector3<f32>) {
let properties = ModelProperties::from_generic(&obj.properties);
// Get the bone's translation
let parent_after_rot_translation = calculate_parent_after_rot_translation(fbx, obj);
let prop_translation: Vector3<_> = properties.translation.into();
let prop_rot_offset: Vector3<_> = properties.rotation_offset.into();
let prop_rot_pivot: Vector3<_> = properties.rotation_pivot.into();
let translation = parent_after_rot_translation + prop_translation + prop_rot_offset + prop_rot_pivot;
// Check if this bone's in the flip fix list
// TODO: Get an actual fix instead of this dirty manual hack
let flip = flip_fix_list.iter().any(|n| n == &id_name(&obj.name).unwrap());
// We want the rotation, but we've got multiple rotations, so combine them
let pre_rotation = Quaternion::from(Euler::new(
Deg(properties.pre_rotation[0]), Deg(properties.pre_rotation[1]), Deg(properties.pre_rotation[2])
));
let rotation = Quaternion::from(Euler::new(
Deg(properties.rotation[0]), Deg(properties.rotation[1]), Deg(properties.rotation[2])
));
let post_rotation = Quaternion::from(Euler::new(
Deg(properties.post_rotation[0]), Deg(properties.post_rotation[1]), Deg(properties.post_rotation[2])
));
let total_rotation = if !flip | else {
Euler::from(post_rotation.invert() * rotation.invert() * pre_rotation)
};
let rotation = Vector3::new(
total_rotation.x.0,
total_rotation.y.0,
total_rotation.z.0,
);
(translation, rotation)
}
fn calculate_parent_after_rot_translation(fbx: &SimpleFbx, obj: &Object) -> Vector3<f32> {
// First actually get the parent's model data
let parent_obj = if let Some(v) = fbx.parent_of(obj.id) {
if v == 0 {
// At root, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
}
v
} else {
// No parent, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
};
| {
Euler::from(post_rotation.invert() * rotation * pre_rotation)
} | conditional_block |
tf_linear_reg.py | == "adadelta":
return tf.train.AdadeltaOptimizer(learning_rate)
elif optimizer == "adagrad":
return tf.train.AdagradOptimizer(learning_rate)
elif optimizer == "adam":
return tf.train.AdamOptimizer(learning_rate)
elif optimizer == "ftrl":
return tf.train.FtrlOptimizer(learning_rate)
elif optimizer == "rmsprop":
return tf.train.RMSPropOptimizer(learning_rate)
else:
print("Unknow optimizer, exit now")
exit(1)
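# Sparse linear model: y = W.x + b computed with embedding_lookup_sparse over (ids, values);
# also returns an L1 penalty on the weights for optional regularization.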
def linear_reg_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("linear_reg"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Regularization Term
reg_loss = tf.reduce_sum(tf.abs(W_ls[0]))
# Create the neural net graph
# y_est = tf.reduce_sum(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum")) + Bias_ls[0]
y_est = tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0]
return [y_est,reg_loss]
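# Fully-connected ReLU network: the first layer consumes the sparse (ids, values) input via
# embedding_lookup_sparse, the remaining layers are dense matmuls.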
def dnn_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("dnn_model"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE, hidden_nodes[0]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [hidden_nodes[0],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
for i in range(num_layers-1):
W_ls.append(tf.get_variable(
"weights_"+str(i+1), [hidden_nodes[i], hidden_nodes[i+1]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(i+1), [hidden_nodes[i+1],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
W_ls.append(tf.get_variable(
"weights_"+str(num_layers), [hidden_nodes[num_layers-1], LABEL_SIZE], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(num_layers), [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Create the neural net graph
y_est = tf.nn.relu(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0])
for i in range(num_layers):
y_est = tf.nn.relu(tf.matmul(y_est, W_ls[i+1])+Bias_ls[i+1])
return y_est
#def sparse_full_connect(sparse_ids,sparse_values,weights_shape,biases_shape,is_train=True):
# weights = tf.get_variable(
# "weights", weights_shape, initializer=tf.initializers.zeros())
# biases = tf.get_variable(
# "biases", biases_shape, initializer=tf.initializers.zeros())
# return tf.nn.embedding_lookup_sparse(
# weights, sparse_ids, sparse_values, combiner="sum") + biases
## TO BE CHANGED
#def logistic_regression_inference(sparse_ids, sparse_values, is_train=True):
# with tf.variable_scope("logistic_regression"):
# layer = sparse_full_connect(sparse_ids, sparse_values,[FEATURE_SIZE, LABEL_SIZE], [LABEL_SIZE])
# return layer
#
#
## TO BE CHANGED
#def inference(sparse_ids, sparse_values, is_train=True):
# return logistic_regression_inference(sparse_ids, sparse_values, is_train)
def | (filenames, batch_size):
# Define a `tf.contrib.data.Dataset` for iterating over one epoch of the data.
dataset = (tf.data.TFRecordDataset(filenames).
shuffle(buffer_size=MIN_AFTER_DEQUEUE).
batch(batch_size))
return dataset.make_initializable_iterator()
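# Parses a batch of serialized Examples into a dense float label plus sparse (ids, values) features.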
def get_features(tfrecords_file,batch_size):
iterator = input_pipeline(tfrecords_file, batch_size)
features_obj = iterator.get_next()
features = tf.parse_example(
features_obj,
# Defaults are not specified since both keys are required.
features={
"label": tf.FixedLenFeature([], tf.float32),
"ids": tf.VarLenFeature(tf.int64),
"values": tf.VarLenFeature(tf.float32),
})
return iterator, features
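# Training loop: both iterators are re-initialized every epoch and batches are consumed until
# OutOfRangeError, logging train/validation loss every STEPS_TO_VALIDATE steps.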
def main():
with tf.Session() as sess:
sess.run(init_op)
for _ in range(EPOCH_NUMBER):
# Resets the iterator at the beginning of an epoch.
sess.run(train_iterator.initializer)
sess.run(validate_iterator.initializer)
try:
while True:
_, step = sess.run([train_op, global_step])
# Print state while training
if step % STEPS_TO_VALIDATE == 0:
# logging.info("batch_ids are {0},batch_values are {0},batch_labels are {0}".format(sess.run([batch_ids,batch_values,batch_labels])))
# logging.info("valid_batch_ids are {0},valid_batch_labels are {0}".format(sess.run([validate_batch_values,validate_batch_labels])))
train_loss_val, vald_loss_val,wts_val,bias_val,model_output = sess.run([train_loss, vald_loss, wts,biases,model_output_tr])
print "Just above logging info line"
logging.info("Step: {}, train_loss: {}, vald_loss: {} wts_val: {} bias_val: {} y_tr:{}".format(step, train_loss_val,vald_loss_val,wts_val,bias_val,model_output))
except tf.errors.OutOfRangeError:
pass
print('Done training, epoch reached')
if __name__ == '__main__':
loaddir = '/Users/s.agrawalairan/OneDrive - CRITEO/InternProject/Data/TfSynData/'
MIN_AFTER_DEQUEUE = 20 #00000 #100
BATCH_SIZE = 100
VALIDATE_BATCH_SIZE = 100 # Size of validation batches
EPOCH_NUMBER = 500
OPTIMIZER = "sgd"
learning_rate = 0.01
STEPS_TO_VALIDATE = 5
REG_CONSTANT = 0
MIN_Y_VAL = 0.01 # For lognormal loss
FEATURE_SIZE = 11
LABEL_SIZE = 1
NUM_LAYERS = 3 # number of hidden layers in dnn (no. of weight matrices and biases vectors will be NUM_LAYERS + 1,)
HIDDEN_NODES = [FEATURE_SIZE]*NUM_LAYERS
# Get all FileNames
tfr_tr_filenames = [loaddir+"SynDataset1.tfrecords",loaddir+"SynDataset3.tfrecords"]
tfr_vald_filenames = [loaddir+"SynDataset2.tfrecords",loaddir+"SynDataset4.tfrecords"]
tf.reset_default_graph()
# Get a batch of y and X in tr_features
train_iterator, train_features = get_features(tfr_tr_filenames, BATCH_SIZE)
batch_labels = train_features["label"]
batch_ids = train_features["ids"]
batch_values = train_features["values"]
# Get a batch of y and X in vald_features
validate_iterator,validate_features = get_features(tfr_vald_filenames,VALIDATE_BATCH_SIZE)
validate_batch_labels = validate_features["label"]
validate_batch_ids = validate_features["ids"]
validate_batch_values = validate_features["values"]
# Model stored in inference function
# model_output_tr = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_tr,reg_loss_tr] = linear_reg_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
# Define Loss function
# loss = tf.reduce_sum(tf.nn.log_poisson_loss(batch_labels,model_output,name='Poisson_loss')) # Poisson Loss
# loss = tf.reduce_mean(tf.square(tf.log(model_output_tr) - tf.log(tf.add(batch_labels,MIN_Y_VAL)))) # Log Normal Loss
loss = tf.reduce_mean(tf.square(model_output_tr - batch_labels)) + REG_CONSTANT*reg_loss_tr # Regularized MSE
# Setting up optimizer; global_step keeps track of the number of batches seen so far
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = get_optimizer(OPTIMIZER, learning_rate)
# Define train_operation
| input_pipeline | identifier_name |
tf_linear_reg.py | elif optimizer == "adadelta":
return tf.train.AdadeltaOptimizer(learning_rate)
elif optimizer == "adagrad":
return tf.train.AdagradOptimizer(learning_rate)
elif optimizer == "adam":
return tf.train.AdamOptimizer(learning_rate)
elif optimizer == "ftrl":
return tf.train.FtrlOptimizer(learning_rate)
elif optimizer == "rmsprop":
return tf.train.RMSPropOptimizer(learning_rate)
else:
print("Unknow optimizer, exit now")
exit(1)
def linear_reg_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("linear_reg"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Regularization Term
reg_loss = tf.reduce_sum(tf.abs(W_ls[0]))
# Create the neural net graph
# y_est = tf.reduce_sum(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum")) + Bias_ls[0]
y_est = tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0]
return [y_est,reg_loss]
def dnn_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("dnn_model"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE, hidden_nodes[0]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [hidden_nodes[0],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
for i in range(num_layers-1):
W_ls.append(tf.get_variable(
"weights_"+str(i+1), [hidden_nodes[i], hidden_nodes[i+1]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(i+1), [hidden_nodes[i+1],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
W_ls.append(tf.get_variable(
"weights_"+str(num_layers), [hidden_nodes[num_layers-1], LABEL_SIZE], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(num_layers), [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Create the neural net graph
y_est = tf.nn.relu(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0])
for i in range(num_layers):
y_est = tf.nn.relu(tf.matmul(y_est, W_ls[i+1])+Bias_ls[i+1])
return y_est
#def sparse_full_connect(sparse_ids,sparse_values,weights_shape,biases_shape,is_train=True):
# weights = tf.get_variable(
# "weights", weights_shape, initializer=tf.initializers.zeros())
# biases = tf.get_variable(
# "biases", biases_shape, initializer=tf.initializers.zeros())
# return tf.nn.embedding_lookup_sparse(
# weights, sparse_ids, sparse_values, combiner="sum") + biases
## TO BE CHANGED
#def logistic_regression_inference(sparse_ids, sparse_values, is_train=True):
# with tf.variable_scope("logistic_regression"):
# layer = sparse_full_connect(sparse_ids, sparse_values,[FEATURE_SIZE, LABEL_SIZE], [LABEL_SIZE])
# return layer
#
#
## TO BE CHANGED
#def inference(sparse_ids, sparse_values, is_train=True):
# return logistic_regression_inference(sparse_ids, sparse_values, is_train)
def input_pipeline(filenames, batch_size):
# Define a `tf.contrib.data.Dataset` for iterating over one epoch of the data.
dataset = (tf.data.TFRecordDataset(filenames).
shuffle(buffer_size=MIN_AFTER_DEQUEUE).
batch(batch_size))
return dataset.make_initializable_iterator()
def get_features(tfrecords_file,batch_size):
iterator = input_pipeline(tfrecords_file, batch_size)
features_obj = iterator.get_next()
features = tf.parse_example(
features_obj,
# Defaults are not specified since both keys are required.
features={
"label": tf.FixedLenFeature([], tf.float32),
"ids": tf.VarLenFeature(tf.int64),
"values": tf.VarLenFeature(tf.float32),
})
return iterator, features
def main():
with tf.Session() as sess:
sess.run(init_op)
for _ in range(EPOCH_NUMBER):
# Resets the iterator at the beginning of an epoch.
sess.run(train_iterator.initializer)
sess.run(validate_iterator.initializer)
try:
while True:
_, step = sess.run([train_op, global_step])
# Print state while training
if step % STEPS_TO_VALIDATE == 0:
# logging.info("batch_ids are {0},batch_values are {0},batch_labels are {0}".format(sess.run([batch_ids,batch_values,batch_labels])))
# logging.info("valid_batch_ids are {0},valid_batch_labels are {0}".format(sess.run([validate_batch_values,validate_batch_labels])))
train_loss_val, vald_loss_val,wts_val,bias_val,model_output = sess.run([train_loss, vald_loss, wts,biases,model_output_tr])
print "Just above logging info line"
logging.info("Step: {}, train_loss: {}, vald_loss: {} wts_val: {} bias_val: {} y_tr:{}".format(step, train_loss_val,vald_loss_val,wts_val,bias_val,model_output))
except tf.errors.OutOfRangeError:
pass
print('Done training, epoch reached')
if __name__ == '__main__':
loaddir = '/Users/s.agrawalairan/OneDrive - CRITEO/InternProject/Data/TfSynData/'
MIN_AFTER_DEQUEUE = 20 #00000 #100
BATCH_SIZE = 100
VALIDATE_BATCH_SIZE = 100 # Size of validation batches
EPOCH_NUMBER = 500
OPTIMIZER = "sgd"
learning_rate = 0.01
STEPS_TO_VALIDATE = 5
REG_CONSTANT = 0
MIN_Y_VAL = 0.01 # For lognormal loss
FEATURE_SIZE = 11
LABEL_SIZE = 1
NUM_LAYERS = 3 # number of hidden layers in dnn (no. of weight matrices and biases vectors will be NUM_LAYERS + 1,)
HIDDEN_NODES = [FEATURE_SIZE]*NUM_LAYERS | tfr_vald_filenames = [loaddir+"SynDataset2.tfrecords",loaddir+"SynDataset4.tfrecords"]
tf.reset_default_graph()
# Get a batch of y and X in tr_features
train_iterator, train_features = get_features(tfr_tr_filenames, BATCH_SIZE)
batch_labels = train_features["label"]
batch_ids = train_features["ids"]
batch_values = train_features["values"]
# Get a batch of y and X in vald_features
validate_iterator,validate_features = get_features(tfr_vald_filenames,VALIDATE_BATCH_SIZE)
validate_batch_labels = validate_features["label"]
validate_batch_ids = validate_features["ids"]
validate_batch_values = validate_features["values"]
# Model stored in inference function
# model_output_tr = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_tr,reg_loss_tr] = linear_reg_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
# Define Loss function
# loss = tf.reduce_sum(tf.nn.log_poisson_loss(batch_labels,model_output,name='Poisson_loss')) # Poisson Loss
# loss = tf.reduce_mean(tf.square(tf.log(model_output_tr) - tf.log(tf.add(batch_labels,MIN_Y_VAL)))) # Log Normal Loss
loss = tf.reduce_mean(tf.square(model_output_tr - batch_labels)) + REG_CONSTANT*reg_loss_tr # Regularized MSE
# Setting up optimizer; global_step keeps track of the number of batches seen so far
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = get_optimizer(OPTIMIZER, learning_rate)
# Define train_operation
| # Get all FileNames
tfr_tr_filenames = [loaddir+"SynDataset1.tfrecords",loaddir+"SynDataset3.tfrecords"] | random_line_split |
tf_linear_reg.py | optimizer == "adadelta":
return tf.train.AdadeltaOptimizer(learning_rate)
elif optimizer == "adagrad":
return tf.train.AdagradOptimizer(learning_rate)
elif optimizer == "adam":
return tf.train.AdamOptimizer(learning_rate)
elif optimizer == "ftrl":
return tf.train.FtrlOptimizer(learning_rate)
elif optimizer == "rmsprop":
return tf.train.RMSPropOptimizer(learning_rate)
else:
print("Unknow optimizer, exit now")
exit(1)
def linear_reg_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("linear_reg"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Regularization Term
reg_loss = tf.reduce_sum(tf.abs(W_ls[0]))
# Create the neural net graph
# y_est = tf.reduce_sum(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum")) + Bias_ls[0]
y_est = tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0]
return [y_est,reg_loss]
def dnn_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("dnn_model"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE, hidden_nodes[0]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [hidden_nodes[0],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
for i in range(num_layers-1):
W_ls.append(tf.get_variable(
"weights_"+str(i+1), [hidden_nodes[i], hidden_nodes[i+1]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(i+1), [hidden_nodes[i+1],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
W_ls.append(tf.get_variable(
"weights_"+str(num_layers), [hidden_nodes[num_layers-1], LABEL_SIZE], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(num_layers), [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Create the neural net graph
y_est = tf.nn.relu(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0])
for i in range(num_layers):
y_est = tf.nn.relu(tf.matmul(y_est, W_ls[i+1])+Bias_ls[i+1])
return y_est
#def sparse_full_connect(sparse_ids,sparse_values,weights_shape,biases_shape,is_train=True):
# weights = tf.get_variable(
# "weights", weights_shape, initializer=tf.initializers.zeros())
# biases = tf.get_variable(
# "biases", biases_shape, initializer=tf.initializers.zeros())
# return tf.nn.embedding_lookup_sparse(
# weights, sparse_ids, sparse_values, combiner="sum") + biases
## TO BE CHANGED
#def logistic_regression_inference(sparse_ids, sparse_values, is_train=True):
# with tf.variable_scope("logistic_regression"):
# layer = sparse_full_connect(sparse_ids, sparse_values,[FEATURE_SIZE, LABEL_SIZE], [LABEL_SIZE])
# return layer
#
#
## TO BE CHANGED
#def inference(sparse_ids, sparse_values, is_train=True):
# return logistic_regression_inference(sparse_ids, sparse_values, is_train)
def input_pipeline(filenames, batch_size):
# Define a `tf.contrib.data.Dataset` for iterating over one epoch of the data.
|
def get_features(tfrecords_file,batch_size):
iterator = input_pipeline(tfrecords_file, batch_size)
features_obj = iterator.get_next()
features = tf.parse_example(
features_obj,
# Defaults are not specified since both keys are required.
features={
"label": tf.FixedLenFeature([], tf.float32),
"ids": tf.VarLenFeature(tf.int64),
"values": tf.VarLenFeature(tf.float32),
})
return iterator, features
def main():
with tf.Session() as sess:
sess.run(init_op)
for _ in range(EPOCH_NUMBER):
# Resets the iterator at the beginning of an epoch.
sess.run(train_iterator.initializer)
sess.run(validate_iterator.initializer)
try:
while True:
_, step = sess.run([train_op, global_step])
# Print state while training
if step % STEPS_TO_VALIDATE == 0:
# logging.info("batch_ids are {0},batch_values are {0},batch_labels are {0}".format(sess.run([batch_ids,batch_values,batch_labels])))
# logging.info("valid_batch_ids are {0},valid_batch_labels are {0}".format(sess.run([validate_batch_values,validate_batch_labels])))
train_loss_val, vald_loss_val,wts_val,bias_val,model_output = sess.run([train_loss, vald_loss, wts,biases,model_output_tr])
print "Just above logging info line"
logging.info("Step: {}, train_loss: {}, vald_loss: {} wts_val: {} bias_val: {} y_tr:{}".format(step, train_loss_val,vald_loss_val,wts_val,bias_val,model_output))
except tf.errors.OutOfRangeError:
pass
print('Done training, epoch reached')
if __name__ == '__main__':
loaddir = '/Users/s.agrawalairan/OneDrive - CRITEO/InternProject/Data/TfSynData/'
MIN_AFTER_DEQUEUE = 20 #00000 #100
BATCH_SIZE = 100
VALIDATE_BATCH_SIZE = 100 # Size of validation batches
EPOCH_NUMBER = 500
OPTIMIZER = "sgd"
learning_rate = 0.01
STEPS_TO_VALIDATE = 5
REG_CONSTANT = 0
MIN_Y_VAL = 0.01 # For lognormal loss
FEATURE_SIZE = 11
LABEL_SIZE = 1
NUM_LAYERS = 3 # number of hidden layers in dnn (no. of weight matrices and biases vectors will be NUM_LAYERS + 1,)
HIDDEN_NODES = [FEATURE_SIZE]*NUM_LAYERS
# Get all FileNames
tfr_tr_filenames = [loaddir+"SynDataset1.tfrecords",loaddir+"SynDataset3.tfrecords"]
tfr_vald_filenames = [loaddir+"SynDataset2.tfrecords",loaddir+"SynDataset4.tfrecords"]
tf.reset_default_graph()
# Get a batch of y and X in tr_features
train_iterator, train_features = get_features(tfr_tr_filenames, BATCH_SIZE)
batch_labels = train_features["label"]
batch_ids = train_features["ids"]
batch_values = train_features["values"]
# Get a batch of y and X in vald_features
validate_iterator,validate_features = get_features(tfr_vald_filenames,VALIDATE_BATCH_SIZE)
validate_batch_labels = validate_features["label"]
validate_batch_ids = validate_features["ids"]
validate_batch_values = validate_features["values"]
# Model stored in inference function
# model_output_tr = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_tr,reg_loss_tr] = linear_reg_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
# Define Loss function
# loss = tf.reduce_sum(tf.nn.log_poisson_loss(batch_labels,model_output,name='Poisson_loss')) # Poisson Loss
# loss = tf.reduce_mean(tf.square(tf.log(model_output_tr) - tf.log(tf.add(batch_labels,MIN_Y_VAL)))) # Log Normal Loss
loss = tf.reduce_mean(tf.square(model_output_tr - batch_labels)) + REG_CONSTANT*reg_loss_tr # Regularized MSE
# Setting up optimizer; global_step keeps track of the number of batches seen so far
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = get_optimizer(OPTIMIZER, learning_rate)
# Define train_operation
| dataset = (tf.data.TFRecordDataset(filenames).
shuffle(buffer_size=MIN_AFTER_DEQUEUE).
batch(batch_size))
return dataset.make_initializable_iterator() | identifier_body |
tf_linear_reg.py | == "adadelta":
return tf.train.AdadeltaOptimizer(learning_rate)
elif optimizer == "adagrad":
return tf.train.AdagradOptimizer(learning_rate)
elif optimizer == "adam":
return tf.train.AdamOptimizer(learning_rate)
elif optimizer == "ftrl":
return tf.train.FtrlOptimizer(learning_rate)
elif optimizer == "rmsprop":
return tf.train.RMSPropOptimizer(learning_rate)
else:
|
def linear_reg_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("linear_reg"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Regularization Term
reg_loss = tf.reduce_sum(tf.abs(W_ls[0]))
# Create the neural net graph
# y_est = tf.reduce_sum(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum")) + Bias_ls[0]
y_est = tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0]
return [y_est,reg_loss]
def dnn_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("dnn_model"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE, hidden_nodes[0]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [hidden_nodes[0],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
for i in range(num_layers-1):
W_ls.append(tf.get_variable(
"weights_"+str(i+1), [hidden_nodes[i], hidden_nodes[i+1]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(i+1), [hidden_nodes[i+1],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
W_ls.append(tf.get_variable(
"weights_"+str(num_layers), [hidden_nodes[num_layers-1], LABEL_SIZE], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(num_layers), [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Create the neural net graph
y_est = tf.nn.relu(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0])
for i in range(num_layers):
y_est = tf.nn.relu(tf.matmul(y_est, W_ls[i+1])+Bias_ls[i+1])
return y_est
#def sparse_full_connect(sparse_ids,sparse_values,weights_shape,biases_shape,is_train=True):
# weights = tf.get_variable(
# "weights", weights_shape, initializer=tf.initializers.zeros())
# biases = tf.get_variable(
# "biases", biases_shape, initializer=tf.initializers.zeros())
# return tf.nn.embedding_lookup_sparse(
# weights, sparse_ids, sparse_values, combiner="sum") + biases
## TO BE CHANGED
#def logistic_regression_inference(sparse_ids, sparse_values, is_train=True):
# with tf.variable_scope("logistic_regression"):
# layer = sparse_full_connect(sparse_ids, sparse_values,[FEATURE_SIZE, LABEL_SIZE], [LABEL_SIZE])
# return layer
#
#
## TO BE CHANGED
#def inference(sparse_ids, sparse_values, is_train=True):
# return logistic_regression_inference(sparse_ids, sparse_values, is_train)
def input_pipeline(filenames, batch_size):
# Define a `tf.contrib.data.Dataset` for iterating over one epoch of the data.
dataset = (tf.data.TFRecordDataset(filenames).
shuffle(buffer_size=MIN_AFTER_DEQUEUE).
batch(batch_size))
return dataset.make_initializable_iterator()
def get_features(tfrecords_file,batch_size):
iterator = input_pipeline(tfrecords_file, batch_size)
features_obj = iterator.get_next()
features = tf.parse_example(
features_obj,
# Defaults are not specified since both keys are required.
features={
"label": tf.FixedLenFeature([], tf.float32),
"ids": tf.VarLenFeature(tf.int64),
"values": tf.VarLenFeature(tf.float32),
})
return iterator, features
def main():
with tf.Session() as sess:
sess.run(init_op)
for _ in range(EPOCH_NUMBER):
# Resets the iterator at the beginning of an epoch.
sess.run(train_iterator.initializer)
sess.run(validate_iterator.initializer)
try:
while True:
_, step = sess.run([train_op, global_step])
# Print state while training
if step % STEPS_TO_VALIDATE == 0:
# logging.info("batch_ids are {0},batch_values are {0},batch_labels are {0}".format(sess.run([batch_ids,batch_values,batch_labels])))
# logging.info("valid_batch_ids are {0},valid_batch_labels are {0}".format(sess.run([validate_batch_values,validate_batch_labels])))
train_loss_val, vald_loss_val,wts_val,bias_val,model_output = sess.run([train_loss, vald_loss, wts,biases,model_output_tr])
print "Just above logging info line"
logging.info("Step: {}, train_loss: {}, vald_loss: {} wts_val: {} bias_val: {} y_tr:{}".format(step, train_loss_val,vald_loss_val,wts_val,bias_val,model_output))
except tf.errors.OutOfRangeError:
pass
print('Done training, epoch reached')
if __name__ == '__main__':
loaddir = '/Users/s.agrawalairan/OneDrive - CRITEO/InternProject/Data/TfSynData/'
MIN_AFTER_DEQUEUE = 20 #00000 #100
BATCH_SIZE = 100
VALIDATE_BATCH_SIZE = 100 # Size of validation batches
EPOCH_NUMBER = 500
OPTIMIZER = "sgd"
learning_rate = 0.01
STEPS_TO_VALIDATE = 5
REG_CONSTANT = 0
MIN_Y_VAL = 0.01 # For lognormal loss
FEATURE_SIZE = 11
LABEL_SIZE = 1
NUM_LAYERS = 3 # number of hidden layers in dnn (no. of weight matrices and biases vectors will be NUM_LAYERS + 1,)
HIDDEN_NODES = [FEATURE_SIZE]*NUM_LAYERS
# Get all FileNames
tfr_tr_filenames = [loaddir+"SynDataset1.tfrecords",loaddir+"SynDataset3.tfrecords"]
tfr_vald_filenames = [loaddir+"SynDataset2.tfrecords",loaddir+"SynDataset4.tfrecords"]
tf.reset_default_graph()
# Get a batch of y and X in tr_features
train_iterator, train_features = get_features(tfr_tr_filenames, BATCH_SIZE)
batch_labels = train_features["label"]
batch_ids = train_features["ids"]
batch_values = train_features["values"]
# Get a batch of y and X in vald_features
validate_iterator,validate_features = get_features(tfr_vald_filenames,VALIDATE_BATCH_SIZE)
validate_batch_labels = validate_features["label"]
validate_batch_ids = validate_features["ids"]
validate_batch_values = validate_features["values"]
# Model stored in inference function
# model_output_tr = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_tr,reg_loss_tr] = linear_reg_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
# Define Loss function
# loss = tf.reduce_sum(tf.nn.log_poisson_loss(batch_labels,model_output,name='Poisson_loss')) # Poisson Loss
# loss = tf.reduce_mean(tf.square(tf.log(model_output_tr) - tf.log(tf.add(batch_labels,MIN_Y_VAL)))) # Log Normal Loss
loss = tf.reduce_mean(tf.square(model_output_tr - batch_labels)) + REG_CONSTANT*reg_loss_tr # Regularized MSE
# Setting up optimizer; global_step keeps track of the number of batches seen so far
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = get_optimizer(OPTIMIZER, learning_rate)
# Define train_operation
| print("Unknown optimizer, exit now")
exit(1) | conditional_block |
lexer.go | l *Lexer) NextToken() token.Token {
// Ignore any number of sequential whitespace.
l.consumeWhitespace()
switch {
case l.char == 0:
l.assignToken(token.EOF, "")
case l.char == '=':
switch l.peek() {
case '=': // ==
l.advance()
l.assignToken(token.EQ, "==")
case '>': // =>
l.advance()
l.assignToken(token.FATARROW, "=>")
default: // =
l.assignToken(token.ASSIGN, string(l.char))
}
case l.char == '>':
switch l.peek() {
case '=': // >=
l.advance()
l.assignToken(token.GTE, ">=")
case '>': // >>
l.advance()
l.assignToken(token.BITSHRIGHT, ">>")
default: // >
l.assignToken(token.GT, string(l.char))
}
case l.char == '<':
switch l.peek() {
case '=': // <=
l.advance()
l.assignToken(token.LTE, "<=")
case '<': // <<
l.advance()
l.assignToken(token.BITSHLEFT, "<<")
default: // <
l.assignToken(token.LT, string(l.char))
}
case l.char == '+':
switch l.peek() {
case '=': // +=
l.advance()
l.assignToken(token.ASSIGNPLUS, "+=")
default: // +
l.assignToken(token.PLUS, string(l.char))
}
case l.char == '-':
switch l.peek() {
case '>': // ->
l.advance()
l.assignToken(token.ARROW, string("->"))
case '=': // -=
l.advance()
l.assignToken(token.ASSIGNMIN, "-=")
default:
l.assignToken(token.MINUS, string(l.char))
}
case l.char == '*':
switch l.peek() {
case '*': // **
l.advance()
l.assignToken(token.POWER, "**")
case '=': // *=
l.advance()
l.assignToken(token.ASSIGNMULT, "*=")
default: // *
l.assignToken(token.ASTERISK, string(l.char))
}
case l.char == '/':
switch l.peek() {
case '/': // single line comment
l.advance()
l.consumeComment()
case '*': // multiline comment
l.advance()
l.consumeMultilineComment()
case '=': // /=
l.advance()
l.assignToken(token.ASSIGNDIV, "/=")
default:
l.assignToken(token.SLASH, string(l.char))
}
case l.char == '%':
l.assignToken(token.MODULO, string(l.char))
case l.char == ',':
l.assignToken(token.COMMA, string(l.char))
case l.char == '.':
switch l.peek() {
case '.':
l.advance()
switch l.peek() {
case '.': // ...
l.advance()
l.assignToken(token.ELLIPSIS, "...")
default: // ..
l.assignToken(token.RANGE, "..")
}
default: // .
l.assignToken(token.DOT, string(l.char))
}
case l.char == '|':
switch l.peek() {
case '|': // ||
l.advance()
l.assignToken(token.OR, "||")
case '>': // |>
l.advance()
l.assignToken(token.PIPE, "|>")
default: // |
l.assignToken(token.BITOR, string(l.char))
}
case l.char == '&':
switch l.peek() {
case '&': // &&
l.advance()
l.assignToken(token.AND, "&&")
default: // &
l.assignToken(token.BITAND, string(l.char))
}
case l.char == '~':
l.assignToken(token.BITNOT, string(l.char))
case l.char == '!':
switch l.peek() {
case '=': // !=
l.advance()
l.assignToken(token.UNEQ, "!=")
default: // !
l.assignToken(token.BANG, string(l.char))
}
case l.char == '(':
l.assignToken(token.LPAREN, "(")
case l.char == ')':
l.assignToken(token.RPAREN, ")")
case l.char == '[':
l.assignToken(token.LBRACK, "[")
case l.char == ']':
l.assignToken(token.RBRACK, "]")
case l.char == '?':
l.assignToken(token.QUESTION, "?")
case l.char == ':':
l.assignToken(token.COLON, ":")
case l.char == '_':
l.assignToken(token.UNDERSCORE, "_")
case l.char == '\n':
l.assignToken(token.NEWLINE, "\\n")
case l.char == '"': // Anything inside double quotes is a string.
l.consumeString()
case l.char == '0' && l.peek() == 'x': // Hex.
l.consumeSpecialInteger(l.isHex)
case l.char == '0' && l.peek() == 'o': // Octal.
l.consumeSpecialInteger(l.isOctal)
case l.char == '0' && l.peek() == 'b': // Binary.
l.consumeSpecialInteger(l.isBinary)
case l.isNumber(l.char): // Numeric literal.
l.consumeNumeric()
default:
// Identifier or keyword.
if l.isName(l.char) {
l.consumeIdent()
} else {
l.reportError(fmt.Sprintf("Unidentified character '%s'", string(l.char)))
}
}
l.advance()
return l.token
}
// Move the cursor ahead.
func (l *Lexer) advance() |
// Check characters ahead but don't move the cursor.
func (l *Lexer) peek() rune {
rn, err := l.reader.Peek()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
return rn
}
// Move the cursor to the previous character.
func (l *Lexer) rewind() {
if err := l.reader.Unread(); err != nil {
l.reportError("Unable to move to the previous character. This is an internal Lexer fail.")
}
l.rewinded = true
}
// Move row and column cursor.
func (l *Lexer) moveLocation() {
switch l.char {
case '\n':
l.row += 1
l.col = 2
default:
l.col += 1
}
}
// Pass a token to the active token cursor.
func (l *Lexer) assignToken(toktype token.TokenType, value string) {
l.token = token.Token{
Type: toktype,
Lexeme: value,
Location: token.Location{Row: l.row, Col: l.col},
}
}
// Check if the character makes for a valid identifier
// or keyword.
func (l *Lexer) isName(char rune) bool {
return (char >= 'a' && char <= 'z') || (char >= 'A' && char <= 'Z') ||
(char >= '0' && char <= '9') || char == '_' || char == '!' || char == '?'
}
// Check if the character is a number.
func (l *Lexer) isNumber(char rune) bool {
return char >= '0' && char <= '9'
}
// Check if the character is an hexadecimal.
func (l *Lexer) isHex(char rune) bool {
return l.isNumber(char) || (char >= 'a' && char <= 'f' || char >= 'A' && char <= 'F')
}
// Check if the character is an octal.
func (l *Lexer) isOctal(char rune) bool {
return char >= '0' && char <= '7'
}
// Check if the character is a binary.
func (l *Lexer) isBinary(char rune) bool {
return char == '0' || char == '1'
}
// Read all valid characters from an identifier or keyword.
func (l *Lexer) readName() string {
var out bytes.Buffer
out.WriteRune(l.char)
// Read until a non-name character is found.
for l.isName(l.peek()) {
l.advance()
out.WriteRune(l.char)
}
return out.String()
}
// Move the cursor until it exhausts whitespace.
func (l *Lexer) consumeWhitespace() {
for l.char == ' ' || l.char == '\t' || l.char == '\r' {
l.advance()
}
}
// Read a string literal.
func (l *Lexer) consumeString() {
var out bytes.Buffer
// Move past the opening double quote.
l.advance()
loop:
for {
switch l.char {
case '\\': // escape characters
l.advance()
switch l.char {
case '"': // \"
out.WriteRune('\\')
out.WriteRune('"')
case '\\': // \\
out.WriteRune('\\')
case 'n', 't', 'r', 'a', 'b', 'f', 'v':
out.WriteRune('\\')
out.WriteRune(l.char)
| {
rn, err := l.reader.Advance()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
// Don't move the location if it was a
// rewind, or it will report an incorrect
// line and column.
if !l.rewinded {
l.moveLocation()
}
l.rewinded = false
l.char = rn
} | identifier_body |
lexer.go | l *Lexer) NextToken() token.Token {
// Ignore any number of sequential whitespace.
l.consumeWhitespace()
switch {
case l.char == 0:
l.assignToken(token.EOF, "")
case l.char == '=':
switch l.peek() {
case '=': // ==
l.advance()
l.assignToken(token.EQ, "==")
case '>': // =>
l.advance()
l.assignToken(token.FATARROW, "=>")
default: // =
l.assignToken(token.ASSIGN, string(l.char))
}
case l.char == '>':
switch l.peek() {
case '=': // >=
l.advance()
l.assignToken(token.GTE, ">=")
case '>': // >>
l.advance()
l.assignToken(token.BITSHRIGHT, ">>")
default: // >
l.assignToken(token.GT, string(l.char))
}
case l.char == '<':
switch l.peek() {
case '=': // <=
l.advance()
l.assignToken(token.LTE, "<=")
case '<': // <<
l.advance()
l.assignToken(token.BITSHLEFT, "<<")
default: // <
l.assignToken(token.LT, string(l.char))
}
case l.char == '+':
switch l.peek() {
case '=': // +=
l.advance()
l.assignToken(token.ASSIGNPLUS, "+=")
default: // +
l.assignToken(token.PLUS, string(l.char))
}
case l.char == '-':
switch l.peek() {
case '>': // ->
l.advance()
l.assignToken(token.ARROW, string("->"))
case '=': // -=
l.advance()
l.assignToken(token.ASSIGNMIN, "-=")
default:
l.assignToken(token.MINUS, string(l.char))
}
case l.char == '*':
switch l.peek() {
case '*': // **
l.advance()
l.assignToken(token.POWER, "**")
case '=': // *=
l.advance()
l.assignToken(token.ASSIGNMULT, "*=")
default: // *
l.assignToken(token.ASTERISK, string(l.char))
}
case l.char == '/':
switch l.peek() {
case '/': // single line comment
l.advance()
l.consumeComment()
case '*': // multiline comment
l.advance()
l.consumeMultilineComment()
case '=': // /=
l.advance()
l.assignToken(token.ASSIGNDIV, "/=")
default:
l.assignToken(token.SLASH, string(l.char))
}
case l.char == '%':
l.assignToken(token.MODULO, string(l.char))
case l.char == ',':
l.assignToken(token.COMMA, string(l.char))
case l.char == '.':
switch l.peek() {
case '.':
l.advance()
switch l.peek() {
case '.': // ...
l.advance()
l.assignToken(token.ELLIPSIS, "...")
default: // ..
l.assignToken(token.RANGE, "..")
}
default: // .
l.assignToken(token.DOT, string(l.char))
}
case l.char == '|':
switch l.peek() {
case '|': // ||
l.advance()
l.assignToken(token.OR, "||")
case '>': // |>
l.advance()
l.assignToken(token.PIPE, "|>")
default: // |
l.assignToken(token.BITOR, string(l.char))
}
case l.char == '&':
switch l.peek() {
case '&': // &&
l.advance()
l.assignToken(token.AND, "&&")
default: // &
l.assignToken(token.BITAND, string(l.char))
}
case l.char == '~':
l.assignToken(token.BITNOT, string(l.char))
case l.char == '!':
switch l.peek() {
case '=': // !=
l.advance()
l.assignToken(token.UNEQ, "!=")
default: // !
l.assignToken(token.BANG, string(l.char))
}
case l.char == '(':
l.assignToken(token.LPAREN, "(")
case l.char == ')':
l.assignToken(token.RPAREN, ")")
case l.char == '[':
l.assignToken(token.LBRACK, "[")
case l.char == ']':
l.assignToken(token.RBRACK, "]")
case l.char == '?':
l.assignToken(token.QUESTION, "?")
case l.char == ':':
l.assignToken(token.COLON, ":")
case l.char == '_':
l.assignToken(token.UNDERSCORE, "_")
case l.char == '\n':
l.assignToken(token.NEWLINE, "\\n")
case l.char == '"': // Anything inside double quotes is a string.
l.consumeString()
case l.char == '0' && l.peek() == 'x': // Hex.
l.consumeSpecialInteger(l.isHex)
case l.char == '0' && l.peek() == 'o': // Octal.
l.consumeSpecialInteger(l.isOctal)
case l.char == '0' && l.peek() == 'b': // Binary.
l.consumeSpecialInteger(l.isBinary)
case l.isNumber(l.char): // Numeric literal.
l.consumeNumeric()
default:
// Identifier or keyword.
if l.isName(l.char) {
l.consumeIdent()
} else {
l.reportError(fmt.Sprintf("Unidentified character '%s'", string(l.char)))
}
}
l.advance()
return l.token
}
// Move the cursor ahead.
func (l *Lexer) advance() {
rn, err := l.reader.Advance()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
// Don't move the location if it was a
// rewind, or it will report an incorrect
// line and column.
if !l.rewinded {
l.moveLocation()
}
l.rewinded = false
l.char = rn
}
// Check characters ahead but don't move the cursor.
func (l *Lexer) peek() rune {
rn, err := l.reader.Peek()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
return rn
}
// Move the cursor to the previous character.
func (l *Lexer) rewind() {
if err := l.reader.Unread(); err != nil {
l.reportError("Unable to move to the previous character. This is an internal Lexer fail.")
}
l.rewinded = true
}
// Move row and column cursor.
func (l *Lexer) | () {
switch l.char {
case '\n':
l.row += 1
l.col = 2
default:
l.col += 1
}
}
// Pass a token to the active token cursor.
func (l *Lexer) assignToken(toktype token.TokenType, value string) {
l.token = token.Token{
Type: toktype,
Lexeme: value,
Location: token.Location{Row: l.row, Col: l.col},
}
}
// Check if the character makes for a valid identifier
// or keyword.
func (l *Lexer) isName(char rune) bool {
return (char >= 'a' && char <= 'z') || (char >= 'A' && char <= 'Z') ||
(char >= '0' && char <= '9') || char == '_' || char == '!' || char == '?'
}
// Check if the character is a number.
func (l *Lexer) isNumber(char rune) bool {
return char >= '0' && char <= '9'
}
// Check if the character is an hexadecimal.
func (l *Lexer) isHex(char rune) bool {
return l.isNumber(char) || (char >= 'a' && char <= 'f' || char >= 'A' && char <= 'F')
}
// Check if the character is an octal.
func (l *Lexer) isOctal(char rune) bool {
return char >= '0' && char <= '7'
}
// Check if the character is a binary.
func (l *Lexer) isBinary(char rune) bool {
return char == '0' || char == '1'
}
// Read all valid characters from an identifier or keyword.
func (l *Lexer) readName() string {
var out bytes.Buffer
out.WriteRune(l.char)
// Read until a non-name character is found.
for l.isName(l.peek()) {
l.advance()
out.WriteRune(l.char)
}
return out.String()
}
// Move the cursor until it exhausts whitespace.
func (l *Lexer) consumeWhitespace() {
for l.char == ' ' || l.char == '\t' || l.char == '\r' {
l.advance()
}
}
// Read a string literal.
func (l *Lexer) consumeString() {
var out bytes.Buffer
// Move past the opening double quote.
l.advance()
loop:
for {
switch l.char {
case '\\': // escape characters
l.advance()
switch l.char {
case '"': // \"
out.WriteRune('\\')
out.WriteRune('"')
case '\\': // \\
out.WriteRune('\\')
case 'n', 't', 'r', 'a', 'b', 'f', 'v':
out.WriteRune('\\')
out.WriteRune(l.char)
| moveLocation | identifier_name |
lexer.go | ANGE, "..")
}
default: // .
l.assignToken(token.DOT, string(l.char))
}
case l.char == '|':
switch l.peek() {
case '|': // ||
l.advance()
l.assignToken(token.OR, "||")
case '>': // |>
l.advance()
l.assignToken(token.PIPE, "|>")
default: // |
l.assignToken(token.BITOR, string(l.char))
}
case l.char == '&':
switch l.peek() {
case '&': // &&
l.advance()
l.assignToken(token.AND, "&&")
default: // &
l.assignToken(token.BITAND, string(l.char))
}
case l.char == '~':
l.assignToken(token.BITNOT, string(l.char))
case l.char == '!':
switch l.peek() {
case '=': // !=
l.advance()
l.assignToken(token.UNEQ, "!=")
default: // !
l.assignToken(token.BANG, string(l.char))
}
case l.char == '(':
l.assignToken(token.LPAREN, "(")
case l.char == ')':
l.assignToken(token.RPAREN, ")")
case l.char == '[':
l.assignToken(token.LBRACK, "[")
case l.char == ']':
l.assignToken(token.RBRACK, "]")
case l.char == '?':
l.assignToken(token.QUESTION, "?")
case l.char == ':':
l.assignToken(token.COLON, ":")
case l.char == '_':
l.assignToken(token.UNDERSCORE, "_")
case l.char == '\n':
l.assignToken(token.NEWLINE, "\\n")
case l.char == '"': // Anything inside double quotes is a string.
l.consumeString()
case l.char == '0' && l.peek() == 'x': // Hex.
l.consumeSpecialInteger(l.isHex)
case l.char == '0' && l.peek() == 'o': // Octal.
l.consumeSpecialInteger(l.isOctal)
case l.char == '0' && l.peek() == 'b': // Binary.
l.consumeSpecialInteger(l.isBinary)
case l.isNumber(l.char): // Numeric literal.
l.consumeNumeric()
default:
// Identifier or keyword.
if l.isName(l.char) {
l.consumeIdent()
} else {
l.reportError(fmt.Sprintf("Unidentified character '%s'", string(l.char)))
}
}
l.advance()
return l.token
}
// Move the cursor ahead.
func (l *Lexer) advance() {
rn, err := l.reader.Advance()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
// Don't move the location if it was a
// rewind, or it will report an incorrect
// line and column.
if !l.rewinded {
l.moveLocation()
}
l.rewinded = false
l.char = rn
}
// Check characters ahead but don't move the cursor.
func (l *Lexer) peek() rune {
rn, err := l.reader.Peek()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
return rn
}
// Move the cursor to the previous character.
func (l *Lexer) rewind() {
if err := l.reader.Unread(); err != nil {
l.reportError("Unable to move to the previous character. This is an internal Lexer fail.")
}
l.rewinded = true
}
// Move row and column cursor.
func (l *Lexer) moveLocation() {
switch l.char {
case '\n':
l.row += 1
l.col = 2
default:
l.col += 1
}
}
// Pass a token to the active token cursor.
func (l *Lexer) assignToken(toktype token.TokenType, value string) {
l.token = token.Token{
Type: toktype,
Lexeme: value,
Location: token.Location{Row: l.row, Col: l.col},
}
}
// Check if the character makes for a valid identifier
// or keyword.
func (l *Lexer) isName(char rune) bool {
return (char >= 'a' && char <= 'z') || (char >= 'A' && char <= 'Z') ||
(char >= '0' && char <= '9') || char == '_' || char == '!' || char == '?'
}
// Check if the character is a number.
func (l *Lexer) isNumber(char rune) bool {
return char >= '0' && char <= '9'
}
// Check if the character is an hexadecimal.
func (l *Lexer) isHex(char rune) bool {
return l.isNumber(char) || (char >= 'a' && char <= 'f' || char >= 'A' && char <= 'F')
}
// Check if the character is an octal.
func (l *Lexer) isOctal(char rune) bool {
return char >= '0' && char <= '7'
}
// Check if the character is a binary.
func (l *Lexer) isBinary(char rune) bool {
return char == '0' || char == '1'
}
// Read all valid characters from an identifier or keyword.
func (l *Lexer) readName() string {
var out bytes.Buffer
out.WriteRune(l.char)
// Read until a non-name character is found.
for l.isName(l.peek()) {
l.advance()
out.WriteRune(l.char)
}
return out.String()
}
// Move the cursor until it exhausts whitespace.
func (l *Lexer) consumeWhitespace() {
for l.char == ' ' || l.char == '\t' || l.char == '\r' {
l.advance()
}
}
// Read a string literal.
func (l *Lexer) consumeString() {
var out bytes.Buffer
// Move past the opening double quote.
l.advance()
loop:
for {
switch l.char {
case '\\': // escape characters
l.advance()
switch l.char {
case '"': // \"
out.WriteRune('\\')
out.WriteRune('"')
case '\\': // \\
out.WriteRune('\\')
case 'n', 't', 'r', 'a', 'b', 'f', 'v':
out.WriteRune('\\')
out.WriteRune(l.char)
default:
l.reportError(fmt.Sprintf("Invalid escape character '%s", string(l.char)))
}
case 0:
// String should be closed before the end of file.
l.reportError("Unterminated string")
break loop
case '"': // Closing quote.
break loop
default:
out.WriteRune(l.char)
}
l.advance()
}
l.assignToken(token.STRING, out.String())
}
// Read a numeric literal.
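// Underscores are skipped as thousands separators; a decimal point or scientific notation
// (with an optional negative exponent) promotes the literal to a FLOAT token.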
func (l *Lexer) consumeNumeric() {
var out bytes.Buffer
// Write the first character, as we're sure
// it's numeric.
out.WriteRune(l.char)
floatFound := false
scientificFound := false
loop:
for {
l.advance()
switch {
case l.isNumber(l.char):
out.WriteRune(l.char)
case l.char == '_': // Thousands separator is ignored.
case l.char == '.' && l.isNumber(l.peek()): // Float.
floatFound = true
out.WriteRune('.')
case l.char == 'e' && (l.isNumber(l.peek()) || l.peek() == '-'): // Scientific notation.
// Numbers in scientific notation are
// treated as floats for easy of use.
floatFound = true
scientificFound = true
out.WriteRune('e')
case l.char == '-' && scientificFound: // Negative scientific notation.
out.WriteRune('-')
case l.char == '.' && l.peek() == '.': // Range operator.
l.rewind()
break loop
case l.char == 0: // Don't rewind on EOF.
break loop
default:
l.rewind()
break loop
}
}
if floatFound {
l.assignToken(token.FLOAT, out.String())
} else {
l.assignToken(token.INTEGER, out.String())
}
}
// Read a binary, octal or hexadecimal literal.
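// fn is the digit predicate (isBinary, isOctal or isHex); the '0b'/'0o'/'0x' prefix is kept
// in the emitted INTEGER lexeme, and a bare prefix with no digits is reported as an error.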
func (l *Lexer) consumeSpecialInteger(fn func(rune) bool) {
var out bytes.Buffer
out.WriteRune(l.char)
out.WriteRune(l.peek())
// Move past the 'x', 'b' or 'o'.
l.advance()
for fn(l.peek()) {
out.WriteRune(l.peek())
l.advance()
}
ret := out.String()
// A starter like '0x' without other characters
// is not enough to make up an Integer.
if len(ret) == 2 {
l.reportError(fmt.Sprintf("Literal sequence '%s' started but not continued", ret))
}
l.assignToken(token.INTEGER, ret)
}
// Read a single line comment.
func (l *Lexer) consumeComment() {
var out bytes.Buffer
l.advance()
loop:
for {
switch l.char {
case '\n', 0: // Comment ends on a line break or EOF
break loop
case '\r': // Or possibly on a \r\n
l.advance()
switch l.char {
case '\n', 0: | break loop
default:
l.reportError("Unexpected comment line ending")
break loop | random_line_split |
|
lexer.go | l *Lexer) NextToken() token.Token {
// Ignore any number of sequential whitespace.
l.consumeWhitespace()
switch {
case l.char == 0:
l.assignToken(token.EOF, "")
case l.char == '=':
switch l.peek() {
case '=': // ==
l.advance()
l.assignToken(token.EQ, "==")
case '>': // =>
l.advance()
l.assignToken(token.FATARROW, "=>")
default: // =
l.assignToken(token.ASSIGN, string(l.char))
}
case l.char == '>':
switch l.peek() {
case '=': // >=
l.advance()
l.assignToken(token.GTE, ">=")
case '>': // >>
l.advance()
l.assignToken(token.BITSHRIGHT, ">>")
default: // >
l.assignToken(token.GT, string(l.char))
}
case l.char == '<':
switch l.peek() {
case '=': // <=
l.advance()
l.assignToken(token.LTE, "<=")
case '<': // <<
l.advance()
l.assignToken(token.BITSHLEFT, "<<")
default: // <
l.assignToken(token.LT, string(l.char))
}
case l.char == '+':
switch l.peek() {
case '=': // +=
l.advance()
l.assignToken(token.ASSIGNPLUS, "+=")
default: // +
l.assignToken(token.PLUS, string(l.char))
}
case l.char == '-':
switch l.peek() {
case '>': // ->
l.advance()
l.assignToken(token.ARROW, string("->"))
case '=': // -=
l.advance()
l.assignToken(token.ASSIGNMIN, "-=")
default:
l.assignToken(token.MINUS, string(l.char))
}
case l.char == '*':
switch l.peek() {
case '*': // **
l.advance()
l.assignToken(token.POWER, "**")
case '=': // *=
l.advance()
l.assignToken(token.ASSIGNMULT, "*=")
default: // *
l.assignToken(token.ASTERISK, string(l.char))
}
case l.char == '/':
switch l.peek() {
case '/': // single line comment
l.advance()
l.consumeComment()
case '*': // multiline comment
l.advance()
l.consumeMultilineComment()
case '=': // /=
l.advance()
l.assignToken(token.ASSIGNDIV, "/=")
default:
l.assignToken(token.SLASH, string(l.char))
}
case l.char == '%':
l.assignToken(token.MODULO, string(l.char))
case l.char == ',':
l.assignToken(token.COMMA, string(l.char))
case l.char == '.':
switch l.peek() {
case '.':
l.advance()
switch l.peek() {
case '.': // ...
l.advance()
l.assignToken(token.ELLIPSIS, "...")
default: // ..
l.assignToken(token.RANGE, "..")
}
default: // .
l.assignToken(token.DOT, string(l.char))
}
case l.char == '|':
switch l.peek() {
case '|': // ||
l.advance()
l.assignToken(token.OR, "||")
case '>': // |>
l.advance()
l.assignToken(token.PIPE, "|>")
default: // |
l.assignToken(token.BITOR, string(l.char))
}
case l.char == '&':
switch l.peek() {
case '&': // &&
l.advance()
l.assignToken(token.AND, "&&")
default: // &
l.assignToken(token.BITAND, string(l.char))
}
case l.char == '~':
l.assignToken(token.BITNOT, string(l.char))
case l.char == '!':
switch l.peek() {
case '=': // !=
l.advance()
l.assignToken(token.UNEQ, "!=")
default: // !
l.assignToken(token.BANG, string(l.char))
}
case l.char == '(':
l.assignToken(token.LPAREN, "(")
case l.char == ')':
l.assignToken(token.RPAREN, ")")
case l.char == '[':
l.assignToken(token.LBRACK, "[")
case l.char == ']':
l.assignToken(token.RBRACK, "]")
case l.char == '?':
l.assignToken(token.QUESTION, "?")
case l.char == ':':
l.assignToken(token.COLON, ":")
case l.char == '_':
l.assignToken(token.UNDERSCORE, "_")
case l.char == '\n':
l.assignToken(token.NEWLINE, "\\n")
case l.char == '"': // Anything inside double quotes is a string.
l.consumeString()
case l.char == '0' && l.peek() == 'x': // Hex.
l.consumeSpecialInteger(l.isHex)
case l.char == '0' && l.peek() == 'o': // Octal.
l.consumeSpecialInteger(l.isOctal)
case l.char == '0' && l.peek() == 'b': // Binary.
l.consumeSpecialInteger(l.isBinary)
case l.isNumber(l.char): // Numeric literal.
l.consumeNumeric()
default:
// Identifier or keyword.
if l.isName(l.char) {
l.consumeIdent()
} else {
l.reportError(fmt.Sprintf("Unidentified character '%s'", string(l.char)))
}
}
l.advance()
return l.token
}
// Move the cursor ahead.
func (l *Lexer) advance() {
rn, err := l.reader.Advance()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
// Don't move the location if it was a
// rewind, or it will report an incorrect
// line and column.
if !l.rewinded |
l.rewinded = false
l.char = rn
}
// Check characters ahead but don't move the cursor.
func (l *Lexer) peek() rune {
rn, err := l.reader.Peek()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
return rn
}
// Move the cursor to the previous character.
func (l *Lexer) rewind() {
if err := l.reader.Unread(); err != nil {
l.reportError("Unable to move to the previous character. This is an internal Lexer fail.")
}
l.rewinded = true
}
// Move row and column cursor.
func (l *Lexer) moveLocation() {
switch l.char {
case '\n':
l.row += 1
l.col = 2
default:
l.col += 1
}
}
// Pass a token to the active token cursor.
func (l *Lexer) assignToken(toktype token.TokenType, value string) {
l.token = token.Token{
Type: toktype,
Lexeme: value,
Location: token.Location{Row: l.row, Col: l.col},
}
}
// Check if the character makes for a valid identifier
// or keyword.
func (l *Lexer) isName(char rune) bool {
return (char >= 'a' && char <= 'z') || (char >= 'A' && char <= 'Z') ||
(char >= '0' && char <= '9') || char == '_' || char == '!' || char == '?'
}
// Check if the character is a number.
func (l *Lexer) isNumber(char rune) bool {
return char >= '0' && char <= '9'
}
// Check if the character is an hexadecimal.
func (l *Lexer) isHex(char rune) bool {
return l.isNumber(char) || (char >= 'a' && char <= 'f' || char >= 'A' && char <= 'F')
}
// Check if the character is an octal.
func (l *Lexer) isOctal(char rune) bool {
return char >= '0' && char <= '7'
}
// Check if the character is a binary.
func (l *Lexer) isBinary(char rune) bool {
return char == '0' || char == '1'
}
// Read all valid characters from an identifier or keyword.
func (l *Lexer) readName() string {
var out bytes.Buffer
out.WriteRune(l.char)
// Read until a non-name character is found.
for l.isName(l.peek()) {
l.advance()
out.WriteRune(l.char)
}
return out.String()
}
// Move the cursor until it exhausts whitespace.
func (l *Lexer) consumeWhitespace() {
for l.char == ' ' || l.char == '\t' || l.char == '\r' {
l.advance()
}
}
// Read a string literal.
func (l *Lexer) consumeString() {
var out bytes.Buffer
// Move past the opening double quote.
l.advance()
loop:
for {
switch l.char {
case '\\': // escape characters
l.advance()
switch l.char {
case '"': // \"
out.WriteRune('\\')
out.WriteRune('"')
case '\\': // \\
out.WriteRune('\\')
case 'n', 't', 'r', 'a', 'b', 'f', 'v':
out.WriteRune('\\')
out.WriteRune(l.char)
| {
l.moveLocation()
} | conditional_block |
watcher.go | the QueryOptions
Namespace string
//TLSConfig TLSConfig
}
type service struct {
name string
instances []*api.ServiceEntry
intentions []*api.Intention
gatewayService *api.GatewayService
leaf *certLeaf
ready sync.WaitGroup
done bool
}
type certLeaf struct {
Cert []byte
Key []byte
done bool
}
//Watcher struct for TG config
type Watcher struct {
settings api.Config
id string
name string
namespace string
address string
port int
consul *api.Client
token string
C chan Config
lock sync.Mutex
ready sync.WaitGroup
services map[string]*service
certCAs [][]byte
certCAPool *x509.CertPool
leaf *certLeaf
update chan struct{}
}
//New Watcher
func New() *Watcher {
log.Info("creating new Consul watcher")
return &Watcher{
C: make(chan Config),
services: make(map[string]*service),
update: make(chan struct{}, 1),
}
}
func (w *Watcher) Init(c ConsulConfig, gatewayName string, namespace string) error {
var err error
log.Infof("initializing Consul watcher for gateway: %+v", gatewayName)
w.name = gatewayName
w.namespace = namespace
w.settings = *api.DefaultConfig()
w.settings.Address = c.Address
w.settings.Scheme = c.Scheme
w.settings.Token = c.Token
w.settings.Namespace = c.Namespace
w.consul, err = api.NewClient(&w.settings)
if err != nil {
return err
}
return nil
}
//Run Watcher
func (w *Watcher) Run() error {
//Debug
log.Debugf("running watcher for gateway: %s\n", w.name)
w.ready.Add(3)
go w.watchService(w.name, true, "terminating-gateway")
go w.watchGateway()
go w.watchCA()
w.ready.Wait()
for range w.update {
w.C <- w.genCfg()
}
return nil
}
//Reload Configuration
func (w *Watcher) Reload() {
w.C <- w.genCfg()
}
func (w *Watcher) watchLeaf(service string, first bool) {
log.Debugf("watching leaf cert for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
cert, meta, err := w.consul.Agent().ConnectCALeaf(service, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("consul error fetching leaf cert for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("leaf cert for service %s changed, serial: %s, valid before: %s, valid after: %s", service, cert.SerialNumber, cert.ValidBefore, cert.ValidAfter)
w.lock.Lock()
if w.services[service].leaf == nil {
w.services[service].leaf = &certLeaf{}
}
w.services[service].leaf.Cert = []byte(cert.CertPEM)
w.services[service].leaf.Key = []byte(cert.PrivateKeyPEM)
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("leaf cert for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchIntention(service string, first bool) {
log.Debugf("watching intentions for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
intentionList, meta, err := w.consul.Connect().Intentions(&api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
Filter: "DestinationName==" + service,
})
if err != nil {
log.Errorf("consul error fetching intentions for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("intentions for service %s changed", service)
w.lock.Lock()
w.services[service].intentions = intentionList
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("intentions for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchGateway() | lastIndex = meta.LastIndex
if changed {
log.Infof("linked services changed for gateway %s", w.name)
if first && len(gwServices) == 0 {
log.Infof("no linked services defined for gateway %s", w.name)
continue
}
w.handleProxyChange(first, &gwServices)
}
if first {
log.Infof("linked services for %s ready", w.name)
first = false
w.ready.Done()
}
}
}
func (w *Watcher) watchService(service string, first bool, kind string) {
log.Infof("watching downstream: %s", service)
dFirst := true
var lastIndex uint64
var nSpace string
for {
if kind != "terminating-gateway" {
if &w.services[service].gatewayService.Service.Namespace != nil {
nSpace = w.services[service].gatewayService.Service.Namespace
}
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
} else {
nSpace = w.namespace
}
srv, meta, err := w.consul.Health().Service(service, "", false, &api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
Namespace: nSpace,
})
if err != nil {
log.Errorf("error fetching service %s definition: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Debugf("service %s changed", service)
if len(srv) == 0 {
log.Infof("no service definition found for: %s", service)
continue
} else if len(srv) > 1 && kind == "terminating-gateway" {
log.Errorf("too many service definitions found for: %s", service)
continue
}
w.lock.Lock()
if kind == "terminating-gateway" {
w.id = srv[0].Service.ID
w.name = srv[0].Service.Service
w.address = srv[0].Service.Address
w.port = srv[0].Service.Port
} else {
w.services[service].instances = srv
}
w.lock.Unlock()
if dFirst && kind != "terminating-gateway" {
w.services[service].ready.Wait()
dFirst = false
}
w.notifyChanged()
}
if first {
log.Infof("service config for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) handleProxyChange(first bool, gwServices *[]*api.GatewayService) {
keep := make(map[string]bool)
if gwServices != nil {
for _, down := range *gwServices {
keep[down.Service.Name] = true
w.lock.Lock()
| {
var lastIndex uint64
first := true
for {
gwServices, meta, err := w.consul.Catalog().GatewayServices(w.name, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("error fetching linked services for gateway %s: %s", w.name, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex | identifier_body |
watcher.go | the QueryOptions
Namespace string
//TLSConfig TLSConfig
}
type service struct {
name string
instances []*api.ServiceEntry
intentions []*api.Intention
gatewayService *api.GatewayService
leaf *certLeaf
ready sync.WaitGroup
done bool
}
type certLeaf struct {
Cert []byte
Key []byte
done bool
}
//Watcher struct for TG config
type Watcher struct {
settings api.Config
id string
name string
namespace string
address string
port int
consul *api.Client
token string
C chan Config
lock sync.Mutex
ready sync.WaitGroup
services map[string]*service
certCAs [][]byte
certCAPool *x509.CertPool
leaf *certLeaf
update chan struct{}
}
//New Watcher
func New() *Watcher {
log.Info("creating new Consul watcher")
return &Watcher{
C: make(chan Config),
services: make(map[string]*service),
update: make(chan struct{}, 1),
}
}
func (w *Watcher) Init(c ConsulConfig, gatewayName string, namespace string) error { |
log.Infof("initializing Consul watcher for gateway: %+v", gatewayName)
w.name = gatewayName
w.namespace = namespace
w.settings = *api.DefaultConfig()
w.settings.Address = c.Address
w.settings.Scheme = c.Scheme
w.settings.Token = c.Token
w.settings.Namespace = c.Namespace
w.consul, err = api.NewClient(&w.settings)
if err != nil {
return err
}
return nil
}
//Run Watcher
func (w *Watcher) Run() error {
//Debug
log.Debugf("running watcher for gateway: %s\n", w.name)
w.ready.Add(3)
go w.watchService(w.name, true, "terminating-gateway")
go w.watchGateway()
go w.watchCA()
w.ready.Wait()
for range w.update {
w.C <- w.genCfg()
}
return nil
}
//Reload Configuration
func (w *Watcher) Reload() {
w.C <- w.genCfg()
}
func (w *Watcher) watchLeaf(service string, first bool) {
log.Debugf("watching leaf cert for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
cert, meta, err := w.consul.Agent().ConnectCALeaf(service, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("consul error fetching leaf cert for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("leaf cert for service %s changed, serial: %s, valid before: %s, valid after: %s", service, cert.SerialNumber, cert.ValidBefore, cert.ValidAfter)
w.lock.Lock()
if w.services[service].leaf == nil {
w.services[service].leaf = &certLeaf{}
}
w.services[service].leaf.Cert = []byte(cert.CertPEM)
w.services[service].leaf.Key = []byte(cert.PrivateKeyPEM)
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("leaf cert for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchIntention(service string, first bool) {
log.Debugf("watching intentions for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
intentionList, meta, err := w.consul.Connect().Intentions(&api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
Filter: "DestinationName==" + service,
})
if err != nil {
log.Errorf("consul error fetching intentions for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("intentions for service %s changed", service)
w.lock.Lock()
w.services[service].intentions = intentionList
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("intentions for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchGateway() {
var lastIndex uint64
first := true
for {
gwServices, meta, err := w.consul.Catalog().GatewayServices(w.name, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("error fetching linked services for gateway %s: %s", w.name, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("linked services changed for gateway %s", w.name)
if first && len(gwServices) == 0 {
log.Infof("no linked services defined for gateway %s", w.name)
continue
}
w.handleProxyChange(first, &gwServices)
}
if first {
log.Infof("linked services for %s ready", w.name)
first = false
w.ready.Done()
}
}
}
func (w *Watcher) watchService(service string, first bool, kind string) {
log.Infof("watching downstream: %s", service)
dFirst := true
var lastIndex uint64
var nSpace string
for {
if kind != "terminating-gateway" {
if &w.services[service].gatewayService.Service.Namespace != nil {
nSpace = w.services[service].gatewayService.Service.Namespace
}
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
} else {
nSpace = w.namespace
}
srv, meta, err := w.consul.Health().Service(service, "", false, &api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
Namespace: nSpace,
})
if err != nil {
log.Errorf("error fetching service %s definition: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Debugf("service %s changed", service)
if len(srv) == 0 {
log.Infof("no service definition found for: %s", service)
continue
} else if len(srv) > 1 && kind == "terminating-gateway" {
log.Errorf("too many service definitions found for: %s", service)
continue
}
w.lock.Lock()
if kind == "terminating-gateway" {
w.id = srv[0].Service.ID
w.name = srv[0].Service.Service
w.address = srv[0].Service.Address
w.port = srv[0].Service.Port
} else {
w.services[service].instances = srv
}
w.lock.Unlock()
if dFirst && kind != "terminating-gateway" {
w.services[service].ready.Wait()
dFirst = false
}
w.notifyChanged()
}
if first {
log.Infof("service config for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) handleProxyChange(first bool, gwServices *[]*api.GatewayService) {
keep := make(map[string]bool)
if gwServices != nil {
for _, down := range *gwServices {
keep[down.Service.Name] = true
w.lock.Lock()
| var err error | random_line_split |
watcher.go | Leaf
ready sync.WaitGroup
done bool
}
type certLeaf struct {
Cert []byte
Key []byte
done bool
}
//Watcher struct for TG config
type Watcher struct {
settings api.Config
id string
name string
namespace string
address string
port int
consul *api.Client
token string
C chan Config
lock sync.Mutex
ready sync.WaitGroup
services map[string]*service
certCAs [][]byte
certCAPool *x509.CertPool
leaf *certLeaf
update chan struct{}
}
//New Watcher
func New() *Watcher {
log.Info("creating new Consul watcher")
return &Watcher{
C: make(chan Config),
services: make(map[string]*service),
update: make(chan struct{}, 1),
}
}
func (w *Watcher) Init(c ConsulConfig, gatewayName string, namespace string) error {
var err error
log.Infof("initializing Consul watcher for gateway: %+v", gatewayName)
w.name = gatewayName
w.namespace = namespace
w.settings = *api.DefaultConfig()
w.settings.Address = c.Address
w.settings.Scheme = c.Scheme
w.settings.Token = c.Token
w.settings.Namespace = c.Namespace
w.consul, err = api.NewClient(&w.settings)
if err != nil {
return err
}
return nil
}
//Run Watcher
func (w *Watcher) Run() error {
//Debug
log.Debugf("running watcher for gateway: %s\n", w.name)
w.ready.Add(3)
go w.watchService(w.name, true, "terminating-gateway")
go w.watchGateway()
go w.watchCA()
w.ready.Wait()
for range w.update {
w.C <- w.genCfg()
}
return nil
}
//Reload Configuration
func (w *Watcher) Reload() {
w.C <- w.genCfg()
}
func (w *Watcher) watchLeaf(service string, first bool) {
log.Debugf("watching leaf cert for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
cert, meta, err := w.consul.Agent().ConnectCALeaf(service, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("consul error fetching leaf cert for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("leaf cert for service %s changed, serial: %s, valid before: %s, valid after: %s", service, cert.SerialNumber, cert.ValidBefore, cert.ValidAfter)
w.lock.Lock()
if w.services[service].leaf == nil {
w.services[service].leaf = &certLeaf{}
}
w.services[service].leaf.Cert = []byte(cert.CertPEM)
w.services[service].leaf.Key = []byte(cert.PrivateKeyPEM)
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("leaf cert for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchIntention(service string, first bool) {
log.Debugf("watching intentions for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
intentionList, meta, err := w.consul.Connect().Intentions(&api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
Filter: "DestinationName==" + service,
})
if err != nil {
log.Errorf("consul error fetching intentions for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("intentions for service %s changed", service)
w.lock.Lock()
w.services[service].intentions = intentionList
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("intentions for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchGateway() {
var lastIndex uint64
first := true
for {
gwServices, meta, err := w.consul.Catalog().GatewayServices(w.name, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("error fetching linked services for gateway %s: %s", w.name, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("linked services changed for gateway %s", w.name)
if first && len(gwServices) == 0 {
log.Infof("no linked services defined for gateway %s", w.name)
continue
}
w.handleProxyChange(first, &gwServices)
}
if first {
log.Infof("linked services for %s ready", w.name)
first = false
w.ready.Done()
}
}
}
func (w *Watcher) watchService(service string, first bool, kind string) {
log.Infof("watching downstream: %s", service)
dFirst := true
var lastIndex uint64
var nSpace string
for {
if kind != "terminating-gateway" {
if &w.services[service].gatewayService.Service.Namespace != nil {
nSpace = w.services[service].gatewayService.Service.Namespace
}
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
} else {
nSpace = w.namespace
}
srv, meta, err := w.consul.Health().Service(service, "", false, &api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
Namespace: nSpace,
})
if err != nil {
log.Errorf("error fetching service %s definition: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Debugf("service %s changed", service)
if len(srv) == 0 {
log.Infof("no service definition found for: %s", service)
continue
} else if len(srv) > 1 && kind == "terminating-gateway" {
log.Errorf("too many service definitions found for: %s", service)
continue
}
w.lock.Lock()
if kind == "terminating-gateway" {
w.id = srv[0].Service.ID
w.name = srv[0].Service.Service
w.address = srv[0].Service.Address
w.port = srv[0].Service.Port
} else {
w.services[service].instances = srv
}
w.lock.Unlock()
if dFirst && kind != "terminating-gateway" {
w.services[service].ready.Wait()
dFirst = false
}
w.notifyChanged()
}
if first {
log.Infof("service config for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) handleProxyChange(first bool, gwServices *[]*api.GatewayService) {
keep := make(map[string]bool)
if gwServices != nil | {
for _, down := range *gwServices {
keep[down.Service.Name] = true
w.lock.Lock()
_, ok := w.services[down.Service.Name]
w.lock.Unlock()
if !ok {
if first {
w.ready.Add(3)
}
w.startService(down, first)
}
}
} | conditional_block |
|
watcher.go | the QueryOptions
Namespace string
//TLSConfig TLSConfig
}
type service struct {
name string
instances []*api.ServiceEntry
intentions []*api.Intention
gatewayService *api.GatewayService
leaf *certLeaf
ready sync.WaitGroup
done bool
}
type certLeaf struct {
Cert []byte
Key []byte
done bool
}
//Watcher struct for TG config
type Watcher struct {
settings api.Config
id string
name string
namespace string
address string
port int
consul *api.Client
token string
C chan Config
lock sync.Mutex
ready sync.WaitGroup
services map[string]*service
certCAs [][]byte
certCAPool *x509.CertPool
leaf *certLeaf
update chan struct{}
}
//New Watcher
func New() *Watcher {
log.Info("creating new Consul watcher")
return &Watcher{
C: make(chan Config),
services: make(map[string]*service),
update: make(chan struct{}, 1),
}
}
func (w *Watcher) Init(c ConsulConfig, gatewayName string, namespace string) error {
var err error
log.Infof("initializing Consul watcher for gateway: %+v", gatewayName)
w.name = gatewayName
w.namespace = namespace
w.settings = *api.DefaultConfig()
w.settings.Address = c.Address
w.settings.Scheme = c.Scheme
w.settings.Token = c.Token
w.settings.Namespace = c.Namespace
w.consul, err = api.NewClient(&w.settings)
if err != nil {
return err
}
return nil
}
//Run Watcher
func (w *Watcher) Run() error {
//Debug
log.Debugf("running watcher for gateway: %s\n", w.name)
w.ready.Add(3)
go w.watchService(w.name, true, "terminating-gateway")
go w.watchGateway()
go w.watchCA()
w.ready.Wait()
for range w.update {
w.C <- w.genCfg()
}
return nil
}
//Reload Configuration
func (w *Watcher) | () {
w.C <- w.genCfg()
}
func (w *Watcher) watchLeaf(service string, first bool) {
log.Debugf("watching leaf cert for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
cert, meta, err := w.consul.Agent().ConnectCALeaf(service, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("consul error fetching leaf cert for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("leaf cert for service %s changed, serial: %s, valid before: %s, valid after: %s", service, cert.SerialNumber, cert.ValidBefore, cert.ValidAfter)
w.lock.Lock()
if w.services[service].leaf == nil {
w.services[service].leaf = &certLeaf{}
}
w.services[service].leaf.Cert = []byte(cert.CertPEM)
w.services[service].leaf.Key = []byte(cert.PrivateKeyPEM)
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("leaf cert for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchIntention(service string, first bool) {
log.Debugf("watching intentions for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
intentionList, meta, err := w.consul.Connect().Intentions(&api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
Filter: "DestinationName==" + service,
})
if err != nil {
log.Errorf("consul error fetching intentions for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("intentions for service %s changed", service)
w.lock.Lock()
w.services[service].intentions = intentionList
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("intentions for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchGateway() {
var lastIndex uint64
first := true
for {
gwServices, meta, err := w.consul.Catalog().GatewayServices(w.name, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("error fetching linked services for gateway %s: %s", w.name, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("linked services changed for gateway %s", w.name)
if first && len(gwServices) == 0 {
log.Infof("no linked services defined for gateway %s", w.name)
continue
}
w.handleProxyChange(first, &gwServices)
}
if first {
log.Infof("linked services for %s ready", w.name)
first = false
w.ready.Done()
}
}
}
func (w *Watcher) watchService(service string, first bool, kind string) {
log.Infof("watching downstream: %s", service)
dFirst := true
var lastIndex uint64
var nSpace string
for {
if kind != "terminating-gateway" {
if &w.services[service].gatewayService.Service.Namespace != nil {
nSpace = w.services[service].gatewayService.Service.Namespace
}
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
} else {
nSpace = w.namespace
}
srv, meta, err := w.consul.Health().Service(service, "", false, &api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
Namespace: nSpace,
})
if err != nil {
log.Errorf("error fetching service %s definition: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Debugf("service %s changed", service)
if len(srv) == 0 {
log.Infof("no service definition found for: %s", service)
continue
} else if len(srv) > 1 && kind == "terminating-gateway" {
log.Errorf("too many service definitions found for: %s", service)
continue
}
w.lock.Lock()
if kind == "terminating-gateway" {
w.id = srv[0].Service.ID
w.name = srv[0].Service.Service
w.address = srv[0].Service.Address
w.port = srv[0].Service.Port
} else {
w.services[service].instances = srv
}
w.lock.Unlock()
if dFirst && kind != "terminating-gateway" {
w.services[service].ready.Wait()
dFirst = false
}
w.notifyChanged()
}
if first {
log.Infof("service config for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) handleProxyChange(first bool, gwServices *[]*api.GatewayService) {
keep := make(map[string]bool)
if gwServices != nil {
for _, down := range *gwServices {
keep[down.Service.Name] = true
w.lock.Lock | Reload | identifier_name |
practice.js | overlayLayer); // Add layers
// Detection
createjs.Touch.enable(stage);
// Initialize global variables for layout and sizing
initializeVariables(canvas.width, canvas.height);
// Preload all assets (crucial for first rendering)
preload.loadManifest(manifest);
}
function initGame() {
// Audio:
sfxEnabled = (localStorage.getItem("sfx-muted") == "true") ? false : true;
bgmEnabled = (localStorage.getItem("bgm-muted") == "true") ? false : true;
createjs.Sound.initializeDefaultPlugins();
var audiopath ="sound/";
var sounds = [
{src:"game_bgm.wav", id:"bg_music"},
{src:"hitsound1.wav", id:"correct"},
{src:"miss.wav", id:"incorrect"},
{src:"failsound.mp3", id:"gameover"},
{src:"achievement.mp3", id:"achieved"}
];
createjs.Sound.addEventListener("fileload", bgm); // Will call bgm() when loaded
createjs.Sound.registerSounds(sounds, audiopath);
// Initialization:
// Background
var bgImage = preload.getResult("bg");
var background = new createjs.Bitmap(bgImage);
setScaleFactor(background, canvas.width, canvas.height);
backgroundLayer.addChild(background);
// Indicator stuff
backButton = foregroundLayer.addChild(new Button('back'));
correctIndicator = foregroundLayer.addChild(new CorrectIndicator());
incorrectIndicator = foregroundLayer.addChild(new IncorrectIndicator());
// Answers and questions (in this order)
initializeAnswers();
initializeQuestions();
updateCurrentAnswer();
// Initial positions and sizing
initializeAnswerPositions();
initializeQuestionPositions();
// Looper
createjs.Ticker.setFPS(60);
// Handles all the update logic
createjs.Ticker.on("tick", handleTick);
// Achievements
// No condition
checkAchievement(achieve.YOURTRAININGBEGINS_KEY, achieve.YOURTRAININGBEGINS_SRC);
}
// -- HANDLERS --
function handleTick(event) {
if (!event.paused) {
// Render
stage.update();
}
}
// -- METHODS --
// INITIALIZERS
function initializeAnswers() {
for (i = 0; i < 5; i++) {
var nextAnswer = generateNextAnswer();
nextAnswer.index = i; // We need the index so we can replace them properly
answers.push(nextAnswer);
}
}
function initializeQuestions() {
for (i = 0; i < 3; i++) {
questions.push(generateNextQuestion());
}
}
function initializeQuestionPositions() | console.log("Ques x: " + questions[q].x + " y: " + questions[q].y );
}
}
function initializeAnswerPositions() {
for (a = 0; a < 5; a++) {
// x and y of the CENTER of the container. (not top left)
answers[a].x = (properties.ANS_SIZE / 2) + (a)*(properties.ANS_SIZE);
console.log("Ans x: " + answers[a].x + " y: " + answers[a].y);
}
}
// AUDIO
function bgm(event){
console.log("Audio loaded");
if(bgmEnabled){
var instance = createjs.Sound.play("bg_music", { loop: -1 });
}
}
function correctSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("correct"); }
}
function incorrectSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("incorrect"); }
}
function gameoverSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("gameover"); }
}
function buttonSound() {
if (sfxEnabled) { var sound = new Audio("buttonSound"); }
}
// GAME LOGIC
// Creates the next answer
function generateNextAnswer() {
var randInt;
// Loop until there is no overlap
outer:
while (true) {
// Get the new value
randInt = getRandomInt(1, 20);
// Check if it exists already
for (j = 0; j < answers.length; j++) {
if (answers[j].answer == randInt) {
continue outer; // Yes - retry
}
}
// No - return the value
break;
}
// Create the next answer
// Finalize setup
var nextAnswer = foregroundLayer.addChild(new Answer(randInt)); // Create the actual object
if (currentAnswer != null) {
nextAnswer.index = currentAnswer.index; // Pass on parent index
nextAnswer.x = currentAnswer.x; // Pass on parent position
}
// Return the displayObject
return nextAnswer;
}
// Gathers are all the necessary info before generating the next answer
function prepareNextQuestion() {
// Obtain information about the current board
var availableArray = [];
// Note: foreach loop not working very well
for (a=0; a<answers.length; a++) {
if (answers[a].available == true) {
availableArray.push(answers[a]);
}
}
// Select one of the avaiable numbers
var randAvailable = availableArray[getRandomInt(0, availableArray.length - 1)];
// Toggle availibility off
randAvailable.available = false;
// Retrieve the answer
return randAvailable.answer;
}
function generateNextQuestion() {
// Init the question
var question;
// Retrieve the next answer
var answer = prepareNextQuestion();
// Generate a 2 or 3 term question
var randTerm = getRandomInt(termRange["min"], termRange["max"]);
// Generating a 2 term question
if (randTerm == 2) {
// Initialize the pieces (operands, operators, answer)
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Calculate the pieces
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
numSet = generateSet(answer, operatorSet[0]);
// Assemble all the pieces
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer;
// Create the question
question = new Question(fullSet);
// Generating a 3 term question
} else {
// Init
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Set up (random) operators
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
operatorSet[1] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
// Begin generation logic
var operatorCode = operatorSet[0] + operatorSet[1];
switch (operatorCode) {
// Calculate left to right (normal)
case "++":
case "+-":
case "+x":
case "+/":
case "-x":
case "-/":
case "xx":
case "x/":
var numSetL = generateSet(answer, operatorSet[0]); // #1 OP #2 OP ?
// take middle operator and expand // #1 OP (#2) OP ?
var numSetR = generateSet(numSetL[1], operatorSet[1]); // #1 OP #3 OP #4
// Assemble numSet, nsl[0] OP nsr[0] OP nsr[1]
numSet = [numSetL[0], numSetR[0], numSetR[1]];
break;
// Calculate right to left (reversed)
case "-+":
case "--":
case "x+":
case "x-":
case "/+":
case "/-":
case "/x":
case "//":
// Calculate right to left
var numSetR = generateSet(answer, operatorSet[1]); // ? OP #1 OP #2
// take middle operator and expand // ? OP (#1) OP #2
var numSetL = generateSet(numSetR[0], operatorSet[0]); // #3 OP #4 OP #2
// Assemble, nsl[0] +- nsl[1] x/ nsr[1];
numSet = [numSetL[0], numSetL[1], numSetR[1]];
break;
}
| {
for (q=0; q<3; q++) {
switch (q) {
case 0:
questions[q].y = layout.MID3; // Lowest
questions[q].scaleY = 1.66;
questions[q].txt.scaleY = 1.00;
questions[q].txt.scaleX = 1.66;
break;
case 1:
questions[q].y = layout.MID2;
questions[q].txt.scaleX = questions[q].txt.scaleY = 1.00;
break;
case 2:
questions[q].y = layout.MID1; // Most upper
break;
default:
console.log("Something went wrong with loadQuestions()");
break;
} | identifier_body |
practice.js | ();
updateCurrentAnswer();
// Initial positions and sizing
initializeAnswerPositions();
initializeQuestionPositions();
// Looper
createjs.Ticker.setFPS(60);
// Handles all the update logic
createjs.Ticker.on("tick", handleTick);
// Achievements
// No condition
checkAchievement(achieve.YOURTRAININGBEGINS_KEY, achieve.YOURTRAININGBEGINS_SRC);
}
// -- HANDLERS --
function handleTick(event) {
if (!event.paused) {
// Render
stage.update();
}
}
// -- METHODS --
// INITIALIZERS
function initializeAnswers() {
for (i = 0; i < 5; i++) {
var nextAnswer = generateNextAnswer();
nextAnswer.index = i; // We need the index so we can replace them properly
answers.push(nextAnswer);
}
}
function initializeQuestions() {
for (i = 0; i < 3; i++) {
questions.push(generateNextQuestion());
}
}
function initializeQuestionPositions() {
for (q=0; q<3; q++) {
switch (q) {
case 0:
questions[q].y = layout.MID3; // Lowest
questions[q].scaleY = 1.66;
questions[q].txt.scaleY = 1.00;
questions[q].txt.scaleX = 1.66;
break;
case 1:
questions[q].y = layout.MID2;
questions[q].txt.scaleX = questions[q].txt.scaleY = 1.00;
break;
case 2:
questions[q].y = layout.MID1; // Most upper
break;
default:
console.log("Something went wrong with loadQuestions()");
break;
}
console.log("Ques x: " + questions[q].x + " y: " + questions[q].y );
}
}
function initializeAnswerPositions() {
for (a = 0; a < 5; a++) {
// x and y of the CENTER of the container. (not top left)
answers[a].x = (properties.ANS_SIZE / 2) + (a)*(properties.ANS_SIZE);
console.log("Ans x: " + answers[a].x + " y: " + answers[a].y);
}
}
// AUDIO
function bgm(event){
console.log("Audio loaded");
if(bgmEnabled){
var instance = createjs.Sound.play("bg_music", { loop: -1 });
}
}
function correctSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("correct"); }
}
function incorrectSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("incorrect"); }
}
function gameoverSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("gameover"); }
}
function buttonSound() {
if (sfxEnabled) { var sound = new Audio("buttonSound"); }
}
// GAME LOGIC
// Creates the next answer
function generateNextAnswer() {
var randInt;
// Loop until there is no overlap
outer:
while (true) {
// Get the new value
randInt = getRandomInt(1, 20);
// Check if it exists already
for (j = 0; j < answers.length; j++) {
if (answers[j].answer == randInt) {
continue outer; // Yes - retry
}
}
// No - return the value
break;
}
// Create the next answer
// Finalize setup
var nextAnswer = foregroundLayer.addChild(new Answer(randInt)); // Create the actual object
if (currentAnswer != null) {
nextAnswer.index = currentAnswer.index; // Pass on parent index
nextAnswer.x = currentAnswer.x; // Pass on parent position
}
// Return the displayObject
return nextAnswer;
}
// Gathers are all the necessary info before generating the next answer
function prepareNextQuestion() {
// Obtain information about the current board
var availableArray = [];
// Note: foreach loop not working very well
for (a=0; a<answers.length; a++) {
if (answers[a].available == true) {
availableArray.push(answers[a]);
}
}
// Select one of the avaiable numbers
var randAvailable = availableArray[getRandomInt(0, availableArray.length - 1)];
// Toggle availibility off
randAvailable.available = false;
// Retrieve the answer
return randAvailable.answer;
}
function generateNextQuestion() {
// Init the question
var question;
// Retrieve the next answer
var answer = prepareNextQuestion();
// Generate a 2 or 3 term question
var randTerm = getRandomInt(termRange["min"], termRange["max"]);
// Generating a 2 term question
if (randTerm == 2) {
// Initialize the pieces (operands, operators, answer)
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Calculate the pieces
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
numSet = generateSet(answer, operatorSet[0]);
// Assemble all the pieces
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer;
// Create the question
question = new Question(fullSet);
// Generating a 3 term question
} else {
// Init
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Set up (random) operators
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
operatorSet[1] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
// Begin generation logic
var operatorCode = operatorSet[0] + operatorSet[1];
switch (operatorCode) {
// Calculate left to right (normal)
case "++":
case "+-":
case "+x":
case "+/":
case "-x":
case "-/":
case "xx":
case "x/":
var numSetL = generateSet(answer, operatorSet[0]); // #1 OP #2 OP ?
// take middle operator and expand // #1 OP (#2) OP ?
var numSetR = generateSet(numSetL[1], operatorSet[1]); // #1 OP #3 OP #4
// Assemble numSet, nsl[0] OP nsr[0] OP nsr[1]
numSet = [numSetL[0], numSetR[0], numSetR[1]];
break;
// Calculate right to left (reversed)
case "-+":
case "--":
case "x+":
case "x-":
case "/+":
case "/-":
case "/x":
case "//":
// Calculate right to left
var numSetR = generateSet(answer, operatorSet[1]); // ? OP #1 OP #2
// take middle operator and expand // ? OP (#1) OP #2
var numSetL = generateSet(numSetR[0], operatorSet[0]); // #3 OP #4 OP #2
// Assemble, nsl[0] +- nsl[1] x/ nsr[1];
numSet = [numSetL[0], numSetL[1], numSetR[1]];
break;
}
// Assemble numbers and operators
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer; // From above
// Create the question with the set
question = new Question(fullSet);
}
// Return the display object
return midLayer.addChild(question);
}
// Returns an array of numbers, and an array of operators (for the question).
function generateSet(answer, operator) {
switch (operator) {
case "+":
return generateSum(answer);
break;
case "-":
return generateMinus(answer);
break;
case "x":
return generateMultiplication(answer);
break;
case "/":
return generateDivision(answer);
break;
default:
console.log("something went wrong with generateNextQuestion()");
break;
}
}
function generateSum(answer) {
var numA = getRandomInt(0, answer);
var numB = answer - numA;
var numSet = [numA, numB]; // [[nums][operators]answer];
return numSet;
}
function generateMinus(answer) {
// TODO: The difference will always be from 1-20, might want to base it off the answer it self
var numA = getRandomInt(answer, answer + 20);
var numB = numA - answer;
var numSet = [numA, numB];
return numSet;
}
function generateMultiplication(answer) {
do | {
var numA = getRandomInt(1,10);
} | conditional_block |
|
practice.js | 0;
questions[q].txt.scaleX = 1.66;
break;
case 1:
questions[q].y = layout.MID2;
questions[q].txt.scaleX = questions[q].txt.scaleY = 1.00;
break;
case 2:
questions[q].y = layout.MID1; // Most upper
break;
default:
console.log("Something went wrong with loadQuestions()");
break;
}
console.log("Ques x: " + questions[q].x + " y: " + questions[q].y );
}
}
function initializeAnswerPositions() {
for (a = 0; a < 5; a++) {
// x and y of the CENTER of the container. (not top left)
answers[a].x = (properties.ANS_SIZE / 2) + (a)*(properties.ANS_SIZE);
console.log("Ans x: " + answers[a].x + " y: " + answers[a].y);
}
}
// AUDIO
function bgm(event){
console.log("Audio loaded");
if(bgmEnabled){
var instance = createjs.Sound.play("bg_music", { loop: -1 });
}
}
function correctSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("correct"); }
}
function incorrectSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("incorrect"); }
}
function gameoverSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("gameover"); }
}
function buttonSound() {
if (sfxEnabled) { var sound = new Audio("buttonSound"); }
}
// GAME LOGIC
// Creates the next answer
function generateNextAnswer() {
var randInt;
// Loop until there is no overlap
outer:
while (true) {
// Get the new value
randInt = getRandomInt(1, 20);
// Check if it exists already
for (j = 0; j < answers.length; j++) {
if (answers[j].answer == randInt) {
continue outer; // Yes - retry
}
}
// No - return the value
break;
}
// Create the next answer
// Finalize setup
var nextAnswer = foregroundLayer.addChild(new Answer(randInt)); // Create the actual object
if (currentAnswer != null) {
nextAnswer.index = currentAnswer.index; // Pass on parent index
nextAnswer.x = currentAnswer.x; // Pass on parent position
}
// Return the displayObject
return nextAnswer;
}
// Gathers are all the necessary info before generating the next answer
function prepareNextQuestion() {
// Obtain information about the current board
var availableArray = [];
// Note: foreach loop not working very well
for (a=0; a<answers.length; a++) {
if (answers[a].available == true) {
availableArray.push(answers[a]);
}
}
// Select one of the avaiable numbers
var randAvailable = availableArray[getRandomInt(0, availableArray.length - 1)];
// Toggle availibility off
randAvailable.available = false;
// Retrieve the answer
return randAvailable.answer;
}
function generateNextQuestion() {
// Init the question
var question;
// Retrieve the next answer
var answer = prepareNextQuestion();
// Generate a 2 or 3 term question
var randTerm = getRandomInt(termRange["min"], termRange["max"]);
// Generating a 2 term question
if (randTerm == 2) {
// Initialize the pieces (operands, operators, answer)
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Calculate the pieces
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
numSet = generateSet(answer, operatorSet[0]);
// Assemble all the pieces
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer;
// Create the question
question = new Question(fullSet);
// Generating a 3 term question
} else {
// Init
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Set up (random) operators
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
operatorSet[1] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
// Begin generation logic
var operatorCode = operatorSet[0] + operatorSet[1];
switch (operatorCode) {
// Calculate left to right (normal)
case "++":
case "+-":
case "+x":
case "+/":
case "-x":
case "-/":
case "xx":
case "x/":
var numSetL = generateSet(answer, operatorSet[0]); // #1 OP #2 OP ?
// take middle operator and expand // #1 OP (#2) OP ?
var numSetR = generateSet(numSetL[1], operatorSet[1]); // #1 OP #3 OP #4
// Assemble numSet, nsl[0] OP nsr[0] OP nsr[1]
numSet = [numSetL[0], numSetR[0], numSetR[1]];
break;
// Calculate right to left (reversed)
case "-+":
case "--":
case "x+":
case "x-":
case "/+":
case "/-":
case "/x":
case "//":
// Calculate right to left
var numSetR = generateSet(answer, operatorSet[1]); // ? OP #1 OP #2
// take middle operator and expand // ? OP (#1) OP #2
var numSetL = generateSet(numSetR[0], operatorSet[0]); // #3 OP #4 OP #2
// Assemble, nsl[0] +- nsl[1] x/ nsr[1];
numSet = [numSetL[0], numSetL[1], numSetR[1]];
break;
}
// Assemble numbers and operators
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer; // From above
// Create the question with the set
question = new Question(fullSet);
}
// Return the display object
return midLayer.addChild(question);
}
// Returns an array of numbers, and an array of operators (for the question).
function generateSet(answer, operator) {
switch (operator) {
case "+":
return generateSum(answer);
break;
case "-":
return generateMinus(answer);
break;
case "x":
return generateMultiplication(answer);
break;
case "/":
return generateDivision(answer);
break;
default:
console.log("something went wrong with generateNextQuestion()");
break;
}
}
function generateSum(answer) {
var numA = getRandomInt(0, answer);
var numB = answer - numA;
var numSet = [numA, numB]; // [[nums][operators]answer];
return numSet;
}
function generateMinus(answer) {
// TODO: The difference will always be from 1-20, might want to base it off the answer it self
var numA = getRandomInt(answer, answer + 20);
var numB = numA - answer;
var numSet = [numA, numB];
return numSet;
}
function generateMultiplication(answer) {
do {
var numA = getRandomInt(1,10);
} while (answer%numA != 0)
var numB = answer / numA;
var numSet = [numA, numB];
return numSet;
}
function generateDivision(answer) {
var numA = getRandomInt(1, 10);
var numB = answer * numA;
var numSet = [numB, numA];
return numSet;
}
// Move all objects up one position (overwritting the first)
function advanceRows(newQuestion) {
console.log("advanceRows()");
// Animations: (Individually animate each one)
// Bottom question
questions[0].animateGone();
// 2nd question
questions[1].animate1stPosition();
// 3rd question
questions[2].animate2ndPosition();
// New question
newQuestion.animate3rdPosition();
// Advance the questions internally
questions[0] = questions[1];
questions[1] = questions[2];
questions[2] = newQuestion
}
function advanceAnswers(nextAnswer) {
// Animations:
// Current answer
currentAnswer.animateGone();
// Next answer
nextAnswer.animateNew();
// Advance (replace) the answer internally
answers[nextAnswer.index] = nextAnswer; // Replace parent
}
// ANSWER CHECKING
function | checkAnswer | identifier_name |
|
practice.js | overlayLayer); // Add layers
// Detection
createjs.Touch.enable(stage);
// Initialize global variables for layout and sizing
initializeVariables(canvas.width, canvas.height);
// Preload all assets (crucial for first rendering)
preload.loadManifest(manifest);
}
function initGame() {
// Audio:
sfxEnabled = (localStorage.getItem("sfx-muted") == "true") ? false : true;
bgmEnabled = (localStorage.getItem("bgm-muted") == "true") ? false : true;
createjs.Sound.initializeDefaultPlugins();
var audiopath ="sound/";
var sounds = [
{src:"game_bgm.wav", id:"bg_music"},
{src:"hitsound1.wav", id:"correct"},
{src:"miss.wav", id:"incorrect"},
{src:"failsound.mp3", id:"gameover"},
{src:"achievement.mp3", id:"achieved"}
];
createjs.Sound.addEventListener("fileload", bgm); // Will call bgm() when loaded
createjs.Sound.registerSounds(sounds, audiopath);
// Initialization:
// Background
var bgImage = preload.getResult("bg");
var background = new createjs.Bitmap(bgImage);
setScaleFactor(background, canvas.width, canvas.height);
backgroundLayer.addChild(background);
// Indicator stuff
backButton = foregroundLayer.addChild(new Button('back'));
correctIndicator = foregroundLayer.addChild(new CorrectIndicator());
incorrectIndicator = foregroundLayer.addChild(new IncorrectIndicator());
// Answers and questions (in this order)
initializeAnswers();
initializeQuestions();
updateCurrentAnswer();
// Initial positions and sizing
initializeAnswerPositions();
initializeQuestionPositions();
// Looper
createjs.Ticker.setFPS(60);
// Handles all the update logic
createjs.Ticker.on("tick", handleTick);
// Achievements
// No condition
checkAchievement(achieve.YOURTRAININGBEGINS_KEY, achieve.YOURTRAININGBEGINS_SRC);
}
// -- HANDLERS --
function handleTick(event) {
if (!event.paused) {
// Render
stage.update();
}
}
// -- METHODS --
// INITIALIZERS
function initializeAnswers() {
for (i = 0; i < 5; i++) {
var nextAnswer = generateNextAnswer();
nextAnswer.index = i; // We need the index so we can replace them properly
answers.push(nextAnswer);
}
}
function initializeQuestions() {
for (i = 0; i < 3; i++) {
questions.push(generateNextQuestion());
}
}
function initializeQuestionPositions() {
for (q=0; q<3; q++) {
switch (q) {
case 0:
questions[q].y = layout.MID3; // Lowest
questions[q].scaleY = 1.66;
questions[q].txt.scaleY = 1.00;
questions[q].txt.scaleX = 1.66;
break;
case 1:
questions[q].y = layout.MID2;
questions[q].txt.scaleX = questions[q].txt.scaleY = 1.00;
break;
case 2:
questions[q].y = layout.MID1; // Most upper
break;
default:
console.log("Something went wrong with loadQuestions()");
break;
}
console.log("Ques x: " + questions[q].x + " y: " + questions[q].y );
}
}
function initializeAnswerPositions() {
for (a = 0; a < 5; a++) {
// x and y of the CENTER of the container. (not top left)
answers[a].x = (properties.ANS_SIZE / 2) + (a)*(properties.ANS_SIZE);
console.log("Ans x: " + answers[a].x + " y: " + answers[a].y);
}
}
// AUDIO
function bgm(event){
console.log("Audio loaded");
if(bgmEnabled){
var instance = createjs.Sound.play("bg_music", { loop: -1 });
}
}
function correctSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("correct"); }
}
function incorrectSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("incorrect"); }
}
function gameoverSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("gameover"); }
}
function buttonSound() {
if (sfxEnabled) { var sound = new Audio("buttonSound"); }
}
// GAME LOGIC
// Creates the next answer
function generateNextAnswer() {
var randInt;
// Loop until there is no overlap
outer:
while (true) {
// Get the new value
randInt = getRandomInt(1, 20);
// Check if it exists already
for (j = 0; j < answers.length; j++) {
if (answers[j].answer == randInt) {
continue outer; // Yes - retry
}
}
// No - return the value
break;
}
// Create the next answer
// Finalize setup
var nextAnswer = foregroundLayer.addChild(new Answer(randInt)); // Create the actual object
if (currentAnswer != null) {
nextAnswer.index = currentAnswer.index; // Pass on parent index
nextAnswer.x = currentAnswer.x; // Pass on parent position
}
// Return the displayObject
return nextAnswer;
}
| function prepareNextQuestion() {
// Obtain information about the current board
var availableArray = [];
// Note: foreach loop not working very well
for (a=0; a<answers.length; a++) {
if (answers[a].available == true) {
availableArray.push(answers[a]);
}
}
// Select one of the available numbers
var randAvailable = availableArray[getRandomInt(0, availableArray.length - 1)];
// Toggle availability off
randAvailable.available = false;
// Retrieve the answer
return randAvailable.answer;
}
function generateNextQuestion() {
// Init the question
var question;
// Retrieve the next answer
var answer = prepareNextQuestion();
// Generate a 2 or 3 term question
var randTerm = getRandomInt(termRange["min"], termRange["max"]);
// Generating a 2 term question
if (randTerm == 2) {
// Initialize the pieces (operands, operators, answer)
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Calculate the pieces
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
numSet = generateSet(answer, operatorSet[0]);
// Assemble all the pieces
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer;
// Create the question
question = new Question(fullSet);
// Generating a 3 term question
} else {
// Init
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Set up (random) operators
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
operatorSet[1] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
// Begin generation logic
var operatorCode = operatorSet[0] + operatorSet[1];
switch (operatorCode) {
// Calculate left to right (normal)
case "++":
case "+-":
case "+x":
case "+/":
case "-x":
case "-/":
case "xx":
case "x/":
var numSetL = generateSet(answer, operatorSet[0]); // #1 OP #2 OP ?
// take middle operator and expand // #1 OP (#2) OP ?
var numSetR = generateSet(numSetL[1], operatorSet[1]); // #1 OP #3 OP #4
// Assemble numSet, nsl[0] OP nsr[0] OP nsr[1]
numSet = [numSetL[0], numSetR[0], numSetR[1]];
break;
// Calculate right to left (reversed)
case "-+":
case "--":
case "x+":
case "x-":
case "/+":
case "/-":
case "/x":
case "//":
// Calculate right to left
var numSetR = generateSet(answer, operatorSet[1]); // ? OP #1 OP #2
// take middle operator and expand // ? OP (#1) OP #2
var numSetL = generateSet(numSetR[0], operatorSet[0]); // #3 OP #4 OP #2
// Assemble, nsl[0] +- nsl[1] x/ nsr[1];
numSet = [numSetL[0], numSetL[1], numSetR[1]];
break;
}
| // Gathers all the necessary info before generating the next answer | random_line_split
in_memory.rs | ) fn new<T>(consumer: T) -> Self
where
T: Into<Option<Consumer<HashId>>>,
{
Self {
hashes: IndexMap::new(),
values: IndexMap::new(),
free_ids: consumer.into(),
new_ids: Vec::with_capacity(1024),
values_bytes: 0,
}
}
pub fn get_memory_usage(&self) -> RepositoryMemoryUsage {
let values_bytes = self.values_bytes;
let values_capacity = self.values.capacity();
let hashes_capacity = self.hashes.capacity();
let total_bytes = values_bytes
.saturating_add(values_capacity * size_of::<Option<Arc<[u8]>>>())
.saturating_add(values_capacity * 16) // Each `Arc` has 16 extra bytes for the counters
.saturating_add(hashes_capacity * size_of::<ObjectHash>());
RepositoryMemoryUsage {
values_bytes,
values_capacity,
values_length: self.values.len(),
hashes_capacity,
hashes_length: self.hashes.len(),
total_bytes,
npending_free_ids: self.free_ids.as_ref().map(|c| c.len()).unwrap_or(0),
gc_npending_free_ids: GC_PENDING_HASHIDS.load(Ordering::Acquire),
nshapes: 0,
}
}
pub(crate) fn clear(&mut self) {
*self = Self {
hashes: IndexMap::new(),
values: IndexMap::new(),
free_ids: self.free_ids.take(),
new_ids: Vec::new(),
values_bytes: 0,
}
}
pub(crate) fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, HashIdError> {
let (hash_id, entry) = if let Some(free_id) = self.get_free_id() {
if let Some(old_value) = self.values.set(free_id, None)? {
self.values_bytes = self.values_bytes.saturating_sub(old_value.len());
}
(free_id, self.hashes.get_mut(free_id)?.ok_or(HashIdError)?)
} else {
self.hashes.get_vacant_entry()?
};
self.new_ids.push(hash_id);
Ok(VacantObjectHash {
entry: Some(entry),
hash_id,
})
} | self.free_ids.as_mut()?.pop().ok()
}
pub(crate) fn insert_value_at(
&mut self,
hash_id: HashId,
value: Arc<[u8]>,
) -> Result<(), HashIdError> {
self.values_bytes = self.values_bytes.saturating_add(value.len());
if let Some(old) = self.values.insert_at(hash_id, Some(value))? {
self.values_bytes = self.values_bytes.saturating_sub(old.len());
}
Ok(())
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, HashIdError> {
self.hashes.get(hash_id)
}
pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, HashIdError> {
match self.values.get(hash_id)? {
Some(value) => Ok(value.as_ref().map(|v| v.as_ref())),
None => Ok(None),
}
}
pub(crate) fn contains(&self, hash_id: HashId) -> Result<bool, HashIdError> {
Ok(self.values.get(hash_id)?.unwrap_or(&None).is_some())
}
fn take_new_ids(&mut self) -> Vec<HashId> {
let new_ids = self.new_ids.clone();
self.new_ids.clear();
new_ids
}
}
pub struct InMemory {
current_cycle: BTreeMap<HashId, Option<Arc<[u8]>>>,
pub hashes: HashValueStore,
sender: Option<Sender<Command>>,
pub context_hashes: Map<u64, HashId>,
context_hashes_cycles: VecDeque<Vec<u64>>,
thread_handle: Option<JoinHandle<()>>,
shapes: DirectoryShapes,
string_interner: StringInterner,
}
impl GarbageCollector for InMemory {
fn new_cycle_started(&mut self) -> Result<(), GarbageCollectionError> {
self.new_cycle_started();
Ok(())
}
fn block_applied(
&mut self,
referenced_older_objects: Vec<HashId>,
) -> Result<(), GarbageCollectionError> {
self.block_applied(referenced_older_objects);
Ok(())
}
}
impl Flushable for InMemory {
fn flush(&self) -> Result<(), anyhow::Error> {
Ok(())
}
}
impl Persistable for InMemory {
fn is_persistent(&self) -> bool {
false
}
}
impl KeyValueStoreBackend for InMemory {
fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> {
self.write_batch(batch)
}
fn contains(&self, hash_id: HashId) -> Result<bool, DBError> {
self.contains(hash_id)
}
fn put_context_hash(&mut self, hash_id: HashId) -> Result<(), DBError> {
self.put_context_hash_impl(hash_id)
}
fn get_context_hash(&self, context_hash: &ContextHash) -> Result<Option<HashId>, DBError> {
Ok(self.get_context_hash_impl(context_hash))
}
fn get_hash(&self, hash_id: HashId) -> Result<Option<Cow<ObjectHash>>, DBError> {
Ok(self.get_hash(hash_id)?.map(Cow::Borrowed))
}
fn get_value(&self, hash_id: HashId) -> Result<Option<Cow<[u8]>>, DBError> {
Ok(self.get_value(hash_id)?.map(Cow::Borrowed))
}
fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.get_vacant_entry_hash()
}
fn clear_objects(&mut self) -> Result<(), DBError> {
// `InMemory` has its own garbage collection
Ok(())
}
fn memory_usage(&self) -> RepositoryMemoryUsage {
let mut mem = self.hashes.get_memory_usage();
mem.nshapes = self.shapes.nshapes();
mem
}
fn get_shape(&self, shape_id: DirectoryShapeId) -> Result<ShapeStrings, DBError> {
self.shapes
.get_shape(shape_id)
.map(ShapeStrings::SliceIds)
.map_err(Into::into)
}
fn make_shape(
&mut self,
dir: &[(StringId, DirEntryId)],
) -> Result<Option<DirectoryShapeId>, DBError> {
self.shapes.make_shape(dir).map_err(Into::into)
}
fn synchronize_strings(&mut self, string_interner: &StringInterner) -> Result<(), DBError> {
self.string_interner.extend_from(string_interner);
Ok(())
}
fn get_str(&self, string_id: StringId) -> Option<&str> {
self.string_interner.get(string_id)
}
}
impl InMemory {
pub fn try_new() -> Result<Self, std::io::Error> {
// TODO - TE-210: Remove once we have proper support for history modes.
let garbage_collector_disabled = std::env::var("DISABLE_INMEM_CONTEXT_GC")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.expect("Provided `DISABLE_INMEM_CONTEXT_GC` value cannot be converted to bool");
let (sender, cons, thread_handle) = if garbage_collector_disabled {
(None, None, None)
} else {
let (sender, recv) = crossbeam_channel::unbounded();
let (prod, cons) = tezos_spsc::bounded(2_000_000);
let thread_handle = std::thread::Builder::new()
.name("ctx-inmem-gc-thread".to_string())
.spawn(move || {
GCThread {
cycles: Cycles::default(),
recv,
free_ids: prod,
pending: Vec::new(),
}
.run()
})?;
(Some(sender), Some(cons), Some(thread_handle))
};
let current_cycle = Default::default();
let hashes = HashValueStore::new(cons);
let context_hashes = Default::default();
let mut context_hashes_cycles = VecDeque::with_capacity(PRESERVE_CYCLE_COUNT);
for _ in 0..PRESERVE_CYCLE_COUNT {
context_hashes_cycles.push_back(Default::default())
}
Ok(Self {
current_cycle,
hashes,
sender,
context_hashes,
context_hashes_cycles,
thread_handle,
shapes: DirectoryShapes::default(),
string_interner: StringInterner::default(),
})
}
pub(crate) fn get_vacant_entry_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.hashes.get_vacant_object_hash().map_err(Into::into)
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, DBError> {
self.hashes.get_hash(hash_id).map_err(Into::into)
}
pub(crate) fn get_value(&self, hash_id: HashId) |
fn get_free_id(&mut self) -> Option<HashId> { | random_line_split |
in_memory.rs | (old_value.len());
}
(free_id, self.hashes.get_mut(free_id)?.ok_or(HashIdError)?)
} else {
self.hashes.get_vacant_entry()?
};
self.new_ids.push(hash_id);
Ok(VacantObjectHash {
entry: Some(entry),
hash_id,
})
}
fn get_free_id(&mut self) -> Option<HashId> {
self.free_ids.as_mut()?.pop().ok()
}
pub(crate) fn insert_value_at(
&mut self,
hash_id: HashId,
value: Arc<[u8]>,
) -> Result<(), HashIdError> {
self.values_bytes = self.values_bytes.saturating_add(value.len());
if let Some(old) = self.values.insert_at(hash_id, Some(value))? {
self.values_bytes = self.values_bytes.saturating_sub(old.len());
}
Ok(())
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, HashIdError> {
self.hashes.get(hash_id)
}
pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, HashIdError> {
match self.values.get(hash_id)? {
Some(value) => Ok(value.as_ref().map(|v| v.as_ref())),
None => Ok(None),
}
}
pub(crate) fn contains(&self, hash_id: HashId) -> Result<bool, HashIdError> {
Ok(self.values.get(hash_id)?.unwrap_or(&None).is_some())
}
fn take_new_ids(&mut self) -> Vec<HashId> {
let new_ids = self.new_ids.clone();
self.new_ids.clear();
new_ids
}
}
pub struct InMemory {
current_cycle: BTreeMap<HashId, Option<Arc<[u8]>>>,
pub hashes: HashValueStore,
sender: Option<Sender<Command>>,
pub context_hashes: Map<u64, HashId>,
context_hashes_cycles: VecDeque<Vec<u64>>,
thread_handle: Option<JoinHandle<()>>,
shapes: DirectoryShapes,
string_interner: StringInterner,
}
impl GarbageCollector for InMemory {
fn new_cycle_started(&mut self) -> Result<(), GarbageCollectionError> {
self.new_cycle_started();
Ok(())
}
fn block_applied(
&mut self,
referenced_older_objects: Vec<HashId>,
) -> Result<(), GarbageCollectionError> {
self.block_applied(referenced_older_objects);
Ok(())
}
}
impl Flushable for InMemory {
fn flush(&self) -> Result<(), anyhow::Error> {
Ok(())
}
}
impl Persistable for InMemory {
fn is_persistent(&self) -> bool {
false
}
}
impl KeyValueStoreBackend for InMemory {
fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> {
self.write_batch(batch)
}
fn contains(&self, hash_id: HashId) -> Result<bool, DBError> {
self.contains(hash_id)
}
fn put_context_hash(&mut self, hash_id: HashId) -> Result<(), DBError> {
self.put_context_hash_impl(hash_id)
}
fn get_context_hash(&self, context_hash: &ContextHash) -> Result<Option<HashId>, DBError> {
Ok(self.get_context_hash_impl(context_hash))
}
fn get_hash(&self, hash_id: HashId) -> Result<Option<Cow<ObjectHash>>, DBError> {
Ok(self.get_hash(hash_id)?.map(Cow::Borrowed))
}
fn get_value(&self, hash_id: HashId) -> Result<Option<Cow<[u8]>>, DBError> {
Ok(self.get_value(hash_id)?.map(Cow::Borrowed))
}
fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.get_vacant_entry_hash()
}
fn clear_objects(&mut self) -> Result<(), DBError> {
// `InMemory` has its own garbage collection
Ok(())
}
fn memory_usage(&self) -> RepositoryMemoryUsage {
let mut mem = self.hashes.get_memory_usage();
mem.nshapes = self.shapes.nshapes();
mem
}
fn get_shape(&self, shape_id: DirectoryShapeId) -> Result<ShapeStrings, DBError> {
self.shapes
.get_shape(shape_id)
.map(ShapeStrings::SliceIds)
.map_err(Into::into)
}
fn make_shape(
&mut self,
dir: &[(StringId, DirEntryId)],
) -> Result<Option<DirectoryShapeId>, DBError> {
self.shapes.make_shape(dir).map_err(Into::into)
}
fn synchronize_strings(&mut self, string_interner: &StringInterner) -> Result<(), DBError> {
self.string_interner.extend_from(string_interner);
Ok(())
}
fn get_str(&self, string_id: StringId) -> Option<&str> {
self.string_interner.get(string_id)
}
}
impl InMemory {
pub fn try_new() -> Result<Self, std::io::Error> {
// TODO - TE-210: Remove once we have proper support for history modes.
let garbage_collector_disabled = std::env::var("DISABLE_INMEM_CONTEXT_GC")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.expect("Provided `DISABLE_INMEM_CONTEXT_GC` value cannot be converted to bool");
let (sender, cons, thread_handle) = if garbage_collector_disabled {
(None, None, None)
} else {
let (sender, recv) = crossbeam_channel::unbounded();
let (prod, cons) = tezos_spsc::bounded(2_000_000);
let thread_handle = std::thread::Builder::new()
.name("ctx-inmem-gc-thread".to_string())
.spawn(move || {
GCThread {
cycles: Cycles::default(),
recv,
free_ids: prod,
pending: Vec::new(),
}
.run()
})?;
(Some(sender), Some(cons), Some(thread_handle))
};
let current_cycle = Default::default();
let hashes = HashValueStore::new(cons);
let context_hashes = Default::default();
let mut context_hashes_cycles = VecDeque::with_capacity(PRESERVE_CYCLE_COUNT);
for _ in 0..PRESERVE_CYCLE_COUNT {
context_hashes_cycles.push_back(Default::default())
}
Ok(Self {
current_cycle,
hashes,
sender,
context_hashes,
context_hashes_cycles,
thread_handle,
shapes: DirectoryShapes::default(),
string_interner: StringInterner::default(),
})
}
pub(crate) fn get_vacant_entry_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.hashes.get_vacant_object_hash().map_err(Into::into)
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, DBError> {
self.hashes.get_hash(hash_id).map_err(Into::into)
}
pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, DBError> {
self.hashes.get_value(hash_id).map_err(Into::into)
}
fn contains(&self, hash_id: HashId) -> Result<bool, DBError> {
self.hashes.contains(hash_id).map_err(Into::into)
}
pub fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> {
for (hash_id, value) in batch {
self.hashes.insert_value_at(hash_id, Arc::clone(&value))?;
self.current_cycle.insert(hash_id, Some(value));
}
Ok(())
}
pub fn new_cycle_started(&mut self) {
if let Some(sender) = &self.sender {
let values_in_cycle = std::mem::take(&mut self.current_cycle);
let new_ids = self.hashes.take_new_ids();
if let Err(e) = sender.try_send(Command::StartNewCycle {
values_in_cycle,
new_ids,
}) {
eprintln!("Fail to send Command::StartNewCycle to GC worker: {:?}", e);
}
if let Some(unused) = self.context_hashes_cycles.pop_front() {
for hash in unused {
self.context_hashes.remove(&hash);
}
}
self.context_hashes_cycles.push_back(Default::default());
}
}
pub fn block_applied(&mut self, reused: Vec<HashId>) {
if let Some(sender) = &self.sender {
if let Err(e) = sender.send(Command::MarkReused { reused }) {
eprintln!("Fail to send Command::MarkReused to GC worker: {:?}", e);
}
}
}
pub fn get_context_hash_impl(&self, context_hash: &ContextHash) -> Option<HashId> | {
let mut hasher = DefaultHasher::new();
hasher.write(context_hash.as_ref());
let hashed = hasher.finish();
self.context_hashes.get(&hashed).cloned()
} | identifier_body |
|
in_memory.rs | ) fn new<T>(consumer: T) -> Self
where
T: Into<Option<Consumer<HashId>>>,
{
Self {
hashes: IndexMap::new(),
values: IndexMap::new(),
free_ids: consumer.into(),
new_ids: Vec::with_capacity(1024),
values_bytes: 0,
}
}
pub fn | (&self) -> RepositoryMemoryUsage {
let values_bytes = self.values_bytes;
let values_capacity = self.values.capacity();
let hashes_capacity = self.hashes.capacity();
let total_bytes = values_bytes
.saturating_add(values_capacity * size_of::<Option<Arc<[u8]>>>())
.saturating_add(values_capacity * 16) // Each `Arc` has 16 extra bytes for the counters
.saturating_add(hashes_capacity * size_of::<ObjectHash>());
RepositoryMemoryUsage {
values_bytes,
values_capacity,
values_length: self.values.len(),
hashes_capacity,
hashes_length: self.hashes.len(),
total_bytes,
npending_free_ids: self.free_ids.as_ref().map(|c| c.len()).unwrap_or(0),
gc_npending_free_ids: GC_PENDING_HASHIDS.load(Ordering::Acquire),
nshapes: 0,
}
}
pub(crate) fn clear(&mut self) {
*self = Self {
hashes: IndexMap::new(),
values: IndexMap::new(),
free_ids: self.free_ids.take(),
new_ids: Vec::new(),
values_bytes: 0,
}
}
pub(crate) fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, HashIdError> {
let (hash_id, entry) = if let Some(free_id) = self.get_free_id() {
if let Some(old_value) = self.values.set(free_id, None)? {
self.values_bytes = self.values_bytes.saturating_sub(old_value.len());
}
(free_id, self.hashes.get_mut(free_id)?.ok_or(HashIdError)?)
} else {
self.hashes.get_vacant_entry()?
};
self.new_ids.push(hash_id);
Ok(VacantObjectHash {
entry: Some(entry),
hash_id,
})
}
fn get_free_id(&mut self) -> Option<HashId> {
self.free_ids.as_mut()?.pop().ok()
}
pub(crate) fn insert_value_at(
&mut self,
hash_id: HashId,
value: Arc<[u8]>,
) -> Result<(), HashIdError> {
self.values_bytes = self.values_bytes.saturating_add(value.len());
if let Some(old) = self.values.insert_at(hash_id, Some(value))? {
self.values_bytes = self.values_bytes.saturating_sub(old.len());
}
Ok(())
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, HashIdError> {
self.hashes.get(hash_id)
}
pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, HashIdError> {
match self.values.get(hash_id)? {
Some(value) => Ok(value.as_ref().map(|v| v.as_ref())),
None => Ok(None),
}
}
pub(crate) fn contains(&self, hash_id: HashId) -> Result<bool, HashIdError> {
Ok(self.values.get(hash_id)?.unwrap_or(&None).is_some())
}
fn take_new_ids(&mut self) -> Vec<HashId> {
let new_ids = self.new_ids.clone();
self.new_ids.clear();
new_ids
}
}
pub struct InMemory {
current_cycle: BTreeMap<HashId, Option<Arc<[u8]>>>,
pub hashes: HashValueStore,
sender: Option<Sender<Command>>,
pub context_hashes: Map<u64, HashId>,
context_hashes_cycles: VecDeque<Vec<u64>>,
thread_handle: Option<JoinHandle<()>>,
shapes: DirectoryShapes,
string_interner: StringInterner,
}
impl GarbageCollector for InMemory {
fn new_cycle_started(&mut self) -> Result<(), GarbageCollectionError> {
self.new_cycle_started();
Ok(())
}
fn block_applied(
&mut self,
referenced_older_objects: Vec<HashId>,
) -> Result<(), GarbageCollectionError> {
self.block_applied(referenced_older_objects);
Ok(())
}
}
impl Flushable for InMemory {
fn flush(&self) -> Result<(), anyhow::Error> {
Ok(())
}
}
impl Persistable for InMemory {
fn is_persistent(&self) -> bool {
false
}
}
impl KeyValueStoreBackend for InMemory {
fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> {
self.write_batch(batch)
}
fn contains(&self, hash_id: HashId) -> Result<bool, DBError> {
self.contains(hash_id)
}
fn put_context_hash(&mut self, hash_id: HashId) -> Result<(), DBError> {
self.put_context_hash_impl(hash_id)
}
fn get_context_hash(&self, context_hash: &ContextHash) -> Result<Option<HashId>, DBError> {
Ok(self.get_context_hash_impl(context_hash))
}
fn get_hash(&self, hash_id: HashId) -> Result<Option<Cow<ObjectHash>>, DBError> {
Ok(self.get_hash(hash_id)?.map(Cow::Borrowed))
}
fn get_value(&self, hash_id: HashId) -> Result<Option<Cow<[u8]>>, DBError> {
Ok(self.get_value(hash_id)?.map(Cow::Borrowed))
}
fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.get_vacant_entry_hash()
}
fn clear_objects(&mut self) -> Result<(), DBError> {
// `InMemory` has its own garbage collection
Ok(())
}
fn memory_usage(&self) -> RepositoryMemoryUsage {
let mut mem = self.hashes.get_memory_usage();
mem.nshapes = self.shapes.nshapes();
mem
}
fn get_shape(&self, shape_id: DirectoryShapeId) -> Result<ShapeStrings, DBError> {
self.shapes
.get_shape(shape_id)
.map(ShapeStrings::SliceIds)
.map_err(Into::into)
}
fn make_shape(
&mut self,
dir: &[(StringId, DirEntryId)],
) -> Result<Option<DirectoryShapeId>, DBError> {
self.shapes.make_shape(dir).map_err(Into::into)
}
fn synchronize_strings(&mut self, string_interner: &StringInterner) -> Result<(), DBError> {
self.string_interner.extend_from(string_interner);
Ok(())
}
fn get_str(&self, string_id: StringId) -> Option<&str> {
self.string_interner.get(string_id)
}
}
impl InMemory {
pub fn try_new() -> Result<Self, std::io::Error> {
// TODO - TE-210: Remove once we have proper support for history modes.
let garbage_collector_disabled = std::env::var("DISABLE_INMEM_CONTEXT_GC")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.expect("Provided `DISABLE_INMEM_CONTEXT_GC` value cannot be converted to bool");
let (sender, cons, thread_handle) = if garbage_collector_disabled {
(None, None, None)
} else {
let (sender, recv) = crossbeam_channel::unbounded();
let (prod, cons) = tezos_spsc::bounded(2_000_000);
let thread_handle = std::thread::Builder::new()
.name("ctx-inmem-gc-thread".to_string())
.spawn(move || {
GCThread {
cycles: Cycles::default(),
recv,
free_ids: prod,
pending: Vec::new(),
}
.run()
})?;
(Some(sender), Some(cons), Some(thread_handle))
};
let current_cycle = Default::default();
let hashes = HashValueStore::new(cons);
let context_hashes = Default::default();
let mut context_hashes_cycles = VecDeque::with_capacity(PRESERVE_CYCLE_COUNT);
for _ in 0..PRESERVE_CYCLE_COUNT {
context_hashes_cycles.push_back(Default::default())
}
Ok(Self {
current_cycle,
hashes,
sender,
context_hashes,
context_hashes_cycles,
thread_handle,
shapes: DirectoryShapes::default(),
string_interner: StringInterner::default(),
})
}
pub(crate) fn get_vacant_entry_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.hashes.get_vacant_object_hash().map_err(Into::into)
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, DBError> {
self.hashes.get_hash(hash_id).map_err(Into::into)
}
pub(crate) fn get_value(&self, hash_id: HashId) | get_memory_usage | identifier_name |
ghttp_server_router.go | &Router {
Uri : uri,
Domain : domain,
Method : method,
Priority : strings.Count(uri[1:], "/"),
}
router.RegRule, router.RegNames = s.patternToRegRule(uri)
// Registration object
registerItem := &handlerRegisterItem {
handler : handler,
hooks : make(map[string]*list.List),
router : router,
}
if len(hookName) > 0 {
registerItem.handler = nil
registerItem.hooks[hookName] = list.New()
registerItem.hooks[hookName].PushBack(handler)
}
// Dynamic registration: first determine whether this is actually a dynamic registration; if not, there is no need to add it to the dynamic registration records.
// Non-leaf nodes are hash-table lookup nodes, searched efficiently level by level according to the registered URI, down to the leaf list nodes;
// leaf nodes are linked lists sorted by priority (higher priority first) and searched by traversal; after the hash-table levels have been walked, a leaf list holds little data, so lookups stay efficient.
if _, ok := s.handlerTree[domain]; !ok {
s.handlerTree[domain] = make(map[string]interface{})
}
// Pointer used for traversal
p := s.handlerTree[domain]
// Rule lists collected for the current node
lists := make([]*list.List, 0)
array := ([]string)(nil)
if strings.EqualFold("/", uri) {
array = []string{"/"}
} else {
array = strings.Split(uri[1:], "/")
}
// The key "*fuzz" marks a fuzzy-match node, which has a linked list underneath it;
// the key "*list" marks that linked list; both leaf nodes and fuzzy-match nodes carry this attribute.
for k, v := range array {
if len(v) == 0 {
continue
}
// Check whether this is a fuzzy-match rule
if gregex.IsMatchString(`^[:\*]|{[\w\.\-]+}`, v) {
v = "*fuzz"
// Since this is a fuzzy rule, a *list is kept here so that later routing rules can be added to it;
// lookups walk from the leaf node's list back towards the root in priority order
if v, ok := p.(map[string]interface{})["*list"]; !ok {
p.(map[string]interface{})["*list"] = list.New()
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
} else {
lists = append(lists, v.(*list.List))
}
}
// Write this level of the tree
if _, ok := p.(map[string]interface{})[v]; !ok {
p.(map[string]interface{})[v] = make(map[string]interface{})
}
p = p.(map[string]interface{})[v]
// Reached a leaf node: add the matching rule to its list (the condition v != "*fuzz" is there because fuzzy nodes already added their *list above)
if k == len(array) - 1 && v != "*fuzz" {
if v, ok := p.(map[string]interface{})["*list"]; !ok {
p.(map[string]interface{})["*list"] = list.New()
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
} else {
lists = append(lists, v.(*list.List))
}
}
}
// The collected lists are the fuzzy-match lists touched while matching this routing rule (not every list in the tree);
// walk each node's fuzzy-match list from the front and insert this route item, keeping higher-priority items first
item := (*handlerRegisterItem)(nil)
// Marks whether the object a *handlerRegisterItem points to has already been processed, since several nodes may reference the same object
pushedItemSet := gset.NewStringSet()
if len(hookName) == 0 {
// Normal handler registration: append or overwrite
for _, l := range lists {
pushed := false
address := ""
for e := l.Front(); e != nil; e = e.Next() {
item = e.Value.(*handlerRegisterItem)
address = fmt.Sprintf("%p", item)
if pushedItemSet.Contains(address) {
pushed = true
break
}
// Check whether an identical route registration already exists
if strings.EqualFold(router.Domain, item.router.Domain) &&
strings.EqualFold(router.Method, item.router.Method) &&
strings.EqualFold(router.Uri, item.router.Uri) {
item.handler = handler
pushed = true
break
}
if s.compareRouterPriority(router, item.router) {
l.InsertBefore(registerItem, e)
pushed = true
break
}
}
if pushed {
if len(address) > 0 {
pushedItemSet.Add(address)
}
} else {
l.PushBack(registerItem)
}
}
} else {
// Hook (callback) registration: append the handler to the end of the list
for _, l := range lists {
pushed := false
address := ""
for e := l.Front(); e != nil; e = e.Next() {
item = e.Value.(*handlerRegisterItem)
address = fmt.Sprintf("%p", item)
if pushedItemSet.Contains(address) {
pushed = true
break
}
// Check whether an identical route registration already exists
if strings.EqualFold(router.Domain, item.router.Domain) &&
strings.EqualFold(router.Method, item.router.Method) &&
strings.EqualFold(router.Uri, item.router.Uri) {
if _, ok := item.hooks[hookName]; !ok {
item.hooks[hookName] = list.New()
}
item.hooks[hookName].PushBack(handler)
pushed = true
break
}
if s.compareRouterPriority(router, item.router) {
l.InsertBefore(registerItem, e)
pushed = true
break
}
}
if pushed {
if len(address) > 0 {
pushedItemSet.Add(address)
}
} else {
l.PushBack(registerItem)
}
}
}
//gutil.Dump(s.handlerTree)
return nil
}
// Compares the priority of two handler items; pay close attention to the order of the new and old arguments.
// Priority comparison rules:
// 1. Deeper levels have higher priority (compare the number of "/");
// 2. Fuzzy-rule priority: {xxx} > :xxx > *xxx;
func (s *Server) compareRouterPriority(newRouter, oldRouter *Router) bool {
if newRouter.Priority > oldRouter.Priority {
return true
}
if newRouter.Priority < oldRouter.Priority {
return false
}
// E.g. /{user}/{act} has higher priority than /:user/:act
if strings.Count(newRouter.Uri, "{") > strings.Count(oldRouter.Uri, "{") {
return true
}
// E.g. /:name/update has higher priority than /:name/:action
if strings.Count(newRouter.Uri, "/:") < strings.Count(oldRouter.Uri, "/:") {
// E.g. /:name/:action has higher priority than /:name/*any
if strings.Count(newRouter.Uri, "/*") < strings.Count(oldRouter.Uri, "/*") {
return true
}
return false
}
return false
}
// Service handler lookup
func (s *Server) searchHandler(method, path, domain string) *handlerParsedItem {
domains := []string{ gDEFAULT_DOMAIN }
if !strings.EqualFold(gDEFAULT_DOMAIN, domain) {
domains = append(domains, domain)
}
array := ([]string)(nil)
if strings.EqualFold("/", path) {
array = []string{"/"}
} else {
array = strings.Split(path[1:], "/")
}
for _, domain := range domains {
p, ok := s.handlerTree[domain]
if !ok {
continue
}
// The multi-level lists (each node has a *list) exist so that when nothing matches at the leaf node, the parent's fuzzy-match rules can still handle the request
lists := make([]*list.List, 0)
for k, v := range array {
if _, ok := p.(map[string]interface{})["*list"]; ok {
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
}
if _, ok := p.(map[string]interface{})[v]; ok {
p = p.(map[string]interface{})[v]
if k == len(array) - 1 {
if _, ok := p.(map[string]interface{})["*list"]; ok {
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
break
}
}
} else {
if _, ok := p.(map[string]interface{})["*fuzz"]; ok {
p = p.(map[string]interface{})["*fuzz"]
}
}
// 如果是叶子节点,同时判断当前层级的"*fuzz"键名,解决例 | 如:/user/*acti | identifier_name |
|
ghttp_server_router.go | " {
if v, ok := p.(map[string]interface{})["*list"]; !ok {
p.(map[string]interface{})["*list"] = list.New()
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
} else {
lists = append(lists, v.(*list.List))
}
}
}
// The collected lists are the fuzzy-match lists touched while matching this routing rule (not every list in the tree);
// walk each node's fuzzy-match list from the front and insert this route item, keeping higher-priority items first
item := (*handlerRegisterItem)(nil)
// Marks whether the object a *handlerRegisterItem points to has already been processed, since several nodes may reference the same object
pushedItemSet := gset.NewStringSet()
if len(hookName) == 0 {
// Normal handler registration: append or overwrite
for _, l := range lists {
pushed := false
address := ""
for e := l.Front(); e != nil; e = e.Next() {
item = e.Value.(*handlerRegisterItem)
address = fmt.Sprintf("%p", item)
if pushedItemSet.Contains(address) {
pushed = true
break
}
// Check whether an identical route registration already exists
if strings.EqualFold(router.Domain, item.router.Domain) &&
strings.EqualFold(router.Method, item.router.Method) &&
strings.EqualFold(router.Uri, item.router.Uri) {
item.handler = handler
pushed = true
break
}
if s.compareRouterPriority(router, item.router) {
l.InsertBefore(registerItem, e)
pushed = true
break
}
}
if pushed {
if len(address) > 0 {
pushedItemSet.Add(address)
}
} else {
l.PushBack(registerItem)
}
}
} else {
// Hook (callback) registration: append the handler to the end of the list
for _, l := range lists {
pushed := false
address := ""
for e := l.Front(); e != nil; e = e.Next() {
item = e.Value.(*handlerRegisterItem)
address = fmt.Sprintf("%p", item)
if pushedItemSet.Contains(address) {
pushed = true
break
}
// Check whether an identical route registration already exists
if strings.EqualFold(router.Domain, item.router.Domain) &&
strings.EqualFold(router.Method, item.router.Method) &&
strings.EqualFold(router.Uri, item.router.Uri) {
if _, ok := item.hooks[hookName]; !ok {
item.hooks[hookName] = list.New()
}
item.hooks[hookName].PushBack(handler)
pushed = true
break
}
if s.compareRouterPriority(router, item.router) {
l.InsertBefore(registerItem, e)
pushed = true
break
}
}
if pushed {
if len(address) > 0 {
pushedItemSet.Add(address)
}
} else {
l.PushBack(registerItem)
}
}
}
//gutil.Dump(s.handlerTree)
return nil
}
// Compares the priority of two handler items; pay close attention to the order of the new and old arguments.
// Priority comparison rules:
// 1. Deeper levels have higher priority (compare the number of "/");
// 2. Fuzzy-rule priority: {xxx} > :xxx > *xxx;
func (s *Server) compareRouterPriority(newRouter, oldRouter *Router) bool {
if newRouter.Priority > oldRouter.Priority {
return true
}
if newRouter.Priority < oldRouter.Priority {
return false
}
// E.g. /{user}/{act} has higher priority than /:user/:act
if strings.Count(newRouter.Uri, "{") > strings.Count(oldRouter.Uri, "{") {
return true
}
// E.g. /:name/update has higher priority than /:name/:action
if strings.Count(newRouter.Uri, "/:") < strings.Count(oldRouter.Uri, "/:") {
// E.g. /:name/:action has higher priority than /:name/*any
if strings.Count(newRouter.Uri, "/*") < strings.Count(oldRouter.Uri, "/*") {
return true
}
return false
}
return false
}
// Service handler lookup
func (s *Server) searchHandler(method, path, domain string) *handlerParsedItem {
domains := []string{ gDEFAULT_DOMAIN }
if !strings.EqualFold(gDEFAULT_DOMAIN, domain) {
domains = append(domains, domain)
}
array := ([]string)(nil)
if strings.EqualFold("/", path) {
array = []string{"/"}
} else {
array = strings.Split(path[1:], "/")
}
for _, domain := range domains {
p, ok := s.handlerTree[domain]
if !ok {
continue
}
// The multi-level lists (each node has a *list) exist so that when nothing matches at the leaf node, the parent's fuzzy-match rules can still handle the request
lists := make([]*list.List, 0)
for k, v := range array {
if _, ok := p.(map[string]interface{})["*list"]; ok {
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
}
if _, ok := p.(map[string]interface{})[v]; ok {
p = p.(map[string]interface{})[v]
if k == len(array) - 1 {
if _, ok := p.(map[string]interface{})["*list"]; ok {
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
break
}
}
} else {
if _, ok := p.(map[string]interface{})["*fuzz"]; ok {
p = p.(map[string]interface{})["*fuzz"]
}
}
// For leaf nodes, also check the "*fuzz" key of the current level, so that e.g. /user/*action matches /user
if k == len(array) - 1 {
if _, ok := p.(map[string]interface{})["*fuzz"]; ok {
p = p.(map[string]interface{})["*fuzz"]
}
if _, ok := p.(map[string]interface{})["*list"]; ok {
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
}
}
}
// Traverse the multi-level lists from the end of the array backwards: deeper lists at the end have higher priority
for i := len(lists) - 1; i >= 0; i-- {
for e := lists[i].Front(); e != nil; e = e.Next() {
item := e.Value.(*handlerRegisterItem)
// Dynamic rules registered with gDEFAULT_METHOD are not expanded into every HTTP method the way static rules are
if strings.EqualFold(item.router.Method, gDEFAULT_METHOD) || strings.EqualFold(item.router.Method, method) {
// Note: when the rule has no dynamic parts, len(match) == 1
if match, err := gregex.MatchString(item.router.RegRule, path); err == nil && len(match) > 0 {
//gutil.Dump(match)
//gutil.Dump(names)
handlerItem := &handlerParsedItem{item, nil}
// If query matching is required, the URL has to be re-parsed with the regex
if len(item.router.RegNames) > 0 {
if len(match) > len(item.router.RegNames) {
handlerItem.values = make(map[string][]string)
// If the same route parameter name occurs more than once, append to its array
for i, name := range item.router.RegNames {
if _, ok := handlerItem.values[name]; ok {
handlerItem.values[name] = append(handlerItem.values[name], match[i + 1])
} else {
handlerItem.values[name] = []string{match[i + 1]}
}
}
}
}
return handlerItem
}
}
}
}
}
return nil
}
// Parses the pattern (without method and domain) into a regular expression plus the corresponding query names
func (s *Server) patternToRegRule(rule string) (regrule string, names []string) {
if len(rule) < 2 {
return rule, nil
}
regrule = "^"
array := strings.Split(rule[1:], "/")
for _, v := range array {
if len(v) == 0 {
continue
}
switch v[0] {
case ':':
regrule += `/([\w\.\-]+)`
names = append(names, v[1:])
case '*':
regrule += `/{0,1}(.*)`
names = append(names, v[1:]) | default:
s, _ := gregex.ReplaceStringFunc(`{[\w\.\-]+}`, v, func(s string) string {
names = append(names, s[1 : len(s) - 1])
return `([\w\.\-]+)` | random_line_split |
|
ghttp_server_router.go | }
if array, err := gregex.MatchString(`(.+)@([\w\.\-]+)`, uri); len(array) > 1 && err == nil {
uri = array[1]
domain = array[2]
}
if uri == "" {
err = errors.New("invalid pattern")
}
// Strip the trailing "/" so that handling is consistent with route matching
if uri != "/" {
uri = strings.TrimRight(uri, "/")
}
return
}
// Route registration handler.
// If a hook parameter is given this registers a hook callback, otherwise a normal route handler.
func (s *Server) setHandler(pattern string, handler *handlerItem, hook ... string) error {
// Routes cannot be registered dynamically while the Web Server is running
if s.status == gSERVER_STATUS_RUNNING {
return errors.New("cannot bind handler while server running")
}
var hookName string
if len(hook) > 0 {
hookName = hook[0]
}
domain, method, uri, err := s.parsePattern(pattern)
if err != nil {
return errors.New("invalid pattern")
}
// Router object
router := &Router {
Uri : uri,
Domain : domain,
Method : method,
Priority : strings.Count(uri[1:], "/"),
}
router.RegRule, router.RegNames = s.patternToRegRule(uri)
// Registration object
registerItem := &handlerRegisterItem {
handler : handler,
hooks : make(map[string]*list.List),
router : router,
}
if len(hookName) > 0 {
registerItem.handler = nil
registerItem.hooks[hookName] = list.New()
registerItem.hooks[hookName].PushBack(handler)
}
// Dynamic registration: first determine whether this is actually a dynamic registration; if not, there is no need to add it to the dynamic registration records.
// Non-leaf nodes are hash-table lookup nodes, searched efficiently level by level according to the registered URI, down to the leaf list nodes;
// leaf nodes are linked lists sorted by priority (higher priority first) and searched by traversal; after the hash-table levels have been walked, a leaf list holds little data, so lookups stay efficient.
if _, ok := s.handlerTree[domain]; !ok {
s.handlerTree[domain] = make(map[string]interface{})
}
// Pointer used for traversal
p := s.handlerTree[domain]
// Rule lists collected for the current node
lists := make([]*list.List, 0)
array := ([]string)(nil)
if strings.EqualFold("/", uri) {
array = []string{"/"}
} else {
array = strings.Split(uri[1:], "/")
}
// The key "*fuzz" marks a fuzzy-match node, which has a linked list underneath it;
// the key "*list" marks that linked list; both leaf nodes and fuzzy-match nodes carry this attribute.
for k, v := range array {
if len(v) == 0 {
continue
}
// Check whether this is a fuzzy-match rule
if gregex.IsMatchString(`^[:\*]|{[\w\.\-]+}`, v) {
v = "*fuzz"
// Since this is a fuzzy rule, a *list is kept here so that later routing rules can be added to it;
// lookups walk from the leaf node's list back towards the root in priority order
if v, ok := p.(map[string]interface{})["*list"]; !ok {
p.(map[string]interface{})["*list"] = list.New()
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
} else {
lists = append(lists, v.(*list.List))
}
}
// Write this level of the tree
if _, ok := p.(map[string]interface{})[v]; !ok {
p.(map[string]interface{})[v] = make(map[string]interface{})
}
p = p.(map[string]interface{})[v]
// Reached a leaf node: add the matching rule to its list (the condition v != "*fuzz" is there because fuzzy nodes already added their *list above)
if k == len(array) - 1 && v != "*fuzz" {
if v, ok := p.(map[string]interface{})["*list"]; !ok {
p.(map[string]interface{})["*list"] = list.New()
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
} else {
lists = append(lists, v.(*list.List))
}
}
}
// The collected lists are the fuzzy-match lists touched while matching this routing rule (not every list in the tree);
// walk each node's fuzzy-match list from the front and insert this route item, keeping higher-priority items first
item := (*handlerRegisterItem)(nil)
// Marks whether the object a *handlerRegisterItem points to has already been processed, since several nodes may reference the same object
pushedItemSet := gset.NewStringSet()
if len(hookName) == 0 {
// Normal handler registration: append or overwrite
for _, l := range lists {
pushed := false
address := ""
for e := l.Front(); e != nil; e = e.Next() {
item = e.Value.(*handlerRegisterItem)
address = fmt.Sprintf("%p", item)
if pushedItemSet.Contains(address) {
pushed = true
break
}
// Check whether an identical route registration already exists
if strings.EqualFold(router.Domain, item.router.Domain) &&
strings.EqualFold(router.Method, item.router.Method) &&
strings.EqualFold(router.Uri, item.router.Uri) {
item.handler = handler
pushed = true
break
}
if s.compareRouterPriority(router, item.router) {
l.InsertBefore(registerItem, e)
pushed = true
break
}
}
if pushed {
if len(address) > 0 {
pushedItemSet.Add(address)
}
} else {
l.PushBack(registerItem)
}
}
} else {
// Hook (callback) registration: append the handler to the end of the list
for _, l := range lists {
pushed := false
address := ""
for e := l.Front(); e != nil; e = e.Next() {
item = e.Value.(*handlerRegisterItem)
address = fmt.Sprintf("%p", item)
if pushedItemSet.Contains(address) {
pushed = true
break
}
// Check whether an identical route registration already exists
if strings.EqualFold(router.Domain, item.router.Domain) &&
strings.EqualFold(router.Method, item.router.Method) &&
strings.EqualFold(router.Uri, item.router.Uri) {
if _, ok := item.hooks[hookName]; !ok {
item.hooks[hookName] = list.New()
}
item.hooks[hookName].PushBack(handler)
pushed = true
break
}
| pushed = true
break
}
}
if pushed {
if len(address) > 0 {
pushedItemSet.Add(address)
}
} else {
l.PushBack(registerItem)
}
}
}
//gutil.Dump(s.handlerTree)
return nil
}
// Compares the priority of two handler items; pay close attention to the order of the new and old arguments.
// Priority comparison rules:
// 1. Deeper levels have higher priority (compare the number of "/");
// 2. Fuzzy-rule priority: {xxx} > :xxx > *xxx;
func (s *Server) compareRouterPriority(newRouter, oldRouter *Router) bool {
if newRouter.Priority > oldRouter.Priority {
return true
}
if newRouter.Priority < oldRouter.Priority {
return false
}
// E.g. /{user}/{act} has higher priority than /:user/:act
if strings.Count(newRouter.Uri, "{") > strings.Count(oldRouter.Uri, "{") {
return true
}
// E.g. /:name/update has higher priority than /:name/:action
if strings.Count(newRouter.Uri, "/:") < strings.Count(oldRouter.Uri, "/:") {
// E.g. /:name/:action has higher priority than /:name/*any
if strings.Count(newRouter.Uri, "/*") < strings.Count(oldRouter.Uri, "/*") {
return true
}
return false
}
return false
}
// Service handler lookup
func (s *Server) searchHandler(method, path, domain string) *handlerParsedItem {
domains := []string{ gDEFAULT_DOMAIN }
if !strings.EqualFold(gDEFAULT_DOMAIN, domain) {
domains = append(domains, domain)
}
array := ([]string)(nil)
if strings.EqualFold("/", path) {
array = []string{"/"}
} else {
array = strings.Split(path[1:], "/")
}
for _, domain := range | if s.compareRouterPriority(router, item.router) {
l.InsertBefore(registerItem, e)
| conditional_block |
ghttp_server_router.go | // 当前节点的规则链表
lists := make([]*list.List, 0)
array := ([]string)(nil)
if strings.EqualFold("/", uri) {
array = []string{"/"}
} else {
array = strings.Split(uri[1:], "/")
}
// The key "*fuzz" marks a fuzzy-match node, which has a linked list underneath it;
// the key "*list" marks that linked list; both leaf nodes and fuzzy-match nodes carry this attribute.
for k, v := range array {
if len(v) == 0 {
continue
}
// Check whether this is a fuzzy-match rule
if gregex.IsMatchString(`^[:\*]|{[\w\.\-]+}`, v) {
v = "*fuzz"
// Since this is a fuzzy rule, a *list is kept here so that later routing rules can be added to it;
// lookups walk from the leaf node's list back towards the root in priority order
if v, ok := p.(map[string]interface{})["*list"]; !ok {
p.(map[string]interface{})["*list"] = list.New()
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
} else {
lists = append(lists, v.(*list.List))
}
}
// Write this level of the tree
if _, ok := p.(map[string]interface{})[v]; !ok {
p.(map[string]interface{})[v] = make(map[string]interface{})
}
p = p.(map[string]interface{})[v]
// Reached a leaf node: add the matching rule to its list (the condition v != "*fuzz" is there because fuzzy nodes already added their *list above)
if k == len(array) - 1 && v != "*fuzz" {
if v, ok := p.(map[string]interface{})["*list"]; !ok {
p.(map[string]interface{})["*list"] = list.New()
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
} else {
lists = append(lists, v.(*list.List))
}
}
}
// The collected lists are the fuzzy-match lists touched while matching this routing rule (not every list in the tree);
// walk each node's fuzzy-match list from the front and insert this route item, keeping higher-priority items first
item := (*handlerRegisterItem)(nil)
// Marks whether the object a *handlerRegisterItem points to has already been processed, since several nodes may reference the same object
pushedItemSet := gset.NewStringSet()
if len(hookName) == 0 {
// Normal handler registration: append or overwrite
for _, l := range lists {
pushed := false
address := ""
for e := l.Front(); e != nil; e = e.Next() {
item = e.Value.(*handlerRegisterItem)
address = fmt.Sprintf("%p", item)
if pushedItemSet.Contains(address) {
pushed = true
break
}
// Check whether an identical route registration already exists
if strings.EqualFold(router.Domain, item.router.Domain) &&
strings.EqualFold(router.Method, item.router.Method) &&
strings.EqualFold(router.Uri, item.router.Uri) {
item.handler = handler
pushed = true
break
}
if s.compareRouterPriority(router, item.router) {
l.InsertBefore(registerItem, e)
pushed = true
break
}
}
if pushed {
if len(address) > 0 {
pushedItemSet.Add(address)
}
} else {
l.PushBack(registerItem)
}
}
} else {
// Hook (callback) registration: append the handler to the end of the list
for _, l := range lists {
pushed := false
address := ""
for e := l.Front(); e != nil; e = e.Next() {
item = e.Value.(*handlerRegisterItem)
address = fmt.Sprintf("%p", item)
if pushedItemSet.Contains(address) {
pushed = true
break
}
// Check whether an identical route registration already exists
if strings.EqualFold(router.Domain, item.router.Domain) &&
strings.EqualFold(router.Method, item.router.Method) &&
strings.EqualFold(router.Uri, item.router.Uri) {
if _, ok := item.hooks[hookName]; !ok {
item.hooks[hookName] = list.New()
}
item.hooks[hookName].PushBack(handler)
pushed = true
break
}
if s.compareRouterPriority(router, item.router) {
l.InsertBefore(registerItem, e)
pushed = true
break
}
}
if pushed {
if len(address) > 0 {
pushedItemSet.Add(address)
}
} else {
l.PushBack(registerItem)
}
}
}
//gutil.Dump(s.handlerTree)
return nil
}
// Compares the priority of two handler items; pay close attention to the order of the new and old arguments.
// Priority comparison rules:
// 1. Deeper levels have higher priority (compare the number of "/");
// 2. Fuzzy-rule priority: {xxx} > :xxx > *xxx;
func (s *Server) compareRouterPriority(newRouter, oldRouter *Router) bool {
if newRouter.Priority > oldRouter.Priority {
return true
}
if newRouter.Priority < oldRouter.Priority {
return false
}
// E.g. /{user}/{act} has higher priority than /:user/:act
if strings.Count(newRouter.Uri, "{") > strings.Count(oldRouter.Uri, "{") {
return true
}
// E.g. /:name/update has higher priority than /:name/:action
if strings.Count(newRouter.Uri, "/:") < strings.Count(oldRouter.Uri, "/:") {
// E.g. /:name/:action has higher priority than /:name/*any
if strings.Count(newRouter.Uri, "/*") < strings.Count(oldRouter.Uri, "/*") {
return true
}
return false
}
return false
}
// Service handler lookup
func (s *Server) searchHandler(method, path, domain string) *handlerParsedItem {
domains := []string{ gDEFAULT_DOMAIN }
if !strings.EqualFold(gDEFAULT_DOMAIN, domain) {
domains = append(domains, domain)
}
array := ([]string)(nil)
if strings.EqualFold("/", path) {
array = []string{"/"}
} else {
array = strings.Split(path[1:], "/")
}
for _, domain := range domains {
p, ok := s.handlerTree[domain]
if !ok {
continue
}
// The multi-level lists (each node has a *list) exist so that when nothing matches at the leaf node, the parent's fuzzy-match rules can still handle the request
lists := make([]*list.List, 0)
for k, v := range array {
if _, ok := p.(map[string]interface{})["*list"]; ok {
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
}
if _, ok := p.(map[string]interface{})[v]; ok {
p = p.(map[string]interface{})[v]
if k == len(array) - 1 {
if _, ok := p.(map[string]interface{})["*list"]; ok {
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
break
}
}
} else {
if _, ok := p.(map[string]interface{})["*fuzz"]; ok {
p = p.(map[string]interface{})["*fuzz"]
}
}
// For leaf nodes, also check the "*fuzz" key of the current level, so that e.g. /user/*action matches /user
if k == len(array) - | 1 {
if _, ok := p.(map[string]interface{})["*fuzz"]; ok {
p = p.(map[string]interface{})["*fuzz"]
}
if _, ok := p.(map[string]interface{})["*list"]; ok {
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
}
}
}
// Traverse the multi-level lists from the end of the array backwards: deeper lists at the end have higher priority
for i := len(lists) - 1; i >= 0; i-- {
for e := lists[i].Front(); e != nil; e = e.Next() {
item := e.Value.(*handlerRegisterItem)
// Dynamic rules registered with gDEFAULT_METHOD are not expanded into every HTTP method the way static rules are
if strings.EqualFold(item.router.Method, gDEFAULT_METHOD) || strings.EqualFold(item.router.Method, method) {
// Note: when the rule has no dynamic parts, len(match) == 1
if match, err := gregex.MatchString(item.router.RegRule, path); err == nil && len(match) > 0 {
//gutil.Dump(match)
//gutil.Dump(names) | identifier_body |
|
licensePlateDetectorOptimized.py | Contours(edged.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE) #Get the contours of the Canny image [remember that this will return more contours than we need
#Because Canny just returns lines]
contours = imutils.grab_contours(contours) #Get the contours using imutils
return contours
def analyze_image(self): # getting an array of the potential letters from each license plate ROI
str_arr = []
#SHOWING THE ROI's
for count, (regionOfInterest, x, y, w, h) in enumerate(self.roi_array): #iterate through the ROI array
data = self.process_ROI(regionOfInterest, count) #process the ROI for letter regions
if data is not None and get_chars: #check if the data is found and if the get_chars option is on
str_arr.append(self.show_bounds_and_text(data, x, y, w, h)) #if get_chars, run the plate through the NN for the characters
return str_arr #return an array of the strings that were found on the screen
def show_bounds_and_text(self, data, x, y, w, h):
cv.rectangle(self.img_rects, (x, y), (x + w, y + h), (0, 255, 0), thickness=4) #draw a bounding rectangle on the img_rects image
neuralNet = NeuralNetwork() #init a NN (neural network)
ret_str = neuralNet.get_chars_string(data) #get the character string from the plate
cv.putText(self.img_rects, ret_str, (x, y - 5), cv.FONT_HERSHEY_DUPLEX, 1.7, (0, 255, 0), thickness=2) #write those characters to the image
return ret_str #return the string
########################################################################################
################################### CHECKING CONTOURS ##################################
########################################################################################
def process_ROI(self, roi, counter): #takes a license plate ROI as an input with a counter (for showing images) and segments the character ROI's
regionOfInterest = roi[int(roi.shape[0] / 4) : roi.shape[0] - int(roi.shape[0] / 5), int(roi.shape[1] / 18) : roi.shape[1] - int(roi.shape[1] / 18)]
#^cut down on the region of interest that is passed (values determined through trial and error)
name = "ROI {}".format(counter) #format the name
regionOfInterest = cv.cvtColor(regionOfInterest, cv.COLOR_BGR2GRAY) #convert the color to grayscale
regionOfInterest[: int(regionOfInterest.shape[0] / 6), :] += self.img_dilate #Increasing the brightness of the top of the image (BREAKS WITH HIGH VALUES because of overflow)
regionOfInterest = imutils.resize(regionOfInterest, height=200, inter=cv.INTER_AREA) #resize the region of interest bigger
image = cv.GaussianBlur(regionOfInterest, (0, 0), 3) #try to sharpen the image using a blur of 0, 0
image = cv.addWeighted(image, 1.5, regionOfInterest, -0.5, 0) #continue to try to sharpen the image
_, thresh = cv.threshold(image, 0, 255, cv.THRESH_BINARY+cv.THRESH_OTSU) #threshold the image for the characters
thresh = cv.bitwise_not(thresh) #convert to bitwise not (so that the chars are now the white parts)
thresh = cv.erode(thresh, (81, 61), iterations = 15) #erode to try to break the erroneous connections that the computer picks up between the chars
contours, _ = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) #find the contours
contours = self.sort_contours_left(contours) #sort the contours from left to right
image = cv.cvtColor(image, cv.COLOR_GRAY2BGR) #convert the image colors back
letters = []
for contour in contours:
if cv.contourArea(contour) > self.letter_contour_min: #check if each contour is too small or not (makes sure we are analyzing potential chars)
x, y, w, h = cv.boundingRect(contour) #bounding rect around the supposed character
letterInterest = thresh[0 : y + h, x : x + w] #making sure that the top of the letter is not cut off when getting the ROI
cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0)) #bounding box on the passed image
letterImage = cv.resize(letterInterest, (60, 80)) #resize the char ROI
letters.append(letterImage) #append the char ROI
if debug: #if debugging, show all relevant images
cv.imshow("GRAY {}".format(counter), imutils.resize(thresh, height=200))
cv.imshow(name, image) #showing and resizing image
cv.moveWindow(name, 0, 110 * counter - 50) #Moving the ROI windows into the right spot on the screen
if len(letters) > 4: #if more than four letters are detected, then return the array
if collect_data:
NeuralNetwork.label_letter(letters) #if we are collecting data, then do what is necessary there to create the label
return letters
else: return None
def check_min_rect(self, contour): #function for getting the min-area rectangle and validating whether it is ok
rect = cv.minAreaRect(contour) #get the min area rect
rx, ry, rw, rh = cv.boundingRect(contour) #get the bounding rectangle coordinates
if self.validateRatio(rect, rw, rh): #check the ratios of the ROI
brect = self.img[ry : ry + rh, rx : rx + rw] #get the ROI
self.roi_array.append((brect, rx, ry, rw, rh)) #append this ROI to the ROI array
return True #if everything is right, then return true to show that it is valid
else:
return False #else, return false
def validateRatio(self, rect, rw, rh): #more checking that the contour could be a license plate
(x, y), (width, height), angle = rect #get all of the data about the minarea bounding rectangle
if width == 0 or height == 0: #to defend against illegal math operations which panic the program
return False
angle = angle % 90 #getting the angle in the most basic form
area = width * height #calc the area
if not ((angle < self.angle_max or angle > self.angle_min) and (area > self.area_min and area < self.area_max)):
return False #if something is off, then return false (check that the angle is almost 90 or almost 0 and that the area is ok)
if rw < rh: #if the width is less than the height, return false
return False
return self.rat_check(width, height) #check the ratios
def rat_check(self, width, height):
ratio = float(width) / float(height) #check whether the width to height ratio is wrong
if ratio < 1:
ratio = 1 / ratio #making it so that the ratio is always more than 1
return not (ratio < self.ratio_min or ratio > self.ratio_max) #if the area is not in range or the ratio is off, return false
########################################################################################
#################################### SHOWING IMAGES ####################################
########################################################################################
def show_images(self, height = 300): #showing the image which is necessary every iteration
cv.imshow("Original", imutils.resize(self.img, height = 200))
def check_keys(self):
if self.check_wait: #if going through the contours, check if q is pressed
key = cv.waitKey(0) & 0xFF
print("NEXT IMAGE")
if key == ord('q'): #exit button
exit(0)
else:
key = cv.waitKey(1)
if key & 0xFF == ord('q'): #exit button
exit(0)
elif key == ord('s'): #skip forward in the video
skip_forward()
elif key & 0xFF == ord('p'): # this creates a pause button for the video, in essence
| print("VIDEO PAUSED @ FRAME {}".format(cap.get(cv.CAP_PROP_POS_FRAMES)))
while True:
key = cv.waitKey(25) & 0xFF
if key == ord('p'): #unpause
break
elif key == ord('q'): #quit the program button
exit(0)
elif key == ord('s'): #skip forward
skip_forward() | conditional_block |