code
stringlengths
4
1.01M
language
stringclasses
2 values
package org.lemurproject.galago.core.util; import org.lemurproject.galago.core.retrieval.iterator.BaseIterator; import org.lemurproject.galago.core.retrieval.traversal.Traversal; import org.lemurproject.galago.utility.Parameters; import java.util.ArrayList; import java.util.List; /** * @author jfoley. */ public class IterUtils { /** * Adds an operator into a Retrieval's parameters for usage. * @param p the parameters object * @param name the name of the operator, e.g. "combine" for #combine * @param iterClass the operator to register */ public static void addToParameters(Parameters p, String name, Class<? extends BaseIterator> iterClass) { if(!p.containsKey("operators")) { p.put("operators", Parameters.create()); } p.getMap("operators").put(name, iterClass.getName()); } /** * Adds a traversal into a Retrieval's parameters for usage. * @param argp the parameters object * @param traversalClass the traversal to register */ public static void addToParameters(Parameters argp, Class<? extends Traversal> traversalClass) { if(!argp.isList("traversals")) { argp.put("traversals", new ArrayList<>()); } List<Parameters> traversals = argp.getList("traversals", Parameters.class); traversals.add(Parameters.parseArray( "name", traversalClass.getName(), "order", "before" )); argp.put("traversals", traversals); } }
Java
from pybrain.rl.environments.timeseries.maximizereturntask import DifferentialSharpeRatioTask from pybrain.rl.environments.timeseries.timeseries import AR1Environment, SnPEnvironment from pybrain.rl.learners.valuebased.linearfa import Q_LinFA from pybrain.rl.agents.linearfa import LinearFA_Agent from pybrain.rl.experiments import ContinuousExperiment from matplotlib import pyplot """ This script aims to create a trading model that trades on a simple AR(1) process """ env=AR1Environment(2000) task=DifferentialSharpeRatioTask(env) learner = Q_LinFA(2,1) agent = LinearFA_Agent(learner) exp = ContinuousExperiment(task,agent) from decimal import Decimal ts=env.ts.tolist() exp.doInteractionsAndLearn(1999) actionHist=env.actionHistory pyplot.plot(ts[0]) pyplot.plot(actionHist) pyplot.show() #snp_rets=env.importSnP().tolist()[0] #print(snp_rets.tolist()[0]) #pyplot.plot(snp_rets) #pyplot.show() #cumret= cumsum(multiply(ts,actionHist)) #exp.doInteractions(200)
Java
using Basic.Azure.Storage.Communications.Core; using Basic.Azure.Storage.Communications.Core.Interfaces; using Basic.Azure.Storage.Communications.TableService.Interfaces; using Newtonsoft.Json; using System; using System.Collections.Generic; using System.Linq; using System.Net; using System.Text; using System.Threading.Tasks; namespace Basic.Azure.Storage.Communications.TableService.EntityOperations { public class DeleteEntityRequest : RequestBase<EmptyResponsePayload>, ISendAdditionalRequiredHeaders { private string _tableName; private string _partitionKey; private string _rowKey; private string _etag; public DeleteEntityRequest(StorageAccountSettings settings, string tableName, string partitionKey, string rowKey, string etag) : base(settings) { _tableName = tableName; _partitionKey = partitionKey; _rowKey = rowKey; if (string.IsNullOrEmpty(etag)) _etag = "*"; else _etag = etag; } protected override string HttpMethod { get { return "DELETE"; } } protected override StorageServiceType ServiceType { get { return StorageServiceType.TableService; } } protected override RequestUriBuilder GetUriBase() { var builder = new RequestUriBuilder(Settings.TableEndpoint); builder.AddSegment(String.Format("{0}(PartitionKey='{1}',RowKey='{2}')", _tableName, _partitionKey, _rowKey)); return builder; } public void ApplyAdditionalRequiredHeaders(System.Net.WebRequest request) { request.ContentType = "application/json;charset=utf-8"; if (request is HttpWebRequest) { ((HttpWebRequest)request).Accept = "application/json"; } else { request.Headers.Add(ProtocolConstants.Headers.Accept, "application/json"); } request.Headers.Add(ProtocolConstants.Headers.IfMatch, _etag); } } }
Java
/****************************************************************/ /* 1. BACKGROUND AND TEXT COLOURS */ /****************************************************************/ /* 1.1 Default background colour and text colour */ body{text-align:center;background : url(../img/back.gif);} /* �������� ����� */ .djangobb {COLOR: #333} DIV.box, #adminconsole FIELDSET TH, .rowodd, .roweven {BACKGROUND-COLOR: #f8f8f8} .djangobb BLOCKQUOTE, DIV.code {BACKGROUND-COLOR: #f1f1f1} #adminconsole TD, #adminconsole TH {BORDER-COLOR: #eeeeee} /*<<<<<<<<<<<neizvestno*/ /* 1. 2 Darker background colours */ TD.tc2, TD.tc3, TD.tcmod, #postpreview, #viewprofile DD, DIV.forminfo, #adminconsole FIELDSET TD, DIV.blockmenu DIV.box, #adstats DD {BACKGROUND-COLOR: #f5f5f5} TH.tc3 {width: 70px} /* 1.3 Main headers and navigation bar background and text colour */ .djangobb H2, #brdmenu {BACKGROUND: url(../img/fon111.gif); COLOR: #FFF; font-weight: bold; height: 16px;} .djangobb H2 {font-weight: bold;BACKGROUND: #46586a url(../img/fon111.gif);border-top: 1px solid #46586a;border-left: 1px solid #46586a;border-right: 1px solid #46586a; height: 16px;} .djangobb H2 a.toggle { float: right; BACKGROUND: url(../img/cat_minimize.png); display:block; height:17px; line-height:1px; outline-color:-moz-use-text-color; outline-style:none; outline-width:0; text-indent:-3000em; width:28px; } .djangobb H2.collapsed a.toggle { background-image:url(../img/cat_maximize.png); } /* 1.4 Table header rows */ .djangobb TH {BACKGROUND-COLOR: #eeeeee;} /* 1.5 Fieldset legend text colour */ .djangobb LEGEND {COLOR: #5A799D} /* 1.6 Highlighted text for various items */ .djangobb DIV.blockmenu LI.isactive A, #posterror LI STRONG {COLOR: #46586a} /****************************************************************/ /* 2. POST BACKGROUNDS AND TEXT */ /****************************************************************/ /* 2.1 This is the setup for posts. 
*/ DIV.blockpost DIV.box, DIV.postright, DIV.postfootright {BACKGROUND-COLOR: #f8f8f8} DIV.postright, DIV.postfootright {BORDER-LEFT-COLOR: #f1f1f1} DIV.postleft, DIV.postfootleft, DIV.blockpost LABEL {BACKGROUND-COLOR: #f1f1f1} /* 2.2 Background for post headers and text colour for post numbers in viewtopic */ DIV.blockpost H2 {BACKGROUND-COLOR: #46586a} DIV.blockpost H2 SPAN.conr {COLOR: #CCC} /* 2.3 This is the line above the signature in posts. Colour and background should be the same */ .djangobb HR {BACKGROUND-COLOR: #35567C; COLOR: #35567C} /****************************************************************/ /* 3. BORDER COLOURS */ /****************************************************************/ /* 3.1 All external borders */ DIV.box {BORDER-COLOR: #46586a} /* 3.2 Makes the top border of posts match the colour used for post headers */ /*DIV.blockpost DIV.box {BORDER-COLOR: #686E80 #686E80 #686E80}*/ DIV.blockpost DIV.box {BORDER: 1px solid #46586a;BORDER-TOP: 1px solid #46586a;} /* 3.3 Table internal borders. By default TH is same as background so border is invisible */ .djangobb TD {BORDER-COLOR: #d6e1f1} .djangobb TH {BORDER-COLOR: #d6e1f1} /* 3.4 Creates the inset border for quote boxes, code boxes and form info boxes */ .djangobb BLOCKQUOTE, DIV.code, DIV.forminfo, DIV.blockpost LABEL {BORDER-COLOR: #46586a #46586a #46586a #46586a} /* 3.5 Gecko's default fieldset borders are really nasty so this gives them a colour without interferring with IE's rather nice default */ /****************************************************************/ /* 4. 
LINK COLOURS */ /****************************************************************/ /* 4.1 This is the default for all links */ .djangobb A:link, .djangobb A:visited {COLOR: #46586a;} .djangobb A:hover {COLOR: #B42000; } /* 4.2 This is the colour for links in header rows and the navigation bar */ .djangobb H2 A:link, .djangobb H2 A:visited, #brdmenu A:link, #brdmenu A:visited {COLOR: #FFF;text-decoration : underline; } .djangobb H2 A:hover, #brdmenu A:hover {COLOR: #35567C;} /* 4.3 This is for closed topics and "hot" links */ LI.postreport A:link, LI.postreport A:visited, TR.iclosed TD.tcl A:link, TR.iclosed TD.tcl A:visited {COLOR: #888} LI.postreport A:hover, TR.iclosed TD.tcl A:hover {COLOR: #AAA} LI.maintenancelink A:link, LI.maintenancelink A:visited {COLOR: #B42000} LI.maintenancelink A:hover {COLOR: #B42000} /****************************************************************/ /* 5. POST STATUS INDICATORS */ /****************************************************************/ /* These are the post status indicators which appear at the left of some tables. .inew = new posts, .iredirect = redirect forums, .iclosed = closed topics and .isticky = sticky topics. The default is "icon". By default only .inew is different.*/ DIV.forum_icon {background:url(../img/active_forum.gif)} DIV.icon {background:url(../img/inactive_topic.gif)} DIV.inew {background:url(../img/active_topic.gif)} DIV.sticky {background:url(../img/sticky_topic.gif)} DIV.closed {background:url(../img/closed_topic.gif)} TR.inew DIV.icon {background:url(../img/active_topic.gif)} TR.isticky DIV.inew {background:url(../img/active_topic.gif)} TR.iclosed DIV.icon {background:url(../img/closed.png)} DIV.iredirect {background:url(../img/link.png)} /****************************************************************/ /* 6. 
EXTRAS */ /****************************************************************/ #brdmenu {BACKGROUND: url(../img/fon111.gif); border-top: 1px solid #46586a;border-bottom: 1px solid #46586a; BACKGROUND-COLOR: #46586a;} /* header logo */ .djangobb H1 span { color: #3A3B3C; font-size: 16px; /*bylo 12*/ font-family: Tahoma, Verdana, Arial, Helvetica, sans-serif; } .djangobb h2 span { background:transparent url(../img/category.gif) no-repeat scroll left center; } .djangobb h2 span.conr { background-image:none; } /* board stats icon */ #djangobbindex #brdstats div.box div.inbox dl.conl, #djangobbforum #brdstats div.box div.inbox dl.conl { background: transparent url(../img/stats.gif) no-repeat 0% 50%; padding-left: 38px; }
Java
#pragma once #include <deque> #include <functional> #include <pthread.h> #include "basetypes.h" #include "misc.h" #define Pthread_spin_init(l, pshared) verify(pthread_spin_init(l, (pshared)) == 0) #define Pthread_spin_lock(l) verify(pthread_spin_lock(l) == 0) #define Pthread_spin_unlock(l) verify(pthread_spin_unlock(l) == 0) #define Pthread_spin_destroy(l) verify(pthread_spin_destroy(l) == 0) #define Pthread_mutex_init(m, attr) verify(pthread_mutex_init(m, attr) == 0) #define Pthread_mutex_lock(m) verify(pthread_mutex_lock(m) == 0) #define Pthread_mutex_unlock(m) verify(pthread_mutex_unlock(m) == 0) #define Pthread_mutex_destroy(m) verify(pthread_mutex_destroy(m) == 0) #define Pthread_cond_init(c, attr) verify(pthread_cond_init(c, attr) == 0) #define Pthread_cond_destroy(c) verify(pthread_cond_destroy(c) == 0) #define Pthread_cond_signal(c) verify(pthread_cond_signal(c) == 0) #define Pthread_cond_broadcast(c) verify(pthread_cond_broadcast(c) == 0) #define Pthread_cond_wait(c, m) verify(pthread_cond_wait(c, m) == 0) #define Pthread_create(th, attr, func, arg) verify(pthread_create(th, attr, func, arg) == 0) #define Pthread_join(th, attr) verify(pthread_join(th, attr) == 0) namespace base { class Lockable: public NoCopy { public: virtual void lock() = 0; virtual void unlock() = 0; }; class SpinLock: public Lockable { public: SpinLock(): locked_(false) { } void lock(); void unlock() { __sync_lock_release(&locked_); } private: volatile bool locked_ __attribute__((aligned (64))); }; class Mutex: public Lockable { public: Mutex() { pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); Pthread_mutex_init(&m_, &attr); } ~Mutex() { Pthread_mutex_destroy(&m_); } void lock() { Pthread_mutex_lock(&m_); } void unlock() { Pthread_mutex_unlock(&m_); } private: friend class CondVar; pthread_mutex_t m_; }; // choice between spinlock & mutex: // * when n_thread > n_core, use mutex // * on virtual machines, use mutex 
class ScopedLock: public NoCopy { public: explicit ScopedLock(Lockable* lock): m_(lock) { m_->lock(); } explicit ScopedLock(Lockable& lock): m_(&lock) { m_->lock(); } ~ScopedLock() { m_->unlock(); } private: Lockable* m_; }; class CondVar: public NoCopy { public: CondVar() { Pthread_cond_init(&cv_, nullptr); } ~CondVar() { Pthread_cond_destroy(&cv_); } void wait(Mutex& m) { Pthread_cond_wait(&cv_, &m.m_); } void signal() { Pthread_cond_signal(&cv_); } void bcast() { Pthread_cond_broadcast(&cv_); } int timed_wait(Mutex& m, double sec); private: pthread_cond_t cv_; }; /** * Thread safe queue. */ template<class T> class Queue: public NoCopy { std::deque<T> q_; pthread_cond_t not_empty_; pthread_mutex_t m_; public: Queue() { Pthread_mutex_init(&m_, nullptr); Pthread_cond_init(&not_empty_, nullptr); } ~Queue() { Pthread_cond_destroy(&not_empty_); Pthread_mutex_destroy(&m_); } void push(const T& e) { Pthread_mutex_lock(&m_); q_.push_back(e); Pthread_cond_signal(&not_empty_); Pthread_mutex_unlock(&m_); } bool try_pop(T* t) { bool ret = false; Pthread_mutex_lock(&m_); if (!q_.empty()) { ret = true; *t = q_.front(); q_.pop_front(); } Pthread_mutex_unlock(&m_); return ret; } bool try_pop_but_ignore(T* t, const T& ignore) { bool ret = false; Pthread_mutex_lock(&m_); if (!q_.empty() && q_.front() != ignore) { ret = true; *t = q_.front(); q_.pop_front(); } Pthread_mutex_unlock(&m_); return ret; } T pop() { Pthread_mutex_lock(&m_); while (q_.empty()) { Pthread_cond_wait(&not_empty_, &m_); } T e = q_.front(); q_.pop_front(); Pthread_mutex_unlock(&m_); return e; } }; class ThreadPool: public RefCounted { int n_; Counter round_robin_; pthread_t* th_; Queue<std::function<void()>*>* q_; bool should_stop_; static void* start_thread_pool(void*); void run_thread(int id_in_pool); protected: ~ThreadPool(); public: ThreadPool(int n = get_ncpu() * 2); // return 0 when queuing ok, otherwise EPERM int run_async(const std::function<void()>&, int queuing_channel = -1); }; class RunLater: public 
RefCounted { typedef std::pair<double, std::function<void()>*> job_t; pthread_t th_; pthread_mutex_t m_; pthread_cond_t cv_; bool should_stop_; SpinLock latest_l_; double latest_; std::priority_queue<job_t, std::vector<job_t>, std::greater<job_t>> jobs_; static void* start_run_later(void*); void run_later_loop(); void try_one_job(); public: RunLater(); // return 0 when queuing ok, otherwise EPERM int run_later(double sec, const std::function<void()>&); double max_wait() const; protected: ~RunLater(); }; } // namespace base
Java
<!-- @license Copyright (c) 2014 The Polymer Project Authors. All rights reserved. This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt Code distributed by Google as part of the polymer project is also subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt --> <script> (function() { 'use strict'; var wrap = Polymer.DomApi.wrap; // detect native touch action support var HAS_NATIVE_TA = typeof document.head.style.touchAction === 'string'; var GESTURE_KEY = '__polymerGestures'; var HANDLED_OBJ = '__polymerGesturesHandled'; var TOUCH_ACTION = '__polymerGesturesTouchAction'; // radius for tap and track var TAP_DISTANCE = 25; var TRACK_DISTANCE = 5; // number of last N track positions to keep var TRACK_LENGTH = 2; // Disabling "mouse" handlers for 2500ms is enough var MOUSE_TIMEOUT = 2500; var MOUSE_EVENTS = ['mousedown', 'mousemove', 'mouseup', 'click']; // an array of bitmask values for mapping MouseEvent.which to MouseEvent.buttons var MOUSE_WHICH_TO_BUTTONS = [0, 1, 4, 2]; var MOUSE_HAS_BUTTONS = (function() { try { return new MouseEvent('test', {buttons: 1}).buttons === 1; } catch (e) { return false; } })(); // Check for touch-only devices var IS_TOUCH_ONLY = navigator.userAgent.match(/iP(?:[oa]d|hone)|Android/); // touch will make synthetic mouse events // `preventDefault` on touchend will cancel them, // but this breaks `<input>` focus and link clicks // disable mouse handlers for MOUSE_TIMEOUT ms after // a touchend to ignore synthetic mouse events var mouseCanceller = function(mouseEvent) { // Check for sourceCapabilities, used to distinguish synthetic events // if mouseEvent did not come from a device that fires touch events, // it was made by a real mouse and should be counted // 
http://wicg.github.io/InputDeviceCapabilities/#dom-inputdevicecapabilities-firestouchevents var sc = mouseEvent.sourceCapabilities; if (sc && !sc.firesTouchEvents) { return; } // skip synthetic mouse events mouseEvent[HANDLED_OBJ] = {skip: true}; // disable "ghost clicks" if (mouseEvent.type === 'click') { var path = Polymer.dom(mouseEvent).path; for (var i = 0; i < path.length; i++) { if (path[i] === POINTERSTATE.mouse.target) { return; } } mouseEvent.preventDefault(); mouseEvent.stopPropagation(); } }; function setupTeardownMouseCanceller(setup) { for (var i = 0, en; i < MOUSE_EVENTS.length; i++) { en = MOUSE_EVENTS[i]; if (setup) { document.addEventListener(en, mouseCanceller, true); } else { document.removeEventListener(en, mouseCanceller, true); } } } function ignoreMouse() { if (IS_TOUCH_ONLY) { return; } if (!POINTERSTATE.mouse.mouseIgnoreJob) { setupTeardownMouseCanceller(true); } var unset = function() { setupTeardownMouseCanceller(); POINTERSTATE.mouse.target = null; POINTERSTATE.mouse.mouseIgnoreJob = null; }; POINTERSTATE.mouse.mouseIgnoreJob = Polymer.Debounce(POINTERSTATE.mouse.mouseIgnoreJob, unset, MOUSE_TIMEOUT); } function hasLeftMouseButton(ev) { var type = ev.type; // exit early if the event is not a mouse event if (MOUSE_EVENTS.indexOf(type) === -1) { return false; } // ev.button is not reliable for mousemove (0 is overloaded as both left button and no buttons) // instead we use ev.buttons (bitmask of buttons) or fall back to ev.which (deprecated, 0 for no buttons, 1 for left button) if (type === 'mousemove') { // allow undefined for testing events var buttons = ev.buttons === undefined ? 1 : ev.buttons; if ((ev instanceof window.MouseEvent) && !MOUSE_HAS_BUTTONS) { buttons = MOUSE_WHICH_TO_BUTTONS[ev.which] || 0; } // buttons is a bitmask, check that the left button bit is set (1) return Boolean(buttons & 1); } else { // allow undefined for testing events var button = ev.button === undefined ? 
0 : ev.button; // ev.button is 0 in mousedown/mouseup/click for left button activation return button === 0; } } function isSyntheticClick(ev) { if (ev.type === 'click') { // ev.detail is 0 for HTMLElement.click in most browsers if (ev.detail === 0) { return true; } // in the worst case, check that the x/y position of the click is within // the bounding box of the target of the event // Thanks IE 10 >:( var t = Gestures.findOriginalTarget(ev); var bcr = t.getBoundingClientRect(); // use page x/y to account for scrolling var x = ev.pageX, y = ev.pageY; // ev is a synthetic click if the position is outside the bounding box of the target return !((x >= bcr.left && x <= bcr.right) && (y >= bcr.top && y <= bcr.bottom)); } return false; } var POINTERSTATE = { mouse: { target: null, mouseIgnoreJob: null }, touch: { x: 0, y: 0, id: -1, scrollDecided: false } }; function firstTouchAction(ev) { var path = Polymer.dom(ev).path; var ta = 'auto'; for (var i = 0, n; i < path.length; i++) { n = path[i]; if (n[TOUCH_ACTION]) { ta = n[TOUCH_ACTION]; break; } } return ta; } function trackDocument(stateObj, movefn, upfn) { stateObj.movefn = movefn; stateObj.upfn = upfn; document.addEventListener('mousemove', movefn); document.addEventListener('mouseup', upfn); } function untrackDocument(stateObj) { document.removeEventListener('mousemove', stateObj.movefn); document.removeEventListener('mouseup', stateObj.upfn); stateObj.movefn = null; stateObj.upfn = null; } var Gestures = { gestures: {}, recognizers: [], deepTargetFind: function(x, y) { var node = document.elementFromPoint(x, y); var next = node; // this code path is only taken when native ShadowDOM is used // if there is a shadowroot, it may have a node at x/y // if there is not a shadowroot, exit the loop while (next && next.shadowRoot) { // if there is a node at x/y in the shadowroot, look deeper next = next.shadowRoot.elementFromPoint(x, y); if (next) { node = next; } } return node; }, // a cheaper check than 
Polymer.dom(ev).path[0]; findOriginalTarget: function(ev) { // shadowdom if (ev.path) { return ev.path[0]; } // shadydom return ev.target; }, handleNative: function(ev) { var handled; var type = ev.type; var node = wrap(ev.currentTarget); var gobj = node[GESTURE_KEY]; if (!gobj) { return; } var gs = gobj[type]; if (!gs) { return; } if (!ev[HANDLED_OBJ]) { ev[HANDLED_OBJ] = {}; if (type.slice(0, 5) === 'touch') { var t = ev.changedTouches[0]; if (type === 'touchstart') { // only handle the first finger if (ev.touches.length === 1) { POINTERSTATE.touch.id = t.identifier; } } if (POINTERSTATE.touch.id !== t.identifier) { return; } if (!HAS_NATIVE_TA) { if (type === 'touchstart' || type === 'touchmove') { Gestures.handleTouchAction(ev); } } // disable synth mouse events, unless this event is itself simulated if (type === 'touchend') { POINTERSTATE.mouse.target = Polymer.dom(ev).rootTarget; // ignore syntethic mouse events after a touch ignoreMouse(); } } } handled = ev[HANDLED_OBJ]; // used to ignore synthetic mouse events if (handled.skip) { return; } var recognizers = Gestures.recognizers; // reset recognizer state for (var i = 0, r; i < recognizers.length; i++) { r = recognizers[i]; if (gs[r.name] && !handled[r.name]) { if (r.flow && r.flow.start.indexOf(ev.type) > -1 && r.reset) { r.reset(); } } } // enforce gesture recognizer order for (i = 0, r; i < recognizers.length; i++) { r = recognizers[i]; if (gs[r.name] && !handled[r.name]) { handled[r.name] = true; r[type](ev); } } }, handleTouchAction: function(ev) { var t = ev.changedTouches[0]; var type = ev.type; if (type === 'touchstart') { POINTERSTATE.touch.x = t.clientX; POINTERSTATE.touch.y = t.clientY; POINTERSTATE.touch.scrollDecided = false; } else if (type === 'touchmove') { if (POINTERSTATE.touch.scrollDecided) { return; } POINTERSTATE.touch.scrollDecided = true; var ta = firstTouchAction(ev); var prevent = false; var dx = Math.abs(POINTERSTATE.touch.x - t.clientX); var dy = Math.abs(POINTERSTATE.touch.y - 
t.clientY); if (!ev.cancelable) { // scrolling is happening } else if (ta === 'none') { prevent = true; } else if (ta === 'pan-x') { prevent = dy > dx; } else if (ta === 'pan-y') { prevent = dx > dy; } if (prevent) { ev.preventDefault(); } else { Gestures.prevent('track'); } } }, // automate the event listeners for the native events add: function(node, evType, handler) { // SD polyfill: handle case where `node` is unwrapped, like `document` node = wrap(node); var recognizer = this.gestures[evType]; var deps = recognizer.deps; var name = recognizer.name; var gobj = node[GESTURE_KEY]; if (!gobj) { node[GESTURE_KEY] = gobj = {}; } for (var i = 0, dep, gd; i < deps.length; i++) { dep = deps[i]; // don't add mouse handlers on iOS because they cause gray selection overlays if (IS_TOUCH_ONLY && MOUSE_EVENTS.indexOf(dep) > -1) { continue; } gd = gobj[dep]; if (!gd) { gobj[dep] = gd = {_count: 0}; } if (gd._count === 0) { node.addEventListener(dep, this.handleNative); } gd[name] = (gd[name] || 0) + 1; gd._count = (gd._count || 0) + 1; } node.addEventListener(evType, handler); if (recognizer.touchAction) { this.setTouchAction(node, recognizer.touchAction); } }, // automate event listener removal for native events remove: function(node, evType, handler) { // SD polyfill: handle case where `node` is unwrapped, like `document` node = wrap(node); var recognizer = this.gestures[evType]; var deps = recognizer.deps; var name = recognizer.name; var gobj = node[GESTURE_KEY]; if (gobj) { for (var i = 0, dep, gd; i < deps.length; i++) { dep = deps[i]; gd = gobj[dep]; if (gd && gd[name]) { gd[name] = (gd[name] || 1) - 1; gd._count = (gd._count || 1) - 1; if (gd._count === 0) { node.removeEventListener(dep, this.handleNative); } } } } node.removeEventListener(evType, handler); }, register: function(recog) { this.recognizers.push(recog); for (var i = 0; i < recog.emits.length; i++) { this.gestures[recog.emits[i]] = recog; } }, findRecognizerByEvent: function(evName) { for (var i = 0, r; i 
< this.recognizers.length; i++) { r = this.recognizers[i]; for (var j = 0, n; j < r.emits.length; j++) { n = r.emits[j]; if (n === evName) { return r; } } } return null; }, // set scrolling direction on node to check later on first move // must call this before adding event listeners! setTouchAction: function(node, value) { if (HAS_NATIVE_TA) { node.style.touchAction = value; } node[TOUCH_ACTION] = value; }, fire: function(target, type, detail) { var ev = Polymer.Base.fire(type, detail, { node: target, bubbles: true, cancelable: true }); // forward `preventDefault` in a clean way if (ev.defaultPrevented) { var preventer = detail.preventer || detail.sourceEvent; if (preventer && preventer.preventDefault) { preventer.preventDefault(); } } }, prevent: function(evName) { var recognizer = this.findRecognizerByEvent(evName); if (recognizer.info) { recognizer.info.prevent = true; } }, /** * Reset the 2500ms timeout on processing mouse input after detecting touch input. * * Touch inputs create synthesized mouse inputs anywhere from 0 to 2000ms after the touch. * This method should only be called during testing with simulated touch inputs. * Calling this method in production may cause duplicate taps or other gestures. 
* * @method resetMouseCanceller */ resetMouseCanceller: function() { if (POINTERSTATE.mouse.mouseIgnoreJob) { POINTERSTATE.mouse.mouseIgnoreJob.complete(); } } }; Gestures.register({ name: 'downup', deps: ['mousedown', 'touchstart', 'touchend'], flow: { start: ['mousedown', 'touchstart'], end: ['mouseup', 'touchend'] }, emits: ['down', 'up'], info: { movefn: null, upfn: null }, reset: function() { untrackDocument(this.info); }, mousedown: function(e) { if (!hasLeftMouseButton(e)) { return; } var t = Gestures.findOriginalTarget(e); var self = this; var movefn = function movefn(e) { if (!hasLeftMouseButton(e)) { self.fire('up', t, e); untrackDocument(self.info); } }; var upfn = function upfn(e) { if (hasLeftMouseButton(e)) { self.fire('up', t, e); } untrackDocument(self.info); }; trackDocument(this.info, movefn, upfn); this.fire('down', t, e); }, touchstart: function(e) { this.fire('down', Gestures.findOriginalTarget(e), e.changedTouches[0], e); }, touchend: function(e) { this.fire('up', Gestures.findOriginalTarget(e), e.changedTouches[0], e); }, fire: function(type, target, event, preventer) { Gestures.fire(target, type, { x: event.clientX, y: event.clientY, sourceEvent: event, preventer: preventer, prevent: function(e) { return Gestures.prevent(e); } }); } }); Gestures.register({ name: 'track', touchAction: 'none', deps: ['mousedown', 'touchstart', 'touchmove', 'touchend'], flow: { start: ['mousedown', 'touchstart'], end: ['mouseup', 'touchend'] }, emits: ['track'], info: { x: 0, y: 0, state: 'start', started: false, moves: [], addMove: function(move) { if (this.moves.length > TRACK_LENGTH) { this.moves.shift(); } this.moves.push(move); }, movefn: null, upfn: null, prevent: false }, reset: function() { this.info.state = 'start'; this.info.started = false; this.info.moves = []; this.info.x = 0; this.info.y = 0; this.info.prevent = false; untrackDocument(this.info); }, hasMovedEnough: function(x, y) { if (this.info.prevent) { return false; } if (this.info.started) { 
return true; } var dx = Math.abs(this.info.x - x); var dy = Math.abs(this.info.y - y); return (dx >= TRACK_DISTANCE || dy >= TRACK_DISTANCE); }, mousedown: function(e) { if (!hasLeftMouseButton(e)) { return; } var t = Gestures.findOriginalTarget(e); var self = this; var movefn = function movefn(e) { var x = e.clientX, y = e.clientY; if (self.hasMovedEnough(x, y)) { // first move is 'start', subsequent moves are 'move', mouseup is 'end' self.info.state = self.info.started ? (e.type === 'mouseup' ? 'end' : 'track') : 'start'; if (self.info.state === 'start') { // if and only if tracking, always prevent tap Gestures.prevent('tap'); } self.info.addMove({x: x, y: y}); if (!hasLeftMouseButton(e)) { // always fire "end" self.info.state = 'end'; untrackDocument(self.info); } self.fire(t, e); self.info.started = true; } }; var upfn = function upfn(e) { if (self.info.started) { movefn(e); } // remove the temporary listeners untrackDocument(self.info); }; // add temporary document listeners as mouse retargets trackDocument(this.info, movefn, upfn); this.info.x = e.clientX; this.info.y = e.clientY; }, touchstart: function(e) { var ct = e.changedTouches[0]; this.info.x = ct.clientX; this.info.y = ct.clientY; }, touchmove: function(e) { var t = Gestures.findOriginalTarget(e); var ct = e.changedTouches[0]; var x = ct.clientX, y = ct.clientY; if (this.hasMovedEnough(x, y)) { if (this.info.state === 'start') { // if and only if tracking, always prevent tap Gestures.prevent('tap'); } this.info.addMove({x: x, y: y}); this.fire(t, ct); this.info.state = 'track'; this.info.started = true; } }, touchend: function(e) { var t = Gestures.findOriginalTarget(e); var ct = e.changedTouches[0]; // only trackend if track was started and not aborted if (this.info.started) { // reset started state on up this.info.state = 'end'; this.info.addMove({x: ct.clientX, y: ct.clientY}); this.fire(t, ct, e); } }, fire: function(target, touch, preventer) { var secondlast = 
this.info.moves[this.info.moves.length - 2]; var lastmove = this.info.moves[this.info.moves.length - 1]; var dx = lastmove.x - this.info.x; var dy = lastmove.y - this.info.y; var ddx, ddy = 0; if (secondlast) { ddx = lastmove.x - secondlast.x; ddy = lastmove.y - secondlast.y; } return Gestures.fire(target, 'track', { state: this.info.state, x: touch.clientX, y: touch.clientY, dx: dx, dy: dy, ddx: ddx, ddy: ddy, sourceEvent: touch, preventer: preventer, hover: function() { return Gestures.deepTargetFind(touch.clientX, touch.clientY); } }); } }); Gestures.register({ name: 'tap', deps: ['mousedown', 'click', 'touchstart', 'touchend'], flow: { start: ['mousedown', 'touchstart'], end: ['click', 'touchend'] }, emits: ['tap'], info: { x: NaN, y: NaN, prevent: false }, reset: function() { this.info.x = NaN; this.info.y = NaN; this.info.prevent = false; }, save: function(e) { this.info.x = e.clientX; this.info.y = e.clientY; }, mousedown: function(e) { if (hasLeftMouseButton(e)) { this.save(e); } }, click: function(e) { if (hasLeftMouseButton(e)) { this.forward(e); } }, touchstart: function(e) { this.save(e.changedTouches[0], e); }, touchend: function(e) { this.forward(e.changedTouches[0], e); }, forward: function(e, preventer) { var dx = Math.abs(e.clientX - this.info.x); var dy = Math.abs(e.clientY - this.info.y); var t = Gestures.findOriginalTarget(e); // dx,dy can be NaN if `click` has been simulated and there was no `down` for `start` if (isNaN(dx) || isNaN(dy) || (dx <= TAP_DISTANCE && dy <= TAP_DISTANCE) || isSyntheticClick(e)) { // prevent taps from being generated if an event has canceled them if (!this.info.prevent) { Gestures.fire(t, 'tap', { x: e.clientX, y: e.clientY, sourceEvent: e, preventer: preventer }); } } } }); var DIRECTION_MAP = { x: 'pan-x', y: 'pan-y', none: 'none', all: 'auto' }; Polymer.Base._addFeature({ _setupGestures: function() { this.__polymerGestures = null; }, // override _listen to handle gestures _listen: function(node, eventName, handler) 
{ if (Gestures.gestures[eventName]) { Gestures.add(node, eventName, handler); } else { node.addEventListener(eventName, handler); } }, // override _unlisten to handle gestures _unlisten: function(node, eventName, handler) { if (Gestures.gestures[eventName]) { Gestures.remove(node, eventName, handler); } else { node.removeEventListener(eventName, handler); } }, /** * Override scrolling behavior to all direction, one direction, or none. * * Valid scroll directions: * - 'all': scroll in any direction * - 'x': scroll only in the 'x' direction * - 'y': scroll only in the 'y' direction * - 'none': disable scrolling for this node * * @method setScrollDirection * @param {String=} direction Direction to allow scrolling * Defaults to `all`. * @param {HTMLElement=} node Element to apply scroll direction setting. * Defaults to `this`. */ setScrollDirection: function(direction, node) { node = node || this; Gestures.setTouchAction(node, DIRECTION_MAP[direction] || 'auto'); } }); // export Polymer.Gestures = Gestures; })(); </script>
Java
require 'rubygems'
require 'cgi' # BUG FIX: CGI::escapeHTML is used below but cgi was never required

# The helper class exists to do string manipulation and heavy lifting

# HTML-escapes every value in the hash and converts the report's inline
# editing markup into XML elements:
#   *- -*  -> <bullet>,  [== ==] -> <h4>,  [-- --] -> <indented>,  [~~ ~~] -> <italics>
# CRLF-separated values (and the well-known multi-line fields) are wrapped
# in <paragraph> elements. Mutates and returns the hash.
def url_escape_hash(hash)
  hash.each do |k, v|
    if v
      v = CGI::escapeHTML(v)
      # convert bullets
      v = v.gsub("*-", "<bullet>")
      v = v.gsub("-*", "</bullet>")
      # convert h4
      v = v.gsub("[==", "<h4>")
      v = v.gsub("==]", "</h4>")
      # convert indented text
      v = v.gsub("[--", "<indented>")
      v = v.gsub("--]", "</indented>")
      # convert italics
      v = v.gsub("[~~", "<italics>")
      v = v.gsub("~~]", "</italics>")
    end

    # replace linebreaks with paragraph xml elements
    if v =~ /\r\n/
      new_v = ""
      v.split("\r\n").each do |br|
        new_v << "<paragraph>" << br << "</paragraph>"
      end
      v = new_v
    elsif k == "remediation" or k == "overview" or k == "poc" or k == "affected_hosts"
      # single-line values of these fields still get one paragraph wrapper
      # (a nil value becomes an empty <paragraph></paragraph>, as before)
      v = "<paragraph>#{v}</paragraph>"
    end

    hash[k] = v
  end
  return hash
end

# Converts stored XML elements back into the inline editing markup
# (inverse of url_escape_hash's markup conversion). Paragraph openers become
# the &#x000A; linebreak entity; closers are dropped.
def meta_markup(text)
  new_text = text.gsub("<paragraph>", "&#x000A;").gsub("</paragraph>", "")
  new_text = new_text.gsub("<bullet>", "*-").gsub("</bullet>", "-*")
  new_text = new_text.gsub("<h4>", "[==").gsub("</h4>", "==]")
  new_text = new_text.gsub("<code>", "[[[").gsub("</code>", "]]]")
  new_text = new_text.gsub("<indented>", "[--").gsub("</indented>", "--]")
  new_text = new_text.gsub("<italics>", "[~~").gsub("</italics>", "~~]")
end

# URL escaping messes up the inserted XML, this method switches it back to
# XML elements. Also substitutes the <<CUSTOMER>> placeholder and un-escapes
# a few entities for readability.
def meta_markup_unencode(findings_xml, customer_name)
  # code tags get added in later
  findings_xml = findings_xml.gsub("[[[", "<code>")
  findings_xml = findings_xml.gsub("]]]", "</code>")

  # restore the structural elements that HTML-escaping mangled
  ["paragraph", "bullet", "h4", "code", "indented", "italics"].each do |tag|
    findings_xml = findings_xml.gsub("&lt;#{tag}&gt;", "<#{tag}>")
    findings_xml = findings_xml.gsub("&lt;/#{tag}&gt;", "</#{tag}>")
  end

  # changes the <<CUSTOMER>> marks
  if customer_name
    findings_xml = findings_xml.gsub("&amp;lt;&amp;lt;CUSTOMER&amp;gt;&amp;gt;", "#{customer_name}")
  end

  # this is for re-upping the comment fields
  ["modified", "new_finding"].each do |tag|
    findings_xml = findings_xml.gsub("&lt;#{tag}&gt;", "<#{tag}>")
    findings_xml = findings_xml.gsub("&lt;/#{tag}&gt;", "</#{tag}>")
  end

  # these are for beautification
  findings_xml = findings_xml.gsub("&amp;quot;", "\"")
  findings_xml = findings_xml.gsub("&amp;", "&")
  # NOTE(review): by this point every "&amp;" has already been replaced, so
  # the gsub below is a no-op; preserved to keep output byte-identical.
  findings_xml = findings_xml.gsub("&amp;lt;", "&lt;").gsub("&amp;gt;", "&gt;")

  return findings_xml
end

# Tags finding text relative to the master finding:
#   - no master text       -> prefixed with <new_finding></new_finding>
#   - differs from master  -> prefixed with <modified></modified>
#   - identical            -> returned unchanged
def compare_text(new_text, orig_text)
  if orig_text == nil
    # there is no master finding, must be new
    return "<new_finding></new_finding>#{new_text}"
  end
  if new_text == orig_text
    return new_text
  else
    return "<modified></modified>#{new_text}"
  end
end

# CVSS v2 helper; the weights below are hardcoded from the CVSS v2 spec.
# data: hash of metric strings keyed "av","ac","au","c","i","a" (base),
# "e","rl","rc" (temporal), "cdp","td","cr","ir","ar" (environmental).
# Adds the computed scores to the hash (formatted to one decimal) and
# returns it.
def cvss(data)
  av = data["av"].downcase
  ac = data["ac"].downcase
  au = data["au"].downcase
  c = data["c"].downcase
  i = data["i"].downcase
  a = data["a"].downcase
  e = data["e"].downcase
  rl = data["rl"].downcase
  rc = data["rc"].downcase
  cdp = data["cdp"].downcase
  td = data["td"].downcase
  cr = data["cr"].downcase
  ir = data["ir"].downcase
  ar = data["ar"].downcase

  # access complexity
  if ac == "high"
    cvss_ac = 0.35
  elsif ac == "medium"
    cvss_ac = 0.61
  else
    cvss_ac = 0.71
  end

  # authentication
  if au == "none"
    cvss_au = 0.704
  elsif au == "single"
    cvss_au = 0.56
  else
    cvss_au = 0.45
  end

  # access vector
  if av == "local"
    cvss_av = 0.395
  elsif av == "local network"
    cvss_av = 0.646
  else
    cvss_av = 1
  end

  # confidentiality impact
  if c == "none"
    cvss_c = 0
  elsif c == "partial"
    cvss_c = 0.275
  else
    cvss_c = 0.660
  end

  # integrity impact
  if i == "none"
    cvss_i = 0 # BUG FIX: was "00" (octal-style literal; same value, cleaner)
  elsif i == "partial"
    cvss_i = 0.275
  else
    cvss_i = 0.660
  end

  # availability impact
  if a == "none"
    cvss_a = 0
  elsif a == "partial"
    cvss_a = 0.275
  else
    cvss_a = 0.660
  end

  # temporal score calculations
  if e == "unproven exploit exists"
    cvss_e = 0.85
  elsif e == "proof-of-concept code"
    cvss_e = 0.90
  elsif e == "functional exploit exists"
    cvss_e = 0.95
  else
    cvss_e = 1
  end

  if rl == "official fix"
    cvss_rl = 0.87
  elsif rl == "temporary fix"
    cvss_rl = 0.90
  elsif rl == "workaround"
    cvss_rl = 0.95
  else
    cvss_rl = 1
  end

  if rc == "unconfirmed"
    cvss_rc = 0.90
  elsif rc == "uncorroborated"
    cvss_rc = 0.95
  else
    cvss_rc = 1
  end

  # environmental
  if cdp == "low"
    cvss_cdp = 0.1
  elsif cdp == "low-medium"
    cvss_cdp = 0.3
  elsif cdp == "medium-high"
    cvss_cdp = 0.4
  elsif cdp == "high"
    cvss_cdp = 0.5
  else
    cvss_cdp = 0
  end

  if td == "none"
    cvss_td = 0
  elsif td == "low"
    cvss_td = 0.25
  elsif td == "medium"
    cvss_td = 0.75
  else
    cvss_td = 1
  end

  if cr == "low"
    cvss_cr = 0.5
  elsif cr == "high"
    cvss_cr = 1.51
  else
    cvss_cr = 1
  end

  if ir == "low"
    cvss_ir = 0.5
  elsif ir == "high"
    cvss_ir = 1.51
  else
    cvss_ir = 1
  end

  if ar == "low"
    cvss_ar = 0.5
  elsif ar == "high"
    cvss_ar = 1.51
  else
    cvss_ar = 1
  end

  cvss_impact = 10.41 * (1 - (1 - cvss_c) * (1 - cvss_i) * (1 - cvss_a))
  cvss_exploitability = 20 * cvss_ac * cvss_au * cvss_av
  # f(Impact) factor from the CVSS v2 base equation
  if cvss_impact == 0
    cvss_impact_f = 0
  else
    cvss_impact_f = 1.176
  end
  cvss_base = (0.6 * cvss_impact + 0.4 * cvss_exploitability - 1.5) * cvss_impact_f

  cvss_temporal = cvss_base * cvss_e * cvss_rl * cvss_rc

  # AdjustedImpact is capped at 10 per the spec
  cvss_modified_impact = [10, 10.41 * (1 - (1 - cvss_c * cvss_cr) * (1 - cvss_i * cvss_ir) * (1 - cvss_a * cvss_ar))].min
  if cvss_modified_impact == 0
    cvss_modified_impact_f = 0
  else
    cvss_modified_impact_f = 1.176
  end
  cvss_modified_base = (0.6 * cvss_modified_impact + 0.4 * cvss_exploitability - 1.5) * cvss_modified_impact_f
  cvss_adjusted_temporal = cvss_modified_base * cvss_e * cvss_rl * cvss_rc
  cvss_environmental = (cvss_adjusted_temporal + (10 - cvss_adjusted_temporal) * cvss_cdp) * cvss_td

  # NOTE(review): cvss_environmental is a Float and therefore always truthy
  # in Ruby (even 0.0), so cvss_total always ends up being the environmental
  # score. Preserved as-is to keep existing report output; confirm intent
  # before changing this to a numeric check.
  if cvss_environmental
    cvss_total = cvss_environmental
  elsif cvss_temporal
    cvss_total = cvss_temporal
  else
    cvss_total = cvss_base
  end

  data["cvss_base"] = sprintf("%0.1f" % cvss_base)
  data["cvss_impact"] = sprintf("%0.1f" % cvss_impact)
  data["cvss_exploitability"] = sprintf("%0.1f" % cvss_exploitability)
  data["cvss_temporal"] = sprintf("%0.1f" % cvss_temporal)
  data["cvss_environmental"] = sprintf("%0.1f" % cvss_environmental)
  data["cvss_modified_impact"] = sprintf("%0.1f" % cvss_modified_impact)
  data["cvss_total"] = sprintf("%0.1f" % cvss_total)
  return data
end

# there are three scoring types; risk, dread and cvss
# this sets a score for all three in case the user switches later
def convert_score(finding)
  if finding.cvss_total == nil
    puts "|!| No CVSS score exists"
    finding.cvss_total = 0
  end
  if finding.dread_total == nil
    # BUG FIX: message previously said "No CVSS score exists" (copy/paste)
    puts "|!| No DREAD score exists"
    finding.dread_total = 0
  end
  if finding.risk == nil
    # BUG FIX: message previously said "No CVSS score exists" (copy/paste)
    puts "|!| No Risk score exists"
    finding.risk = 0
  end
  return finding
end
Java
/*
 * Copyright (C) Research In Motion Limited 2011. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/* Implementation of the SVG <feDropShadow> filter primitive element. */

#include "config.h"

#include "core/svg/SVGFEDropShadowElement.h"

#include "SVGNames.h"
#include "core/rendering/style/RenderStyle.h"
#include "core/rendering/style/SVGRenderStyle.h"
#include "core/svg/SVGElementInstance.h"
#include "core/svg/SVGParserUtilities.h"
#include "core/svg/graphics/filters/SVGFilterBuilder.h"

namespace WebCore {

// Animated property definitions
DEFINE_ANIMATED_STRING(SVGFEDropShadowElement, SVGNames::inAttr, In1, in1)
DEFINE_ANIMATED_NUMBER(SVGFEDropShadowElement, SVGNames::dxAttr, Dx, dx)
DEFINE_ANIMATED_NUMBER(SVGFEDropShadowElement, SVGNames::dyAttr, Dy, dy)
// The single stdDeviation attribute is exposed as two animated numbers
// (X and Y), so the MULTIPLE_WRAPPERS variant is used with distinct
// identifier keys for each wrapper.
DEFINE_ANIMATED_NUMBER_MULTIPLE_WRAPPERS(SVGFEDropShadowElement, SVGNames::stdDeviationAttr, stdDeviationXIdentifier(), StdDeviationX, stdDeviationX)
DEFINE_ANIMATED_NUMBER_MULTIPLE_WRAPPERS(SVGFEDropShadowElement, SVGNames::stdDeviationAttr, stdDeviationYIdentifier(), StdDeviationY, stdDeviationY)

BEGIN_REGISTER_ANIMATED_PROPERTIES(SVGFEDropShadowElement)
    REGISTER_LOCAL_ANIMATED_PROPERTY(in1)
    REGISTER_LOCAL_ANIMATED_PROPERTY(dx)
    REGISTER_LOCAL_ANIMATED_PROPERTY(dy)
    REGISTER_LOCAL_ANIMATED_PROPERTY(stdDeviationX)
    REGISTER_LOCAL_ANIMATED_PROPERTY(stdDeviationY)
    REGISTER_PARENT_ANIMATED_PROPERTIES(SVGFilterPrimitiveStandardAttributes)
END_REGISTER_ANIMATED_PROPERTIES

// dx, dy and both stdDeviation components default to 2 (member initializers
// below).
inline SVGFEDropShadowElement::SVGFEDropShadowElement(const QualifiedName& tagName, Document* document)
    : SVGFilterPrimitiveStandardAttributes(tagName, document)
    , m_dx(2)
    , m_dy(2)
    , m_stdDeviationX(2)
    , m_stdDeviationY(2)
{
    ASSERT(hasTagName(SVGNames::feDropShadowTag));
    ScriptWrappable::init(this);
    registerAnimatedPropertiesForSVGFEDropShadowElement();
}

PassRefPtr<SVGFEDropShadowElement> SVGFEDropShadowElement::create(const QualifiedName& tagName, Document* document)
{
    return adoptRef(new SVGFEDropShadowElement(tagName, document));
}

// Identifier key distinguishing the X wrapper of stdDeviation.
const AtomicString& SVGFEDropShadowElement::stdDeviationXIdentifier()
{
    DEFINE_STATIC_LOCAL(AtomicString, s_identifier, ("SVGStdDeviationX", AtomicString::ConstructFromLiteral));
    return s_identifier;
}

// Identifier key distinguishing the Y wrapper of stdDeviation.
const AtomicString& SVGFEDropShadowElement::stdDeviationYIdentifier()
{
    DEFINE_STATIC_LOCAL(AtomicString, s_identifier, ("SVGStdDeviationY", AtomicString::ConstructFromLiteral));
    return s_identifier;
}

// Programmatic setter for both stdDeviation components; triggers
// invalidate() so the filter result is recomputed.
void SVGFEDropShadowElement::setStdDeviation(float x, float y)
{
    setStdDeviationXBaseValue(x);
    setStdDeviationYBaseValue(y);
    invalidate();
}

// Lazily-built set of the attributes this element handles itself; anything
// else is delegated to SVGFilterPrimitiveStandardAttributes.
bool SVGFEDropShadowElement::isSupportedAttribute(const QualifiedName& attrName)
{
    DEFINE_STATIC_LOCAL(HashSet<QualifiedName>, supportedAttributes, ());
    if (supportedAttributes.isEmpty()) {
        supportedAttributes.add(SVGNames::inAttr);
        supportedAttributes.add(SVGNames::dxAttr);
        supportedAttributes.add(SVGNames::dyAttr);
        supportedAttributes.add(SVGNames::stdDeviationAttr);
    }
    return supportedAttributes.contains<SVGAttributeHashTranslator>(attrName);
}

void SVGFEDropShadowElement::parseAttribute(const QualifiedName& name, const AtomicString& value)
{
    if (!isSupportedAttribute(name)) {
        SVGFilterPrimitiveStandardAttributes::parseAttribute(name, value);
        return;
    }

    if (name == SVGNames::stdDeviationAttr) {
        // stdDeviation is "number [number]"; parseNumberOptionalNumber fills
        // both x and y (presumably a single number applies to both axes --
        // confirm in SVGParserUtilities). Invalid values are ignored here.
        float x, y;
        if (parseNumberOptionalNumber(value, x, y)) {
            setStdDeviationXBaseValue(x);
            setStdDeviationYBaseValue(y);
        }
        return;
    }

    if (name == SVGNames::inAttr) {
        setIn1BaseValue(value);
        return;
    }

    if (name == SVGNames::dxAttr) {
        setDxBaseValue(value.toFloat());
        return;
    }

    if (name == SVGNames::dyAttr) {
        setDyBaseValue(value.toFloat());
        return;
    }

    ASSERT_NOT_REACHED();
}

// Any change to a supported attribute invalidates the cached filter result.
void SVGFEDropShadowElement::svgAttributeChanged(const QualifiedName& attrName)
{
    if (!isSupportedAttribute(attrName)) {
        SVGFilterPrimitiveStandardAttributes::svgAttributeChanged(attrName);
        return;
    }

    SVGElementInstance::InvalidationGuard invalidationGuard(this);

    if (attrName == SVGNames::inAttr
        || attrName == SVGNames::stdDeviationAttr
        || attrName == SVGNames::dxAttr
        || attrName == SVGNames::dyAttr) {
        invalidate();
        return;
    }

    ASSERT_NOT_REACHED();
}

// Builds the platform FEDropShadow effect. Returns 0 (no effect) when the
// element has no renderer, either stdDeviation component is negative, or
// the "in" reference cannot be resolved by the filter builder.
PassRefPtr<FilterEffect> SVGFEDropShadowElement::build(SVGFilterBuilder* filterBuilder, Filter* filter)
{
    RenderObject* renderer = this->renderer();
    if (!renderer)
        return 0;

    if (stdDeviationX() < 0 || stdDeviationY() < 0)
        return 0;

    ASSERT(renderer->style());
    // Shadow color/opacity come from the element's computed flood properties.
    const SVGRenderStyle* svgStyle = renderer->style()->svgStyle();

    Color color = svgStyle->floodColor();
    float opacity = svgStyle->floodOpacity();

    FilterEffect* input1 = filterBuilder->getEffectById(in1());
    if (!input1)
        return 0;

    RefPtr<FilterEffect> effect = FEDropShadow::create(filter, stdDeviationX(), stdDeviationY(), dx(), dy(), color, opacity);
    effect->inputEffects().append(input1);
    return effect.release();
}

}
Java
<?php

namespace modules\translations\controllers\console;

use modules\lang\models\Lang;
use modules\translations\models\MessageCategory;
use Yii;
use yii\console\Controller;
use yii\console\Exception;
use yii\helpers\FileHelper;
use modules\translations\models\Message;
use modules\translations\models\SourceMessage;

/**
 * Console controller that imports PHP translation message files into the
 * database-backed translation tables (MessageCategory, SourceMessage,
 * Message).
 */
class I18nController extends Controller
{
    /**
     * Imports translations from a directory tree laid out as
     * <language>/<category>.php, prompting to confirm each detected
     * language and category before importing.
     *
     * @param string $sourcePath directory containing the translation files
     *                           (prompted for when omitted)
     * @throws Exception when the resolved path is not a valid directory
     */
    public function actionImport($sourcePath = null)
    {
        if (!$sourcePath) {
            $sourcePath = $this->prompt('Enter a source path');
        }
        $sourcePath = realpath(Yii::getAlias($sourcePath));
        if (!is_dir($sourcePath)) {
            throw new Exception('The source path ' . $sourcePath . ' is not a valid directory.');
        }
        $translationsFiles = FileHelper::findFiles($sourcePath, ['only' => ['*.php']]);
        foreach ($translationsFiles as $translationsFile) {
            // Strip the base path and extension, then split into
            // [language, category-relative-path].
            $relativePath = trim(str_replace([$sourcePath, '.php'], '', $translationsFile), '/,\\');
            $relativePath = FileHelper::normalizePath($relativePath, '/');
            $relativePath = explode('/', $relativePath, 2);
            if (count($relativePath) > 1) {
                $language = $this->prompt('Enter language.', ['default' => $relativePath[0]]);
                $category = $this->prompt('Enter category.', ['default' => $relativePath[1]]);
                $categoryId = $this->addCategory($category);
                // The translation file is expected to return an array of
                // source => translation pairs.
                $translations = require_once $translationsFile;
                if (is_array($translations)) {
                    foreach ($translations as $sourceMessage => $translation) {
                        if (!empty($translation)) {
                            $sourceMessage = $this->getSourceMessage($categoryId, $sourceMessage);
                            $this->setTranslation($sourceMessage, $language, $translation);
                        }
                    }
                }
            }
        }
        echo PHP_EOL . 'Done.' . PHP_EOL;
    }

    /**
     * Finds a message category by name, creating it when missing.
     *
     * @param string $category category name
     * @return int id of the existing or newly created category
     */
    private function addCategory($category){
        $catModel = MessageCategory::find()->where(['name'=>$category])->one();
        if(!$catModel){
            $catModel = new MessageCategory();
            $catModel->name = $category;
            $catModel->save();
        }
        return $catModel->id;
    }

    /**
     * Loads the source message for the given category id, creating the row
     * (without validation) when it does not exist yet.
     *
     * @param int $category category id (a foreign key, despite the name)
     * @param string $message source message text
     * @return SourceMessage
     */
    private function getSourceMessage($category, $message)
    {
        $params = [
            'category_id' => $category,
            'message' => $message
        ];
        $sourceMessage = SourceMessage::find()
            ->where($params)
            ->with('messages')
            ->one();
        if (!$sourceMessage) {
            $sourceMessage = new SourceMessage;
            $sourceMessage->setAttributes($params, false);
            $sourceMessage->save(false);
        }
        return $sourceMessage;
    }

    /**
     * Stores (or overwrites) the translation of the source message for the
     * language whose Lang.url matches $language. Languages with no Lang row
     * are silently skipped.
     *
     * @param SourceMessage $sourceMessage
     * @param string $language language url code (e.g. "en")
     * @param string $translation
     */
    private function setTranslation($sourceMessage, $language, $translation)
    {
        /** @var Message[] $messages */
        $messages = $sourceMessage->messages;
        $lang = Lang::find()->where(['url'=>$language])->one();
        if(!$lang) return;
        $langId = $lang->id;
        if (isset($messages[$langId]) /*&& $messages[$langId]->translation === null*/) {
            $messages[$langId]->translation = $translation;
            $messages[$langId]->save(false);
        } elseif (!isset($messages[$langId])) {
            $message = new Message;
            $message->setAttributes([
                'lang_id' => $langId,
                'translation' => $translation
            ], false);
            $sourceMessage->link('messages', $message);
        }
    }

    /**
     * Deletes all rows from the Message and SourceMessage tables.
     */
    public function actionFlush()
    {
        $tableNames = [
            Message::tableName(),
            SourceMessage::tableName()
        ];
        $db = Yii::$app->getDb();
        foreach ($tableNames as $tableName) {
            $db->createCommand()
                ->delete($tableName)
                ->execute();
        }
        echo PHP_EOL . 'Done.' . PHP_EOL;
    }
}
Java
#include <CGAL/Epick_d.h>
#include <CGAL/point_generators_d.h>
#include <CGAL/Manhattan_distance_iso_box_point.h>
#include <CGAL/K_neighbor_search.h>
#include <CGAL/Search_traits_d.h>

#include <iostream>

typedef CGAL::Epick_d<CGAL::Dimension_tag<4> > Kernel;
typedef Kernel::Point_d Point_d;
typedef CGAL::Random_points_in_cube_d<Point_d> Random_points_iterator;
typedef Kernel::Iso_box_d Iso_box_d;
typedef Kernel TreeTraits;
typedef CGAL::Manhattan_distance_iso_box_point<TreeTraits> Distance;
typedef CGAL::K_neighbor_search<TreeTraits, Distance> Neighbor_search;
typedef Neighbor_search::Tree Tree;

// Builds a spatial-search tree of N random 4D points and reports the K
// approximate *furthest* neighbors (Manhattan box-to-point distance) of a
// small axis-aligned query box.
int main() {
  const int N = 1000;
  const unsigned int K = 10;

  Tree tree;
  // Random points in the cube [-1000, 1000]^4.
  Random_points_iterator rpit(4, 1000.0);
  for (int i = 0; i < N; i++) {
    tree.insert(*rpit++);
  }

  Point_d pp(0.1, 0.1, 0.1, 0.1);
  Point_d qq(0.2, 0.2, 0.2, 0.2);
  Iso_box_d query(pp, qq);

  Distance tr_dist;
  // BUG FIX: the search previously requested a hardcoded 5 neighbors while
  // the banner below advertises K (= 10); request K so output matches.
  // eps = 10.0 (approximation factor), search_nearest = false => furthest.
  Neighbor_search N1(tree, query, K, 10.0, false);

  std::cout << "For query rectangle = [0.1, 0.2]^4 " << std::endl
            << "the " << K << " approximate furthest neighbors are: " << std::endl;
  for (Neighbor_search::iterator it = N1.begin(); it != N1.end(); it++) {
    // it->second is the transformed distance; convert back for display.
    std::cout << " Point " << it->first << " at distance "
              << tr_dist.inverse_of_transformed_distance(it->second) << std::endl;
  }

  return 0;
}
Java
/*L * Copyright RTI International * * Distributed under the OSI-approved BSD 3-Clause License. * See http://ncip.github.com/webgenome/LICENSE.txt for details. */ /* $Revision: 1.1 $ $Date: 2007-08-22 20:03:57 $ */ package org.rti.webgenome.webui.struts.upload; import javax.servlet.http.HttpServletRequest; import org.apache.struts.action.ActionError; import org.apache.struts.action.ActionErrors; import org.apache.struts.action.ActionMapping; import org.rti.webgenome.util.SystemUtils; import org.rti.webgenome.webui.struts.BaseForm; /** * Form for inputting the name of a rectangular file * column that contains reporter names. * @author dhall * */ public class ReporterColumnNameForm extends BaseForm { /** Serialized version ID. */ private static final long serialVersionUID = SystemUtils.getLongApplicationProperty("serial.version.uid"); /** Name of column containing reporter names. */ private String reporterColumnName = null; /** * Get name of column containing reporter names. * @return Column heading. */ public String getReporterColumnName() { return reporterColumnName; } /** * Set name of column containing reporter names. * @param reporterColumnName Column heading. */ public void setReporterColumnName(final String reporterColumnName) { this.reporterColumnName = reporterColumnName; } /** * {@inheritDoc} */ @Override public ActionErrors validate(final ActionMapping mapping, final HttpServletRequest request) { ActionErrors errors = new ActionErrors(); if (this.reporterColumnName == null || this.reporterColumnName.length() < 1) { errors.add("reporterColumnName", new ActionError("invalid.field")); } if (errors.size() > 0) { errors.add("global", new ActionError("invalid.fields")); } return errors; } }
Java
//===-- Status.h - Renders a 96x32 Status Display ------------------ c++ --===// // // UWH Timer // // This file is distributed under the BSD 3-Clause License. // See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef STATUS_H #define STATUS_H class UWHDCanvas; void renderStatus(UWHDCanvas *C); #endif
Java
from datetime import datetime, timedelta

from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied

import mock
import pytest

from olympia.amo.tests import BaseTestCase, TestCase
from olympia.amo import decorators, get_user, set_user
from olympia.amo.urlresolvers import reverse
from olympia.users.models import UserProfile


pytestmark = pytest.mark.django_db


def test_post_required():
    """post_required rejects non-POST requests with 405 and passes POST through."""
    def func(request):
        return mock.sentinel.response

    g = decorators.post_required(func)

    request = mock.Mock()
    request.method = 'GET'
    assert isinstance(g(request), http.HttpResponseNotAllowed)

    request.method = 'POST'
    assert g(request) == mock.sentinel.response


def test_json_view():
    """Turns a Python object into a response."""
    def func(request):
        return {'x': 1}

    response = decorators.json_view(func)(mock.Mock())
    assert isinstance(response, http.HttpResponse)
    assert response.content == '{"x": 1}'
    assert response['Content-Type'] == 'application/json'
    assert response.status_code == 200


def test_json_view_normal_response():
    """Normal responses get passed through."""
    expected = http.HttpResponseForbidden()

    def func(request):
        return expected

    response = decorators.json_view(func)(mock.Mock())
    assert expected is response
    assert response['Content-Type'] == 'text/html; charset=utf-8'


def test_json_view_error():
    """json_view.error returns 400 responses."""
    response = decorators.json_view.error({'msg': 'error'})
    assert isinstance(response, http.HttpResponseBadRequest)
    assert response.content == '{"msg": "error"}'
    assert response['Content-Type'] == 'application/json'


def test_json_view_status():
    """The status_code argument overrides the default 200."""
    def func(request):
        return {'x': 1}

    response = decorators.json_view(func, status_code=202)(mock.Mock())
    assert response.status_code == 202


def test_json_view_response_status():
    """json_response honors an explicit status_code."""
    response = decorators.json_response({'msg': 'error'}, status_code=202)
    assert response.content == '{"msg": "error"}'
    assert response['Content-Type'] == 'application/json'
    assert response.status_code == 202


class TestTaskUser(TestCase):
    """set_task_user swaps in the task user only for the decorated call."""
    fixtures = ['base/users']

    def test_set_task_user(self):
        @decorators.set_task_user
        def some_func():
            return get_user()

        set_user(UserProfile.objects.get(username='regularuser'))
        assert get_user().pk == 999
        assert some_func().pk == int(settings.TASK_USER_ID)
        # The original user is restored after the decorated call.
        assert get_user().pk == 999


class TestLoginRequired(BaseTestCase):
    """Behavior of login_required for anonymous requests."""

    def setUp(self):
        super(TestLoginRequired, self).setUp()
        self.f = mock.Mock()
        self.f.__name__ = 'function'
        self.request = mock.Mock()
        # Simulate an anonymous user hitting path 'path'.
        self.request.user.is_authenticated.return_value = False
        self.request.get_full_path.return_value = 'path'

    def test_normal(self):
        # Default behavior: redirect to the login page with ?to=<path>.
        func = decorators.login_required(self.f)
        response = func(self.request)
        assert not self.f.called
        assert response.status_code == 302
        assert response['Location'] == (
            '%s?to=%s' % (reverse('users.login'), 'path'))

    def test_no_redirect(self):
        # redirect=False yields a bare 401 instead of a redirect.
        func = decorators.login_required(self.f, redirect=False)
        response = func(self.request)
        assert not self.f.called
        assert response.status_code == 401

    def test_decorator_syntax(self):
        # @login_required(redirect=False)
        func = decorators.login_required(redirect=False)(self.f)
        response = func(self.request)
        assert not self.f.called
        assert response.status_code == 401

    def test_no_redirect_success(self):
        # Authenticated users reach the wrapped view.
        func = decorators.login_required(redirect=False)(self.f)
        self.request.user.is_authenticated.return_value = True
        func(self.request)
        assert self.f.called


class TestSetModifiedOn(TestCase):
    """set_modified_on stamps `modified` on the given objects on success."""
    fixtures = ['base/users']

    @decorators.set_modified_on
    def some_method(self, worked):
        # The decorator inspects this return value to decide whether to
        # update the `modified` timestamps.
        return worked

    def test_set_modified_on(self):
        users = list(UserProfile.objects.all()[:3])
        self.some_method(True, set_modified_on=users)
        for user in users:
            assert UserProfile.objects.get(pk=user.pk).modified.date() == (
                datetime.today().date())

    def test_not_set_modified_on(self):
        # When the wrapped method returns falsy, timestamps stay untouched.
        yesterday = datetime.today() - timedelta(days=1)
        qs = UserProfile.objects.all()
        qs.update(modified=yesterday)
        users = list(qs[:3])
        self.some_method(False, set_modified_on=users)
        for user in users:
            date = UserProfile.objects.get(pk=user.pk).modified.date()
            assert date < datetime.today().date()


class TestPermissionRequired(TestCase):
    """permission_required delegates to acl.action_allowed and raises on deny."""

    def setUp(self):
        super(TestPermissionRequired, self).setUp()
        self.f = mock.Mock()
        self.f.__name__ = 'function'
        self.request = mock.Mock()

    @mock.patch('olympia.access.acl.action_allowed')
    def test_permission_not_allowed(self, action_allowed):
        action_allowed.return_value = False
        func = decorators.permission_required('', '')(self.f)
        with self.assertRaises(PermissionDenied):
            func(self.request)

    @mock.patch('olympia.access.acl.action_allowed')
    def test_permission_allowed(self, action_allowed):
        action_allowed.return_value = True
        func = decorators.permission_required('', '')(self.f)
        func(self.request)
        assert self.f.called

    @mock.patch('olympia.access.acl.action_allowed')
    def test_permission_allowed_correctly(self, action_allowed):
        # Verifies the permission/type pair is forwarded unchanged.
        func = decorators.permission_required('Admin', '%')(self.f)
        func(self.request)
        action_allowed.assert_called_with(self.request, 'Admin', '%')
Java
package main

import (
	"github.com/kataras/iris/v12"
	"github.com/kataras/iris/v12/middleware/requestid"

	"github.com/kataras/golog"
)

// main configures an iris application whose logger emits JSON records at
// "debug" level, demonstrates structured logging via golog.Fields, and
// serves a single /ping endpoint on :8080.
func main() {
	app := iris.New()
	app.Logger().SetLevel("debug")
	// Second argument is the indentation string for the JSON output.
	app.Logger().SetFormat("json", " ")

	// to register a custom Formatter:
	// app.Logger().RegisterFormatter(golog.Formatter...)

	// Also, see app.Logger().SetLevelOutput(level string, w io.Writer)
	// to set a custom writer for a specific level.

	// Attach a request-id to every incoming request (read back in ping).
	app.Use(requestid.New())

	/*
		Example Output:
		{
			"timestamp": 1591422944,
			"level": "debug",
			"message": "This is a message with data",
			"fields": {
				"username": "kataras"
			},
			"stacktrace": [
				{
					"function": "main.main",
					"source": "C:/mygopath/src/github.com/kataras/iris/_examples/logging/json-logger/main.go:16"
				}
			]
		}
	*/
	app.Logger().Debugf("This is a %s with data (debug prints the stacktrace too)", "message", golog.Fields{
		"username": "kataras",
	})

	/*
		Example Output:
		{
			"timestamp": 1591422944,
			"level": "info",
			"message": "An info message",
			"fields": {
				"home": "https://iris-go.com"
			}
		}
	*/
	app.Logger().Infof("An info message", golog.Fields{"home": "https://iris-go.com"})

	app.Get("/ping", ping)

	// Navigate to http://localhost:8080/ping.
	app.Listen(":8080" /*, iris.WithoutBanner*/)
}

// ping logs the request path together with the middleware-assigned request
// id as a structured field, then replies with plain "pong".
func ping(ctx iris.Context) {
	/*
		Example Output:
		{
			"timestamp": 1591423046,
			"level": "debug",
			"message": "Request path: /ping",
			"fields": {
				"request_id": "fc12d88a-a338-4bb9-aa5e-126f2104365c"
			},
			"stacktrace": [
				{
					"function": "main.ping",
					"source": "C:/mygopath/src/github.com/kataras/iris/_examples/logging/json-logger/main.go:82"
				},
				...
			]
		}
	*/
	ctx.Application().Logger().Debugf("Request path: %s", ctx.Path(), golog.Fields{
		"request_id": ctx.GetID(),
	})

	ctx.WriteString("pong")
}
Java
from django.core.management.base import BaseCommand

import amo
from mkt.webapps.models import AddonPremium


class Command(BaseCommand):
    """Deletes AddonPremium rows that belong to free apps."""

    help = 'Clean up existing AddonPremium objects for free apps.'

    def handle(self, *args, **options):
        """Remove every AddonPremium whose addon has a free premium type."""
        stale_premiums = AddonPremium.objects.filter(
            addon__premium_type__in=amo.ADDON_FREES)
        stale_premiums.delete()
Java
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE23_Relative_Path_Traversal__char_file_ifstream_08.cpp
Label Definition File: CWE23_Relative_Path_Traversal.label.xml
Template File: sources-sink-08.tmpl.cpp
*/
/*
 * @description
 * CWE: 23 Relative Path Traversal
 * BadSource: file Read input from a file
 * GoodSource: Use a fixed file name
 * Sink: ifstream
 *    BadSink : Open the file named in data using ifstream::open()
 * Flow Variant: 08 Control flow: if(staticReturnsTrue()) and if(staticReturnsFalse())
 *
 * */

/* NOTE(review): This is a generated SARD/Juliet benchmark file. The
 * "POTENTIAL FLAW" sites below are intentional ground truth for static
 * analysis tools and must NOT be fixed or refactored; only comments have
 * been added. */

#include "std_testcase.h"

#ifdef _WIN32
#define BASEPATH "c:\\temp\\"
#else
#include <wchar.h>
#define BASEPATH "/tmp/"
#endif

#ifdef _WIN32
#define FILENAME "C:\\temp\\file.txt"
#else
#define FILENAME "/tmp/file.txt"
#endif

#include <fstream>
using namespace std;

/* The two function below always return the same value, so a tool
   should be able to identify that calls to the functions will always
   return a fixed value. */
static int staticReturnsTrue()
{
    return 1;
}

static int staticReturnsFalse()
{
    return 0;
}

namespace CWE23_Relative_Path_Traversal__char_file_ifstream_08
{

#ifndef OMITBAD

/* bad(): appends attacker-controllable file contents to BASEPATH and opens
   the resulting path -- the labeled path-traversal flaw. */
void bad()
{
    char * data;
    char dataBuffer[FILENAME_MAX] = BASEPATH;
    data = dataBuffer;
    if(staticReturnsTrue())
    {
        {
            /* Read input from a file */
            size_t dataLen = strlen(data);
            FILE * pFile;
            /* if there is room in data, attempt to read the input from a file */
            if (FILENAME_MAX-dataLen > 1)
            {
                pFile = fopen(FILENAME, "r");
                if (pFile != NULL)
                {
                    /* POTENTIAL FLAW: Read data from a file */
                    if (fgets(data+dataLen, (int)(FILENAME_MAX-dataLen), pFile) == NULL)
                    {
                        printLine("fgets() failed");
                        /* Restore NUL terminator if fgets fails */
                        data[dataLen] = '\0';
                    }
                    fclose(pFile);
                }
            }
        }
    }
    {
        ifstream inputFile;
        /* POTENTIAL FLAW: Possibly opening a file without validating the file name or path */
        inputFile.open((char *)data);
        inputFile.close();
    }
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B1() - use goodsource and badsink by changing the staticReturnsTrue() to staticReturnsFalse() */
static void goodG2B1()
{
    char * data;
    char dataBuffer[FILENAME_MAX] = BASEPATH;
    data = dataBuffer;
    if(staticReturnsFalse())
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        printLine("Benign, fixed string");
    }
    else
    {
        /* FIX: Use a fixed file name */
        strcat(data, "file.txt");
    }
    {
        ifstream inputFile;
        /* POTENTIAL FLAW: Possibly opening a file without validating the file name or path */
        inputFile.open((char *)data);
        inputFile.close();
    }
}

/* goodG2B2() - use goodsource and badsink by reversing the blocks in the if statement */
static void goodG2B2()
{
    char * data;
    char dataBuffer[FILENAME_MAX] = BASEPATH;
    data = dataBuffer;
    if(staticReturnsTrue())
    {
        /* FIX: Use a fixed file name */
        strcat(data, "file.txt");
    }
    {
        ifstream inputFile;
        /* POTENTIAL FLAW: Possibly opening a file without validating the file name or path */
        inputFile.open((char *)data);
        inputFile.close();
    }
}

void good()
{
    goodG2B1();
    goodG2B2();
}

#endif /* OMITGOOD */

} /* close namespace */

/* Below is the main(). It is only used when building this testcase on
   its own for testing or for building a binary to use in testing binary
   analysis tools. It is not used when compiling all the testcases as one
   application, which is how source code analysis tools are tested. */

#ifdef INCLUDEMAIN

using namespace CWE23_Relative_Path_Traversal__char_file_ifstream_08; /* so that we can use good and bad easily */

int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}

#endif
Java
% diagram A1-2 \verbatimlines % inc Incorporating new mail into inbox... 1+E02/28 0227-EST mrose test message <<ENCRYPTED MESSAGE: TTI Incorporating encrypted mail into inbox... 1+ 02/28 0227-EST mrose test message <<mumble, mumble. >> ?endverbatim
Java
package org.hisp.dhis.sms.listener; /* * Copyright (c) 2004-2018, University of Oslo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * Neither the name of the HISP project nor the names of its contributors may * be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

import java.util.*;

import org.apache.commons.lang3.StringUtils;
import org.hisp.dhis.organisationunit.OrganisationUnit;
import org.hisp.dhis.program.Program;
import org.hisp.dhis.program.ProgramInstanceService;
import org.hisp.dhis.sms.command.SMSCommand;
import org.hisp.dhis.sms.command.SMSCommandService;
import org.hisp.dhis.sms.command.code.SMSCode;
import org.hisp.dhis.sms.incoming.IncomingSms;
import org.hisp.dhis.sms.incoming.SmsMessageStatus;
import org.hisp.dhis.sms.parse.ParserType;
import org.hisp.dhis.sms.parse.SMSParserException;
import org.hisp.dhis.system.util.SmsUtils;
import org.hisp.dhis.trackedentity.TrackedEntityAttribute;
import org.hisp.dhis.trackedentity.TrackedEntityInstance;
import org.hisp.dhis.trackedentity.TrackedEntityInstanceService;
import org.hisp.dhis.trackedentity.TrackedEntityTypeService;
import org.hisp.dhis.trackedentityattributevalue.TrackedEntityAttributeValue;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.transaction.annotation.Transactional;

/**
 * Listener for incoming SMS messages that register a new tracked entity
 * instance and enroll it into the program attached to the matched SMS command.
 * The attribute values are taken from the already-parsed message map; feedback
 * is sent back to the originating phone number on both success and failure.
 */
@Transactional
public class TrackedEntityRegistrationSMSListener
    extends BaseSMSListener
{
    /** Prefix of the success feedback; the new instance uid is appended. */
    private static final String SUCCESS_MESSAGE = "Tracked Entity Registered Successfully with uid. ";

    // -------------------------------------------------------------------------
    // Dependencies
    // -------------------------------------------------------------------------

    @Autowired
    private SMSCommandService smsCommandService;

    @Autowired
    private TrackedEntityTypeService trackedEntityTypeService;

    @Autowired
    private TrackedEntityInstanceService trackedEntityInstanceService;

    @Autowired
    private ProgramInstanceService programInstanceService;

    // -------------------------------------------------------------------------
    // IncomingSmsListener implementation
    // -------------------------------------------------------------------------

    /**
     * Creates and enrolls a tracked entity instance from the parsed SMS.
     *
     * @param sms the incoming message being processed
     * @param smsCommand the command matched for this message
     * @param parsedMessage code -> value pairs extracted from the message text
     * @throws SMSParserException when the sender's org unit is not linked to
     *         the command's program, or no attribute value could be parsed
     */
    @Override
    protected void postProcess( IncomingSms sms, SMSCommand smsCommand, Map<String, String> parsedMessage )
    {
        String message = sms.getText();

        Date date = SmsUtils.lookForDate( message );

        String senderPhoneNumber = StringUtils.replace( sms.getOriginator(), "+", "" );

        Collection<OrganisationUnit> orgUnits = getOrganisationUnits( sms );

        Program program = smsCommand.getProgram();

        OrganisationUnit orgUnit = SmsUtils.selectOrganisationUnit( orgUnits, parsedMessage, smsCommand );

        if ( !program.hasOrganisationUnit( orgUnit ) )
        {
            sendFeedback( SMSCommand.NO_OU_FOR_PROGRAM, senderPhoneNumber, WARNING );

            throw new SMSParserException( SMSCommand.NO_OU_FOR_PROGRAM );
        }

        TrackedEntityInstance trackedEntityInstance = new TrackedEntityInstance();
        trackedEntityInstance.setOrganisationUnit( orgUnit );
        trackedEntityInstance.setTrackedEntityType( trackedEntityTypeService
            .getTrackedEntityByName( smsCommand.getProgram().getTrackedEntityType().getName() ) );

        Set<TrackedEntityAttributeValue> patientAttributeValues = new HashSet<>();

        smsCommand.getCodes().stream()
            .filter( code -> parsedMessage.containsKey( code.getCode() ) )
            .forEach( code -> {
                TrackedEntityAttributeValue trackedEntityAttributeValue =
                    this.createTrackedEntityAttributeValue( parsedMessage, code, trackedEntityInstance );
                patientAttributeValues.add( trackedEntityAttributeValue );
            } );

        if ( patientAttributeValues.isEmpty() )
        {
            // FIX: previously this case only sent a warning and then fell through
            // to look up a tracked entity instance with id 0, which yields null
            // and an NPE on tei.getUid() below. Abort processing instead,
            // mirroring the org-unit check above.
            sendFeedback( "No TrackedEntityAttribute found", senderPhoneNumber, WARNING );

            throw new SMSParserException( "No TrackedEntityAttribute found" );
        }

        int trackedEntityInstanceId = trackedEntityInstanceService.createTrackedEntityInstance(
            trackedEntityInstance, null, null, patientAttributeValues );

        TrackedEntityInstance tei = trackedEntityInstanceService.getTrackedEntityInstance( trackedEntityInstanceId );

        programInstanceService.enrollTrackedEntityInstance( tei, smsCommand.getProgram(), new Date(), date, orgUnit );

        sendFeedback(
            StringUtils.defaultIfBlank( smsCommand.getSuccessMessage(), SUCCESS_MESSAGE + tei.getUid() ),
            senderPhoneNumber, INFO );

        update( sms, SmsMessageStatus.PROCESSED, true );
    }

    /**
     * Resolves the SMS command for a message by its leading command string,
     * restricted to tracked-entity-registration commands.
     */
    @Override
    protected SMSCommand getSMSCommand( IncomingSms sms )
    {
        return smsCommandService.getSMSCommand( SmsUtils.getCommandString( sms ),
            ParserType.TRACKED_ENTITY_REGISTRATION_PARSER );
    }

    /**
     * Builds an attribute value for the given code from the parsed message,
     * bound to the (not yet persisted) tracked entity instance.
     */
    private TrackedEntityAttributeValue createTrackedEntityAttributeValue( Map<String, String> parsedMessage,
        SMSCode code, TrackedEntityInstance trackedEntityInstance )
    {
        String value = parsedMessage.get( code.getCode() );
        TrackedEntityAttribute trackedEntityAttribute = code.getTrackedEntityAttribute();

        TrackedEntityAttributeValue trackedEntityAttributeValue = new TrackedEntityAttributeValue();
        trackedEntityAttributeValue.setAttribute( trackedEntityAttribute );
        trackedEntityAttributeValue.setEntityInstance( trackedEntityInstance );
        trackedEntityAttributeValue.setValue( value );
        return trackedEntityAttributeValue;
    }
}
Java
<?php
/**
 * Message translations.
 *
 * This file is automatically generated by 'yiic message' command.
 * It contains the localizable messages extracted from source code.
 * You may modify this file by translating the extracted messages.
 *
 * Each array element represents the translation (value) of a message (key).
 * If the value is empty, the message is considered as not translated.
 * Messages that no longer need translation will have their translations
 * enclosed between a pair of '@@' marks.
 *
 * Message string can be used with plural forms format. Check i18n section
 * of the guide for details.
 *
 * NOTE, this file must be saved in UTF-8 encoding.
 */
// Review fixes: corrected Russian typos in the translation values and removed
// earlier occurrences of duplicated keys ('Error', the DB-classes anchor) —
// PHP keeps only the last assignment for a repeated key, so behavior is
// unchanged.
return array(
    'Community' => 'Сообщество',
    'forum' => 'форум',
    'Thank you!' => 'Спасибо!',
    'Help us!' => 'Помоги нам!',
    '0.2' => '0.2',
    '< Back' => '< Назад',
    'Optional.' => 'Дополнительно.',
    '{app} can help to deploy YiiFramework project fast and easy.' => '{app} позволит быстро и легко запустить новый проект на Yiiframework',
    'APC extension' => 'Расширение APC',
    'Activation of Yupe core!' => 'Активация ядра Yupe!',
    'An error occurred during the installation of modules - copying the file to a folder modulesBack with error!' => 'Во время установки модулей произошла ошибка - файлы модулей с ошибкой копируются в каталог modulesBack',
    'Assets folder' => 'Каталог Assets',
    'At startup errors occured, please check the permissions for the all the files and directories (see the above errors)' => 'Во время начала установки произошли ошибки. Проверьте права для всех необходимых директорий. (Смотрите ошибки ниже)',
    'Can not open file \'{file}\' in write mode!' => 'Не возможно открыть файл \'{file}\' для записи',
    'Configuration option safe_mode' => 'Опции конфигурации safe_mode',
    'Could not obtain information about the location. Please check whether the correct value in the variable {path_info} (or {php_self} and {script_name}).' => 'Не удалось получить информацию о директории. Проверьте правильность значений переменных {path_info} (или {php_self} и {script_name})',
    'Couldn\'t connect to DB with these params!' => 'Не удалось подключиться к базе данных используя текущие параметры!',
    'Couldn\'t connect to DB!' => 'Не удалось подключиться к базе данных!',
    'Ctype extension' => 'Расширение Ctype',
    'DOM extension' => 'Расширение DOM',
    'Email' => 'Email',
    'Administrator e-mail' => 'Email администратора',
    'Site administrator e-mail. Uses for admin cp authorization.' => 'Email администратора сайта. Используется для авторизации в панели управления.',
    'Everything is fine!' => 'Все в порядке!',
    'Failed to create the database!' => 'Не удалось создать базу данных!',
    'File db.php' => 'Файл db.php',
    'GD extension installed {br} without the support of FreeType' => 'Расширение GD установлено {br} без поддержки FreeType',
    'GD extension {br} with support for FreeType {br} or ImageMagick {br} supporting PNG' => 'Расширение GD {br} с поддержкой FreeType {br} или ImageMagick {br} с поддержкой PNG',
    'ImageMagick or GD extension is not installed' => 'Расширение ImageMagick или GD не установлено',
    'Installed!' => 'Установлен!',
    'Mcrypt extension' => 'Расширение Mcrypt',
    'Memcache extension' => 'Расширение Memcache',
    'Module "{module}" depends on the module "{dep}", which is not activated.' => 'Модуль "{module}" зависит от модуля "{dep}" который не был активирован.',
    'Modules folder' => 'Каталог Modules',
    'ModulesBack folder' => 'Каталог ModulesBack',
    'Multibyte String Functions' => 'Функции для работы с мультибайтовыми строками',
    'Need PHP version 5.4 and above.' => 'Необходима версия PHP 5.4 и выше',
    'PCRE extension' => 'Расширение PCRE',
    'PDO MSSQL extension (<a href="http://sqlsrvphp.codeplex.com/">pdo_sqlsrv</a>)' => 'Расширение PDO MSSQL(<a href="http://sqlsrvphp.codeplex.com/">pdo_sqlsrv</a>)',
    'PDO MSSQL extension (pdo_dblib)' => 'Расширение PDO MSSQL(pdo_dblib)',
    'PDO MSSQL extension (pdo_mssql)' => 'Расширение PDO MSSQL(pdo_mssql)',
    'PDO MySQL extension' => 'Расширение PDO MySQL',
    'PDO Oracle extension' => 'Расширение PDO Oracle',
    'PDO extension' => 'Расширение PDO',
    'Reflection extension' => 'Расширение Reflection',
    'Required for MySQL DB.' => 'Необходимо для работы с MySQL',
    'Required for Oracle DB.' => 'Необходимо для работы с Oracle',
    'Required for encryption and decryption methods.' => 'Необходимо для поддержки криптографических функций',
    'Required to work with MSSQL database on MS Windows.' => 'Необходимо для работы с MSSQL на платформе MS Windows',
    'Required to work with MSSQL database using Microsoft\'s driver' => 'Необходимо для работы с MSSQL при использовании драйвера Microsoft',
    'Required to work with MSSQL database when work from GNU/Linux or Unix' => 'Необходимо для работы с MSSQL на платформе GNU/Linux или Unix',
    'Runtime folder' => 'Каталог Runtime',
    'SOAP extension' => 'Расширение SOAP',
    'SPL extension' => 'Расширение SPL',
    'Security and Safe Mode' => 'Безопасность и безопасный режим',
    'Site settings saved successfully!' => 'Настройки сайта успешно сохранены!',
    'The Alternative PHP Cache (APC) is a free and open opcode cache for PHP. Its goal is to provide a free, open, and robust framework for caching and optimizing PHP intermediate code. {b}Optional{/b}.' => 'APC - свободный opcode кешер для PHP. Необходим для кеширования и оптимизации выполнения PHP кода {b}Не обязателен{/b}',
    'The administrator has successfully created!' => 'Администратор успешно создан!',
    'The module {name} is disabled!' => 'Модуль {name} отключен!',
    'The module {name} not found!' => 'Модуль с именем {name} не найден!',
    'The variable $_SERVER' => 'Переменная $_SERVER',
    'The variable $_SERVER does not contain {vars}.' => 'Переменная $_SERVER не содержит {vars}.',
    'There should be at least one server variables: {vars}.' => 'Необходимо установить как минимум одну переменную сервера из данного списка: {vars}.',
    'There was an error writing to file \'{file}\'!' => 'Произошла ошибка при попытке записи в файл \'{file}\'!',
    'To use memcached, set the value of the property {useMemcachedLink} equal {code_true}.' => 'Для использования memcached установите значение свойства {useMemcachedLink} равным {code_true}.',
    'Updating module\'s tables to current state!' => 'Обновление таблиц модулей до актуального состояния',
    'Uploads folder' => 'Каталог загрузок',
    'You need to set write permissions for the directory {folder}' => 'Необходимо установить права на запись для директории {folder}',
    'You need to set write permissions for the directory {folder}assets' => 'Необходимо установить права на запись для директории {folder}assets',
    'You should copy {from file} to {to file} and give it permission to write' => 'Необходимо скопировать {from file} в {to file} и дать доступ для записи',
    'You should disable the directive safe_mode.' => 'Необходимо отключить директиву safe_mode',
    'http://yupe.ru' => 'http://yupe.ru',
    'installation of Yupe!' => 'установка Yupe!',
    'mbstring extension' => 'расширение mbstring',
    '[email protected]' => '[email protected]',
    'yupe team' => 'yupe team',
    'Author:' => 'Автор:',
    'You can create DB with phpmyadmin help, or with some other sql tools.' => 'Базу данных можно создать при помощи phpmyadmin или любого другого инструмента.',
    'Will be installed <small class="label label-info checked-count">0</small> modules. Do you want to continue?' => 'Будет установлено <small class="label label-info checked-count">0</small> модулей. Продолжить?',
    'Your site is ready to work!' => 'Ваш сайт готов к работе!',
    'Version' => 'Версия',
    'There is an error occured when installing modules. You can watch errors in backlog journal.' => 'Во время установки выбранных Вами модулей произошла ошибка. Вы можете ознакомиться с журналом.',
    'All' => 'Все',
    'All <a href="http://www.yiiframework.com/doc/api/#system.db">DB-classes</a>' => 'Все <a href="http://www.yiiframework.com/doc/api/#system.db">DB-классы</a>',
    'All modules' => 'Все модули',
    'Install can\'t be continued. Please check errors!' => 'Дальнейшая установка невозможна, пожалуйста, исправьте ошибки!',
    'To continue installation you need to repair error was occured.' => 'Для продолжения установки необходимо исправить все возникшие проблемы.',
    'Welcome!' => 'Добро пожаловать!',
    'DNS and IP for DB access' => 'Домен и ip-адрес используемый для доступа к БД',
    'Addition modules you can install/activate from control panel.' => 'Дополнительные модули можно будет установить/активировать через панель управления.',
    'Summary modules: {all}, checked for install: {checked}' => 'Доступно модулей: {all}, выбрано для установки: {checked}',
    'You can donate us some money if you like our project.' => 'Если Вам не жалко - отправьте нам чуть-чуть денежек, мы будем довольны =) !',
    'Backlog journal' => 'Журнал установки',
    'Dependencies' => 'Зависимости',
    'Dependent' => 'Зависимые',
    'Depends from' => 'Зависит от',
    'Memorize please. Data form this section will need you for Control Panel access' => 'Запомните, указанные на данном этапе данные, они Вам потребуются для доступа к панели управления.',
    'Value' => 'Значение',
    'Installation in progress...' => 'Идет установка модулей...',
    'DB name on DBMS server' => 'Имя БД на сервере СУБД',
    'User name' => 'Имя пользователя',
    'Using in site title' => 'Используется в заголовке сайта.',
    'Using in description meta-tag' => 'Используется в поле description meta-тега.',
    'Using in keywords meta-tag' => 'Используется в поле keywords meta-тега.',
    'Using for administration delivery' => 'Используется для административной рассылки.',
    'Sources on GitHub' => 'Исходный код на Github',
    'Site keywords' => 'Ключевые слова сайта',
    'Comments' => 'Комментарий',
    'Admin login' => 'Логин администратора сайта.',
    'Multibyte strings' => 'Многобайтные строки',
    'Module for system installation' => 'Модуль для установки системы',
    'Multilanguage module' => 'Модуль мультиязычный',
    'System module. (Can\'t disable)' => 'Модуль не отключаемый',
    'We working with it' => 'Мы активно ее пишем =)',
    'We always happy to see you on our site {link}. We also have {twitter} and {forum}!' => 'Мы всегда рады видеть Вас на нашем сайте {link}, а еще у нас есть {twitter} и {forum} !',
    'On this step Yupe trying to install modules you checked. This process can take several minutes...' => 'На данном этапе Юпи! постарается установить выбранные вами модули. Установка может занять некоторое время...',
    'Yupe checks PHP version and needed extensions on this step.' => 'На данном этапе Юпи! проверяет версию PHP и наличие всех необходимых модулей.',
    'On this step Yupe checks access right for needed directories.' => 'На данном этапе Юпи! проверяет права доступа для всех необходимых каталогов.',
    'Name' => 'Название',
    'DB name' => 'Название базы данных',
    'Site title' => 'Название сайта',
    'Start installation >' => 'Начать установку >',
    'Refresh' => 'Обновить',
    'Description' => 'Описание',
    'Site description' => 'Описание сайта',
    'Describe appearance of your Site' => 'Определяет внешний вид Вашего сайта.',
    'Describe appearance of your Control Panel' => 'Определяет внешний вид панели управления.',
    'Disable depends modules,<br/>which you would not like to install.' => 'Отключите зависимые,<br/>чтобы не устанавливать',
    'Cancel' => 'Отмена',
    'Official docs' => 'Официальная документация Юпи!',
    'Official Yupe site' => 'Официальный сайт Юпи!',
    'Official Yupe twitter' => 'Официальный твиттер Юпи!',
    'Error' => 'Ошибка',
    'Error!' => 'Ошибка!',
    'GO TO CONTROL PANEL' => 'ПЕРЕЙТИ В ПАНЕЛЬ УПРАВЛЕНИЯ',
    'GO TO SITE HOME PAGE' => 'ПЕРЕЙТИ НА САЙТ',
    'Passwords are not consistent' => 'Пароли не совпадают!',
    'Password' => 'Пароль',
    'Admin password' => 'Пароль администратора сайта.',
    'DB access password' => 'Пароль для доступа к указанной БД',
    'Mail us to <b><a href="mailto:[email protected]">[email protected]</a></b>' => 'Пишите нам на <b><a href="mailto:[email protected]">[email protected]</a></b>',
    'Password confirm' => 'Подтверждение пароля',
    'Admin password confirm' => 'Подтверждение пароля администратора сайта.',
    'Please check modules you want to be installed.' => 'Пожалуйста, выберите модули, которые хотите установить.',
    'Please select hard password with digits, alphas and special symbols.' => 'Пожалуйста, указывайте сложный пароль, содержащий как цифры и буквы, так и специальные символы.',
    'Congratulations Yupe was installed successfully!' => 'Поздравляем, установка "Юпи!" завершена!',
    'Congratulations, modules which you checked installed successfully!' => 'Поздравляем, установка выбранных вами модулей завершена.<br/>Вы можете ознакомиться с журналом или продолжить установку.',
    'Interesting links:' => 'Полезные ссылки:',
    'User' => 'Пользователь',
    'User for access to selected DB' => 'Пользователь для доступа к указанной БД',
    'Port' => 'Порт',
    'DBMS server port' => 'Порт СУБД сервера',
    'Warning' => 'Предупреждение',
    'Table prefix, "yupe_" by defaults' => 'Префикс добавляется в начале имени каждой таблицы, по умолчанию "yupe_"',
    'Tables prefix' => 'Префикс таблиц',
    'If you have problems with installation, please check {link} or {feedback}' => 'При возникновении проблем с установкой, пожалуйста, посетите {link} или {feedback} !',
    'If you have a problem with install, please go to {link}' => 'При возникновении проблем с установкой, пожалуйста, посетите вот {link}!',
    'Check connection and continue >' => 'Проверить подключение и продолжить >',
    'Continue >' => 'Продолжить >',
    'Path to mysql' => 'Путь к mysql',
    'РНР version' => 'Версия РНР',
    'Result' => 'Результат',
    'Recommended' => 'Рекомендованные',
    'Module site:' => 'Сайт модуля:',
    'Please follow installation instructions.' => 'Следуйте инструкциям установщика и все у Вас получится!',
    'Look journal' => 'Смотреть журнал',
    'Create admin account' => 'Создайте учетную запись администратора сайта.',
    'Create DB on DBMS server' => 'Создать БД на сервере СУБД',
    'Create DB' => 'Создать базу данных',
    'Unix socket (if it need)' => 'Сокет (если необходимо)',
    'Default backend (Admin CP) theme' => 'Тема оформления панели управления',
    'Default frontend theme' => 'Тема оформления публичной части',
    'DBMS type (Experimental)' => 'Тип сервера БД (экспериментальная возможность)',
    'DBMS type' => 'Тип сервера базы данных',
    'Only basic modules' => 'Только основные',
    'You have "Insatll" module active! After install it need to be disabled!' => 'У Вас активирован модуль "Установщик", после установки системы его необходимо отключить!',
    'More about SEO {link}' => 'Узнать больше о SEO-оптимизации можно {link}.',
    'Select your site title, description and keywords for SEO.' => 'Укажите название Вашего сайта, его описание и ключевые слова, необходимые для SEO-оптимизации.',
    'Select DB connection settings' => 'Укажите параметры соединения с базой данных.',
    'Installing module' => 'Устанавливаем модуль',
    'Install' => 'Установка',
    'Installation completed' => 'Установка завершена',
    'Module was installed' => 'Установлен модуль',
    'Installer' => 'Установщик',
    'File {file} not exists or not accessible for write!' => 'Файл {file} не существует или не доступен для записи!',
    'Support Yupe forum' => 'Форум поддержки Юпи!',
    'Host' => 'Хост',
    'our forum' => 'наш форум',
    'contact us' => 'напишите нам',
    'Step 1 of 8: Welcome!' => 'Шаг 1 из 8 : "Приветствие!"',
    'Step 2 of 8: Environment check!' => 'Шаг 2 из 8 : "Проверка окружения!"',
    'Step 3 of 8: System requirements' => 'Шаг 3 из 8 : "Проверка системных требований"',
    'Step 4 of 8: DB settings' => 'Шаг 4 из 8 : "Соединение с базой данных"',
    'Step 5 of 8: Installing modules' => 'Шаг 5 из 8 : "Установка модулей"',
    'Step 6 of 8: Creating administrator' => 'Шаг 6 из 8 : "Создание учетной записи администратора"',
    'Step 7 of 8: Project settings' => 'Шаг 7 из 8 : "Настройки проекта"',
    'Step 8 of 8: Finish' => 'Шаг 8 из 8 : "Окончание установки"',
    'Yupe!' => 'Юпи!',
    'Yupe!, yupe, cms, yii' => 'Юпи!, yupe, цмс, yii',
    'Yupe! - the fastest way to create a site build on top of Yiiframework!' => 'Юпи! - самый простой способ создать сайт на Yii!',
    'Yupe try to create DB if it doesn\'t exists.' => 'Юпи! попытается сам создать базу данных если вы поставите галочку "Создать базу данных"',
    'here' => 'вот здесь',
    'go frequently' => 'заходите чаще =)',
    'or' => 'или',
    'interesing thoughts and ideas' => 'интересные мысли и идеи',
    'Follow us' => 'обязательно заффоловьте нас, мы не спамим',
    'We always open for commercial and other propositions' => 'принимаем всякого рода коммерческие и любые предложения',
    'Send pull request' => 'пришлите нам парочку пулл-реквестов, все только выиграют',
    'Zend OPcache required to optimize and speed up your project.' => 'Zend OPcache необходим для оптимизации и ускорения работы вашего проекта',
    'General sponsor' => 'Генеральный спонсор',
    'All discussions here' => '',
    'Just good guys' => 'Просто отличные парни',
    'Please, select your language below for continue.' => 'Пожалуйста, выберите язык ниже для продолжения.',
    'This option is experiment. Only MySQL works stable.' => 'Это экспериментальная возможность. На данный момент гарантирована поддержка MySQL.',
);
Java
# Support

## Timer

A timer class that can be called statically or dynamically.

Source code: [support/blob/master/src/Timer.php](https://github.com/antonioribeiro/support/blob/master/src/Timer.php)

### Methods

These are the methods:

    Timer::start();
    Timer::stop();
    Timer::isStarted();
    Timer::isStopped();
    Timer::elapsed(); // returns a formatted value 9.0192
    Timer::elapsedRaw(); // returns a double 9.019223049023
    Timer::setFormat(default = '%.4f');

You can name your timers and have more than one running:

    Timer::start('mary');
    Timer::stop('mary');
    Timer::elapsed('mary');

### Examples

    Timer::start();
    Timer::start('2nd timer');

    var_dump("started: " . (Timer::isStarted() ? 'yes' : 'no'));
    var_dump("stopped: " . (Timer::isStopped() ? 'yes' : 'no'));

    sleep(5);

    Timer::stop();

    var_dump("started: " . (Timer::isStarted() ? 'yes' : 'no'));
    var_dump("stopped: " . (Timer::isStopped() ? 'yes' : 'no'));
    var_dump("elapsed: " . Timer::elapsed());
    var_dump("raw: " . Timer::elapsedRaw());

    sleep(2);

    var_dump("'2nd timer' started: " . (Timer::isStarted('2nd timer') ? 'yes' : 'no'));
    var_dump("'2nd timer' stopped: " . (Timer::isStopped('2nd timer') ? 'yes' : 'no'));
    var_dump("'2nd timer' elapsed: " . Timer::elapsed('2nd timer'));
    var_dump("'2nd timer' raw: " . Timer::elapsedRaw('2nd timer'));

    sleep(2);

    Timer::stop('2nd timer');

    var_dump("'2nd timer' started: " . (Timer::isStarted('2nd timer') ? 'yes' : 'no'));
    var_dump("'2nd timer' stopped: " . (Timer::isStopped('2nd timer') ? 'yes' : 'no'));
    var_dump("'2nd timer' elapsed: " . Timer::elapsed('2nd timer'));
    var_dump("'2nd timer' raw: " . Timer::elapsedRaw('2nd timer'));

    Timer::setFormat('%.8f');

    var_dump("'2nd timer' elapsed 8 decimals: " . Timer::elapsed('2nd timer'));

    /// And you can instantiate it and do it all over again:

    $t = new Timer;

    $t->start();

    sleep(3);

    $t->stop();

    var_dump("elapsed dynamic: " . $t->elapsed());

This should give you this result:

    string(12) "started: yes"
    string(11) "stopped: no"
    string(11) "started: no"
    string(12) "stopped: yes"
    string(15) "elapsed: 5.0004"
    string(20) "raw: 5.0005040168762"
    string(24) "'2nd timer' started: yes"
    string(23) "'2nd timer' stopped: no"
    string(27) "'2nd timer' elapsed: 7.0008"
    string(32) "'2nd timer' raw: 7.0008120536804"
    string(23) "'2nd timer' started: no"
    string(24) "'2nd timer' stopped: yes"
    string(27) "'2nd timer' elapsed: 9.0011"
    string(32) "'2nd timer' raw: 9.0010931491852"
    string(42) "'2nd timer' elapsed 8 decimals: 9.00113106"
    string(27) "elapsed dynamic: 3.00018883"
Java
//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/CodeGenAction.h"
#include "CodeGenModule.h"
#include "CoverageMappingGen.h"
#include "MacroPPCallbacks.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/BackendUtil.h"
#include "clang/CodeGen/ModuleBuilder.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Pass.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Transforms/IPO/Internalize.h"
#include <memory>
using namespace clang;
using namespace llvm;

namespace clang {
  class BackendConsumer;

  /// DiagnosticHandler installed on the LLVMContext during backend
  /// compilation. It gates optimization-remark diagnostics on the remark
  /// patterns carried in CodeGenOptions and holds a pointer back to the
  /// owning BackendConsumer (handleDiagnostics is implemented out of line;
  /// its body is not part of this chunk).
  class ClangDiagnosticHandler final : public DiagnosticHandler {
  public:
    ClangDiagnosticHandler(const CodeGenOptions &CGOpts, BackendConsumer *BCon)
        : CodeGenOpts(CGOpts), BackendCon(BCon) {}

    bool handleDiagnostics(const DiagnosticInfo &DI) override;

    /// Analysis remarks are enabled only for pass names matching the
    /// user-provided analysis-remark pattern.
    bool isAnalysisRemarkEnabled(StringRef PassName) const override {
      return (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
              CodeGenOpts.OptimizationRemarkAnalysisPattern->match(PassName));
    }
    /// Missed-optimization remarks are enabled only for pass names matching
    /// the user-provided missed-remark pattern.
    bool isMissedOptRemarkEnabled(StringRef PassName) const override {
      return (CodeGenOpts.OptimizationRemarkMissedPattern &&
              CodeGenOpts.OptimizationRemarkMissedPattern->match(PassName));
    }
    /// Applied-optimization remarks are enabled only for pass names matching
    /// the user-provided remark pattern.
    bool isPassedOptRemarkEnabled(StringRef PassName) const override {
      return (CodeGenOpts.OptimizationRemarkPattern &&
              CodeGenOpts.OptimizationRemarkPattern->match(PassName));
    }

    /// True when any of the three remark patterns has been configured.
    bool isAnyRemarkEnabled() const override {
      return (CodeGenOpts.OptimizationRemarkAnalysisPattern ||
              CodeGenOpts.OptimizationRemarkMissedPattern ||
              CodeGenOpts.OptimizationRemarkPattern);
    }

  private:
    const CodeGenOptions &CodeGenOpts;
    BackendConsumer *BackendCon;
  };

  /// ASTConsumer that drives LLVM IR generation for a translation unit and
  /// then runs the backend on the result. (The class continues beyond this
  /// chunk.)
  class BackendConsumer : public ASTConsumer {
    using LinkModule = CodeGenAction::LinkModule;

    virtual void anchor();
    DiagnosticsEngine &Diags;
    BackendAction Action;
    const HeaderSearchOptions &HeaderSearchOpts;
    const CodeGenOptions &CodeGenOpts;
    const TargetOptions &TargetOpts;
    const LangOptions &LangOpts;
    std::unique_ptr<raw_pwrite_stream> AsmOutStream;
    ASTContext *Context;

    Timer LLVMIRGeneration;
    unsigned LLVMIRGenerationRefCount;

    /// True if we've finished generating IR. This prevents us from generating
    /// additional LLVM IR after emitting output in HandleTranslationUnit. This
    /// can happen when Clang plugins trigger additional AST deserialization.
    bool IRGenFinished = false;

    std::unique_ptr<CodeGenerator> Gen;

    SmallVector<LinkModule, 4> LinkModules;

    // This is here so that the diagnostic printer knows the module a diagnostic
    // refers to.
    llvm::Module *CurLinkModule = nullptr;

  public:
    BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
                    const HeaderSearchOptions &HeaderSearchOpts,
                    const PreprocessorOptions &PPOpts,
                    const CodeGenOptions &CodeGenOpts,
                    const TargetOptions &TargetOpts,
                    const LangOptions &LangOpts, bool TimePasses,
                    const std::string &InFile,
                    SmallVector<LinkModule, 4> LinkModules,
                    std::unique_ptr<raw_pwrite_stream> OS, LLVMContext &C,
                    CoverageSourceInfo *CoverageInfo = nullptr)
        : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
          CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
          AsmOutStream(std::move(OS)), Context(nullptr),
          LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
          LLVMIRGenerationRefCount(0),
          Gen(CreateLLVMCodeGen(Diags, InFile, HeaderSearchOpts, PPOpts,
                                CodeGenOpts, C, CoverageInfo)),
          LinkModules(std::move(LinkModules)) {
      // Note: both assignments set global timing flags, not members.
      FrontendTimesIsEnabled = TimePasses;
      llvm::TimePassesIsEnabled = TimePasses;
    }

    /// The module being populated by the CodeGenerator (owned by Gen).
    llvm::Module *getModule() const { return Gen->GetModule(); }

    /// Transfers ownership of the generated module to the caller.
    std::unique_ptr<llvm::Module> takeModule() {
      return std::unique_ptr<llvm::Module>(Gen->ReleaseModule());
    }

    CodeGenerator *getCodeGenerator() { return Gen.get(); }

    void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override {
      Gen->HandleCXXStaticMemberVarInstantiation(VD);
    }

    void Initialize(ASTContext &Ctx) override {
      assert(!Context && "initialized multiple times");

      Context = &Ctx;

      if (FrontendTimesIsEnabled)
        LLVMIRGeneration.startTimer();

      Gen->Initialize(Ctx);

      if (FrontendTimesIsEnabled)
        LLVMIRGeneration.stopTimer();
    }

    bool HandleTopLevelDecl(DeclGroupRef D) override {
      PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
                                     Context->getSourceManager(),
                                     "LLVM IR generation of declaration");

      // Recurse.
      // The ref-count lets nested invocations share one running timer
      // instead of restarting it.
      if (FrontendTimesIsEnabled) {
        LLVMIRGenerationRefCount += 1;
        if (LLVMIRGenerationRefCount == 1)
          LLVMIRGeneration.startTimer();
      }

      Gen->HandleTopLevelDecl(D);

      if (FrontendTimesIsEnabled) {
        LLVMIRGenerationRefCount -= 1;
        if (LLVMIRGenerationRefCount == 0)
          LLVMIRGeneration.stopTimer();
      }

      return true;
    }

    void HandleInlineFunctionDefinition(FunctionDecl *D) override {
      PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
                                     Context->getSourceManager(),
                                     "LLVM IR generation of inline function");
      if (FrontendTimesIsEnabled)
        LLVMIRGeneration.startTimer();

      Gen->HandleInlineFunctionDefinition(D);

      if (FrontendTimesIsEnabled)
        LLVMIRGeneration.stopTimer();
    }

    void HandleInterestingDecl(DeclGroupRef D) override {
      // Ignore interesting decls from the AST reader after IRGen is finished.
      if (!IRGenFinished)
        HandleTopLevelDecl(D);
    }

    // Links each entry in LinkModules into our module.  Returns true on error.
    bool LinkInModules() {
      for (auto &LM : LinkModules) {
        if (LM.PropagateAttrs)
          for (Function &F : *LM.Module)
            Gen->CGM().AddDefaultFnAttrs(F);

        // Remember which module is being linked so diagnostics raised during
        // the link can name it (see CurLinkModule above).
        CurLinkModule = LM.Module.get();

        bool Err;
        if (LM.Internalize) {
          Err = Linker::linkModules(
              *getModule(), std::move(LM.Module), LM.LinkFlags,
              [](llvm::Module &M, const llvm::StringSet<> &GVS) {
                internalizeModule(M, [&GVS](const llvm::GlobalValue &GV) {
                  return !GV.hasName() || (GVS.count(GV.getName()) == 0);
                });
              });
        } else {
          Err = Linker::linkModules(*getModule(), std::move(LM.Module),
                                    LM.LinkFlags);
        }

        if (Err)
          return true;
      }
      return false; // success
    }

    void HandleTranslationUnit(ASTContext &C) override {
      {
        PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
        if (FrontendTimesIsEnabled) {
          LLVMIRGenerationRefCount += 1;
          if (LLVMIRGenerationRefCount == 1)
            LLVMIRGeneration.startTimer();
        }

        Gen->HandleTranslationUnit(C);

        if (FrontendTimesIsEnabled) {
          LLVMIRGenerationRefCount -= 1;
          if (LLVMIRGenerationRefCount == 0)
            LLVMIRGeneration.stopTimer();
        }

        IRGenFinished = true;
      }

      // Silently ignore if we weren't initialized for some reason.
      if (!getModule())
        return;

      // Install an inline asm handler so that diagnostics get printed through
      // our diagnostics hooks.
      LLVMContext &Ctx = getModule()->getContext();
      LLVMContext::InlineAsmDiagHandlerTy OldHandler =
        Ctx.getInlineAsmDiagnosticHandler();
      void *OldContext = Ctx.getInlineAsmDiagnosticContext();
      Ctx.setInlineAsmDiagnosticHandler(InlineAsmDiagHandler, this);

      // Swap in our diagnostic handler for the duration of the backend run;
      // the previous one is restored below.
      std::unique_ptr<DiagnosticHandler> OldDiagnosticHandler =
          Ctx.getDiagnosticHandler();
      Ctx.setDiagnosticHandler(llvm::make_unique<ClangDiagnosticHandler>(
        CodeGenOpts, this));
      Ctx.setDiagnosticsHotnessRequested(CodeGenOpts.DiagnosticsWithHotness);
      if (CodeGenOpts.DiagnosticsHotnessThreshold != 0)
        Ctx.setDiagnosticsHotnessThreshold(
            CodeGenOpts.DiagnosticsHotnessThreshold);

      std::unique_ptr<llvm::ToolOutputFile> OptRecordFile;
      if (!CodeGenOpts.OptRecordFile.empty()) {
        std::error_code EC;
        OptRecordFile = llvm::make_unique<llvm::ToolOutputFile>(
            CodeGenOpts.OptRecordFile, EC, sys::fs::F_None);
        if (EC) {
          Diags.Report(diag::err_cannot_open_file) <<
            CodeGenOpts.OptRecordFile << EC.message();
          return;
        }

        Ctx.setDiagnosticsOutputFile(
            llvm::make_unique<yaml::Output>(OptRecordFile->os()));

        if (CodeGenOpts.getProfileUse() != CodeGenOptions::ProfileNone)
          Ctx.setDiagnosticsHotnessRequested(true);
      }

      // Link each LinkModule into our module.
      if (LinkInModules())
        return;

      EmbedBitcode(getModule(), CodeGenOpts, llvm::MemoryBufferRef());

      EmitBackendOutput(Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts,
                        LangOpts, C.getTargetInfo().getDataLayout(),
                        getModule(), Action, std::move(AsmOutStream));

      Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);

      Ctx.setDiagnosticHandler(std::move(OldDiagnosticHandler));

      // keep() prevents the remarks file from being deleted on destruction.
      if (OptRecordFile)
        OptRecordFile->keep();
    }

    void HandleTagDeclDefinition(TagDecl *D) override {
      PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
                                     Context->getSourceManager(),
                                     "LLVM IR generation of declaration");
      Gen->HandleTagDeclDefinition(D);
    }

    void HandleTagDeclRequiredDefinition(const TagDecl *D) override {
      Gen->HandleTagDeclRequiredDefinition(D);
    }

    void CompleteTentativeDefinition(VarDecl *D) override {
      Gen->CompleteTentativeDefinition(D);
    }

    void AssignInheritanceModel(CXXRecordDecl *RD) override {
      Gen->AssignInheritanceModel(RD);
    }

    void HandleVTable(CXXRecordDecl *RD) override {
      Gen->HandleVTable(RD);
    }

    /// Static trampoline matching LLVM's C-style inline-asm callback
    /// signature; forwards to the member handler via the Context pointer.
    static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM,void *Context,
                                     unsigned LocCookie) {
      SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie);
      ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
    }

    /// Get the best possible source location to represent a diagnostic that
    /// may have associated debug info.
    const FullSourceLoc
    getBestLocationFromDebugLoc(const llvm::DiagnosticInfoWithLocationBase &D,
                                bool &BadDebugInfo, StringRef &Filename,
                                unsigned &Line, unsigned &Column) const;

    void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
                               SourceLocation LocCookie);

    void DiagnosticHandlerImpl(const llvm::DiagnosticInfo &DI);
    /// Specialized handler for InlineAsm diagnostic.
    /// \return True if the diagnostic has been successfully reported, false
    /// otherwise.
    bool InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D);
    /// Specialized handler for StackSize diagnostic.
    /// \return True if the diagnostic has been successfully reported, false
    /// otherwise.
bool StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D); /// Specialized handler for unsupported backend feature diagnostic. void UnsupportedDiagHandler(const llvm::DiagnosticInfoUnsupported &D); /// Specialized handlers for optimization remarks. /// Note that these handlers only accept remarks and they always handle /// them. void EmitOptimizationMessage(const llvm::DiagnosticInfoOptimizationBase &D, unsigned DiagID); void OptimizationRemarkHandler(const llvm::DiagnosticInfoOptimizationBase &D); void OptimizationRemarkHandler( const llvm::OptimizationRemarkAnalysisFPCommute &D); void OptimizationRemarkHandler( const llvm::OptimizationRemarkAnalysisAliasing &D); void OptimizationFailureHandler( const llvm::DiagnosticInfoOptimizationFailure &D); }; void BackendConsumer::anchor() {} } bool ClangDiagnosticHandler::handleDiagnostics(const DiagnosticInfo &DI) { BackendCon->DiagnosticHandlerImpl(DI); return true; } /// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr /// buffer to be a valid FullSourceLoc. static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D, SourceManager &CSM) { // Get both the clang and llvm source managers. The location is relative to // a memory buffer that the LLVM Source Manager is handling, we need to add // a copy to the Clang source manager. const llvm::SourceMgr &LSM = *D.getSourceMgr(); // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr // already owns its one and clang::SourceManager wants to own its one. const MemoryBuffer *LBuf = LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc())); // Create the copy and transfer ownership to clang::SourceManager. // TODO: Avoid copying files into memory. std::unique_ptr<llvm::MemoryBuffer> CBuf = llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(), LBuf->getBufferIdentifier()); // FIXME: Keep a file ID map instead of creating new IDs for each location. 
FileID FID = CSM.createFileID(std::move(CBuf)); // Translate the offset into the file. unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart(); SourceLocation NewLoc = CSM.getLocForStartOfFile(FID).getLocWithOffset(Offset); return FullSourceLoc(NewLoc, CSM); } /// InlineAsmDiagHandler2 - This function is invoked when the backend hits an /// error parsing inline asm. The SMDiagnostic indicates the error relative to /// the temporary memory buffer that the inline asm parser has set up. void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D, SourceLocation LocCookie) { // There are a couple of different kinds of errors we could get here. First, // we re-format the SMDiagnostic in terms of a clang diagnostic. // Strip "error: " off the start of the message string. StringRef Message = D.getMessage(); if (Message.startswith("error: ")) Message = Message.substr(7); // If the SMDiagnostic has an inline asm source location, translate it. FullSourceLoc Loc; if (D.getLoc() != SMLoc()) Loc = ConvertBackendLocation(D, Context->getSourceManager()); unsigned DiagID; switch (D.getKind()) { case llvm::SourceMgr::DK_Error: DiagID = diag::err_fe_inline_asm; break; case llvm::SourceMgr::DK_Warning: DiagID = diag::warn_fe_inline_asm; break; case llvm::SourceMgr::DK_Note: DiagID = diag::note_fe_inline_asm; break; case llvm::SourceMgr::DK_Remark: llvm_unreachable("remarks unexpected"); } // If this problem has clang-level source location information, report the // issue in the source with a note showing the instantiated // code. if (LocCookie.isValid()) { Diags.Report(LocCookie, DiagID).AddString(Message); if (D.getLoc().isValid()) { DiagnosticBuilder B = Diags.Report(Loc, diag::note_fe_inline_asm_here); // Convert the SMDiagnostic ranges into SourceRange and attach them // to the diagnostic. 
for (const std::pair<unsigned, unsigned> &Range : D.getRanges()) { unsigned Column = D.getColumnNo(); B << SourceRange(Loc.getLocWithOffset(Range.first - Column), Loc.getLocWithOffset(Range.second - Column)); } } return; } // Otherwise, report the backend issue as occurring in the generated .s file. // If Loc is invalid, we still need to report the issue, it just gets no // location info. Diags.Report(Loc, DiagID).AddString(Message); } #define ComputeDiagID(Severity, GroupName, DiagID) \ do { \ switch (Severity) { \ case llvm::DS_Error: \ DiagID = diag::err_fe_##GroupName; \ break; \ case llvm::DS_Warning: \ DiagID = diag::warn_fe_##GroupName; \ break; \ case llvm::DS_Remark: \ llvm_unreachable("'remark' severity not expected"); \ break; \ case llvm::DS_Note: \ DiagID = diag::note_fe_##GroupName; \ break; \ } \ } while (false) #define ComputeDiagRemarkID(Severity, GroupName, DiagID) \ do { \ switch (Severity) { \ case llvm::DS_Error: \ DiagID = diag::err_fe_##GroupName; \ break; \ case llvm::DS_Warning: \ DiagID = diag::warn_fe_##GroupName; \ break; \ case llvm::DS_Remark: \ DiagID = diag::remark_fe_##GroupName; \ break; \ case llvm::DS_Note: \ DiagID = diag::note_fe_##GroupName; \ break; \ } \ } while (false) bool BackendConsumer::InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D) { unsigned DiagID; ComputeDiagID(D.getSeverity(), inline_asm, DiagID); std::string Message = D.getMsgStr().str(); // If this problem has clang-level source location information, report the // issue as being a problem in the source with a note showing the instantiated // code. SourceLocation LocCookie = SourceLocation::getFromRawEncoding(D.getLocCookie()); if (LocCookie.isValid()) Diags.Report(LocCookie, DiagID).AddString(Message); else { // Otherwise, report the backend diagnostic as occurring in the generated // .s file. // If Loc is invalid, we still need to report the diagnostic, it just gets // no location info. 
FullSourceLoc Loc;
    Diags.Report(Loc, DiagID).AddString(Message);
  }

  // We handled all the possible severities.
  return true;
}

/// Specialized handler for llvm::DiagnosticInfoStackSize.
/// Maps the backend's "stack frame larger than limit" diagnostic back to the
/// source declaration of the offending function by resolving its mangled
/// name through the code generator.
/// \return true if the diagnostic was reported here; false to let the
/// generic DiagnosticHandlerImpl path report it instead.
bool
BackendConsumer::StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D) {
  if (D.getSeverity() != llvm::DS_Warning)
    // For now, the only support we have for StackSize diagnostic is warning.
    // We do not know how to format other severities.
    return false;

  if (const Decl *ND = Gen->GetDeclForMangledName(D.getFunction().getName())) {
    // FIXME: Shouldn't need to truncate to uint32_t
    Diags.Report(ND->getASTContext().getFullLoc(ND->getLocation()),
                 diag::warn_fe_frame_larger_than)
        << static_cast<uint32_t>(D.getStackSize())
        << Decl::castToDeclContext(ND);
    return true;
  }

  // Mangled name did not resolve to a declaration; fall back to the generic
  // reporting path.
  return false;
}

/// Translate the debug-info location carried by \p D (raw file/line/column
/// strings) back into a clang FullSourceLoc via the SourceManager.
/// \p BadDebugInfo is set when the strings could not be mapped (e.g. due to
/// #line directives); the out-params echo the raw values so callers can emit
/// a note with them.
const FullSourceLoc BackendConsumer::getBestLocationFromDebugLoc(
    const llvm::DiagnosticInfoWithLocationBase &D, bool &BadDebugInfo,
    StringRef &Filename, unsigned &Line, unsigned &Column) const {
  SourceManager &SourceMgr = Context->getSourceManager();
  FileManager &FileMgr = SourceMgr.getFileManager();
  SourceLocation DILoc;

  if (D.isLocationAvailable()) {
    D.getLocation(&Filename, &Line, &Column);
    const FileEntry *FE = FileMgr.getFile(Filename);
    if (FE && Line > 0) {
      // If -gcolumn-info was not used, Column will be 0. This upsets the
      // source manager, so pass 1 if Column is not set.
      DILoc = SourceMgr.translateFileLineCol(FE, Line, Column ? Column : 1);
    }
    BadDebugInfo = DILoc.isInvalid();
  }

  // If a location isn't available, try to approximate it using the associated
  // function definition. We use the definition's right brace to differentiate
  // from diagnostics that genuinely relate to the function itself.
FullSourceLoc Loc(DILoc, SourceMgr);
  // Prefer the debug-info location; when it did not resolve, fall back to the
  // location of the function's declaration, recovered from the mangled name.
  if (Loc.isInvalid())
    if (const Decl *FD = Gen->GetDeclForMangledName(D.getFunction().getName()))
      Loc = FD->getASTContext().getFullLoc(FD->getLocation());

  if (DILoc.isInvalid() && D.isLocationAvailable())
    // If we were not able to translate the file:line:col information
    // back to a SourceLocation, at least emit a note stating that
    // we could not translate this location. This can happen in the
    // case of #line directives.
    Diags.Report(Loc, diag::note_fe_backend_invalid_loc)
        << Filename << Line << Column;

  return Loc;
}

/// Report a llvm::DiagnosticInfoUnsupported ("backend feature not supported")
/// as a frontend error at the best source location we can reconstruct from
/// the diagnostic's debug info.
void BackendConsumer::UnsupportedDiagHandler(
    const llvm::DiagnosticInfoUnsupported &D) {
  // We only support errors.
  assert(D.getSeverity() == llvm::DS_Error);

  StringRef Filename;
  unsigned Line, Column;
  bool BadDebugInfo = false;
  FullSourceLoc Loc = getBestLocationFromDebugLoc(D, BadDebugInfo, Filename,
                                                  Line, Column);

  Diags.Report(Loc, diag::err_fe_backend_unsupported) << D.getMessage().str();

  if (BadDebugInfo)
    // If we were not able to translate the file:line:col information
    // back to a SourceLocation, at least emit a note stating that
    // we could not translate this location. This can happen in the
    // case of #line directives.
    Diags.Report(Loc, diag::note_fe_backend_invalid_loc)
        << Filename << Line << Column;
}

/// Shared helper for the optimization-remark handlers: reconstruct a source
/// location from the remark's debug info and emit \p DiagID carrying the pass
/// name and message (plus hotness information when available).
void BackendConsumer::EmitOptimizationMessage(
    const llvm::DiagnosticInfoOptimizationBase &D, unsigned DiagID) {
  // We only support warnings and remarks.
assert(D.getSeverity() == llvm::DS_Remark || D.getSeverity() == llvm::DS_Warning); StringRef Filename; unsigned Line, Column; bool BadDebugInfo = false; FullSourceLoc Loc = getBestLocationFromDebugLoc(D, BadDebugInfo, Filename, Line, Column); std::string Msg; raw_string_ostream MsgStream(Msg); MsgStream << D.getMsg(); if (D.getHotness()) MsgStream << " (hotness: " << *D.getHotness() << ")"; Diags.Report(Loc, DiagID) << AddFlagValue(D.getPassName()) << MsgStream.str(); if (BadDebugInfo) // If we were not able to translate the file:line:col information // back to a SourceLocation, at least emit a note stating that // we could not translate this location. This can happen in the // case of #line directives. Diags.Report(Loc, diag::note_fe_backend_invalid_loc) << Filename << Line << Column; } void BackendConsumer::OptimizationRemarkHandler( const llvm::DiagnosticInfoOptimizationBase &D) { // Without hotness information, don't show noisy remarks. if (D.isVerbose() && !D.getHotness()) return; if (D.isPassed()) { // Optimization remarks are active only if the -Rpass flag has a regular // expression that matches the name of the pass name in \p D. if (CodeGenOpts.OptimizationRemarkPattern && CodeGenOpts.OptimizationRemarkPattern->match(D.getPassName())) EmitOptimizationMessage(D, diag::remark_fe_backend_optimization_remark); } else if (D.isMissed()) { // Missed optimization remarks are active only if the -Rpass-missed // flag has a regular expression that matches the name of the pass // name in \p D. 
if (CodeGenOpts.OptimizationRemarkMissedPattern && CodeGenOpts.OptimizationRemarkMissedPattern->match(D.getPassName())) EmitOptimizationMessage( D, diag::remark_fe_backend_optimization_remark_missed); } else { assert(D.isAnalysis() && "Unknown remark type"); bool ShouldAlwaysPrint = false; if (auto *ORA = dyn_cast<llvm::OptimizationRemarkAnalysis>(&D)) ShouldAlwaysPrint = ORA->shouldAlwaysPrint(); if (ShouldAlwaysPrint || (CodeGenOpts.OptimizationRemarkAnalysisPattern && CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName()))) EmitOptimizationMessage( D, diag::remark_fe_backend_optimization_remark_analysis); } } void BackendConsumer::OptimizationRemarkHandler( const llvm::OptimizationRemarkAnalysisFPCommute &D) { // Optimization analysis remarks are active if the pass name is set to // llvm::DiagnosticInfo::AlwasyPrint or if the -Rpass-analysis flag has a // regular expression that matches the name of the pass name in \p D. if (D.shouldAlwaysPrint() || (CodeGenOpts.OptimizationRemarkAnalysisPattern && CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName()))) EmitOptimizationMessage( D, diag::remark_fe_backend_optimization_remark_analysis_fpcommute); } void BackendConsumer::OptimizationRemarkHandler( const llvm::OptimizationRemarkAnalysisAliasing &D) { // Optimization analysis remarks are active if the pass name is set to // llvm::DiagnosticInfo::AlwasyPrint or if the -Rpass-analysis flag has a // regular expression that matches the name of the pass name in \p D. 
if (D.shouldAlwaysPrint() || (CodeGenOpts.OptimizationRemarkAnalysisPattern && CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName()))) EmitOptimizationMessage( D, diag::remark_fe_backend_optimization_remark_analysis_aliasing); } void BackendConsumer::OptimizationFailureHandler( const llvm::DiagnosticInfoOptimizationFailure &D) { EmitOptimizationMessage(D, diag::warn_fe_backend_optimization_failure); } /// This function is invoked when the backend needs /// to report something to the user. void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) { unsigned DiagID = diag::err_fe_inline_asm; llvm::DiagnosticSeverity Severity = DI.getSeverity(); // Get the diagnostic ID based. switch (DI.getKind()) { case llvm::DK_InlineAsm: if (InlineAsmDiagHandler(cast<DiagnosticInfoInlineAsm>(DI))) return; ComputeDiagID(Severity, inline_asm, DiagID); break; case llvm::DK_StackSize: if (StackSizeDiagHandler(cast<DiagnosticInfoStackSize>(DI))) return; ComputeDiagID(Severity, backend_frame_larger_than, DiagID); break; case DK_Linker: assert(CurLinkModule); // FIXME: stop eating the warnings and notes. if (Severity != DS_Error) return; DiagID = diag::err_fe_cannot_link_module; break; case llvm::DK_OptimizationRemark: // Optimization remarks are always handled completely by this // handler. There is no generic way of emitting them. OptimizationRemarkHandler(cast<OptimizationRemark>(DI)); return; case llvm::DK_OptimizationRemarkMissed: // Optimization remarks are always handled completely by this // handler. There is no generic way of emitting them. OptimizationRemarkHandler(cast<OptimizationRemarkMissed>(DI)); return; case llvm::DK_OptimizationRemarkAnalysis: // Optimization remarks are always handled completely by this // handler. There is no generic way of emitting them. 
OptimizationRemarkHandler(cast<OptimizationRemarkAnalysis>(DI)); return; case llvm::DK_OptimizationRemarkAnalysisFPCommute: // Optimization remarks are always handled completely by this // handler. There is no generic way of emitting them. OptimizationRemarkHandler(cast<OptimizationRemarkAnalysisFPCommute>(DI)); return; case llvm::DK_OptimizationRemarkAnalysisAliasing: // Optimization remarks are always handled completely by this // handler. There is no generic way of emitting them. OptimizationRemarkHandler(cast<OptimizationRemarkAnalysisAliasing>(DI)); return; case llvm::DK_MachineOptimizationRemark: // Optimization remarks are always handled completely by this // handler. There is no generic way of emitting them. OptimizationRemarkHandler(cast<MachineOptimizationRemark>(DI)); return; case llvm::DK_MachineOptimizationRemarkMissed: // Optimization remarks are always handled completely by this // handler. There is no generic way of emitting them. OptimizationRemarkHandler(cast<MachineOptimizationRemarkMissed>(DI)); return; case llvm::DK_MachineOptimizationRemarkAnalysis: // Optimization remarks are always handled completely by this // handler. There is no generic way of emitting them. OptimizationRemarkHandler(cast<MachineOptimizationRemarkAnalysis>(DI)); return; case llvm::DK_OptimizationFailure: // Optimization failures are always handled completely by this // handler. OptimizationFailureHandler(cast<DiagnosticInfoOptimizationFailure>(DI)); return; case llvm::DK_Unsupported: UnsupportedDiagHandler(cast<DiagnosticInfoUnsupported>(DI)); return; default: // Plugin IDs are not bound to any value as they are set dynamically. 
ComputeDiagRemarkID(Severity, backend_plugin, DiagID); break; } std::string MsgStorage; { raw_string_ostream Stream(MsgStorage); DiagnosticPrinterRawOStream DP(Stream); DI.print(DP); } if (DiagID == diag::err_fe_cannot_link_module) { Diags.Report(diag::err_fe_cannot_link_module) << CurLinkModule->getModuleIdentifier() << MsgStorage; return; } // Report the backend message using the usual diagnostic mechanism. FullSourceLoc Loc; Diags.Report(Loc, DiagID).AddString(MsgStorage); } #undef ComputeDiagID CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext) : Act(_Act), VMContext(_VMContext ? _VMContext : new LLVMContext), OwnsVMContext(!_VMContext) {} CodeGenAction::~CodeGenAction() { TheModule.reset(); if (OwnsVMContext) delete VMContext; } bool CodeGenAction::hasIRSupport() const { return true; } void CodeGenAction::EndSourceFileAction() { // If the consumer creation failed, do nothing. if (!getCompilerInstance().hasASTConsumer()) return; // Steal the module from the consumer. 
TheModule = BEConsumer->takeModule(); } std::unique_ptr<llvm::Module> CodeGenAction::takeModule() { return std::move(TheModule); } llvm::LLVMContext *CodeGenAction::takeLLVMContext() { OwnsVMContext = false; return VMContext; } static std::unique_ptr<raw_pwrite_stream> GetOutputStream(CompilerInstance &CI, StringRef InFile, BackendAction Action) { switch (Action) { case Backend_EmitAssembly: return CI.createDefaultOutputFile(false, InFile, "s"); case Backend_EmitLL: return CI.createDefaultOutputFile(false, InFile, "ll"); case Backend_EmitBC: return CI.createDefaultOutputFile(true, InFile, "bc"); case Backend_EmitNothing: return nullptr; case Backend_EmitMCNull: return CI.createNullOutputFile(); case Backend_EmitObj: return CI.createDefaultOutputFile(true, InFile, "o"); } llvm_unreachable("Invalid action!"); } std::unique_ptr<ASTConsumer> CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { BackendAction BA = static_cast<BackendAction>(Act); std::unique_ptr<raw_pwrite_stream> OS = CI.takeOutputStream(); if (!OS) OS = GetOutputStream(CI, InFile, BA); if (BA != Backend_EmitNothing && !OS) return nullptr; // Load bitcode modules to link with, if we need to. 
if (LinkModules.empty()) for (const CodeGenOptions::BitcodeFileToLink &F : CI.getCodeGenOpts().LinkBitcodeFiles) { auto BCBuf = CI.getFileManager().getBufferForFile(F.Filename); if (!BCBuf) { CI.getDiagnostics().Report(diag::err_cannot_open_file) << F.Filename << BCBuf.getError().message(); LinkModules.clear(); return nullptr; } Expected<std::unique_ptr<llvm::Module>> ModuleOrErr = getOwningLazyBitcodeModule(std::move(*BCBuf), *VMContext); if (!ModuleOrErr) { handleAllErrors(ModuleOrErr.takeError(), [&](ErrorInfoBase &EIB) { CI.getDiagnostics().Report(diag::err_cannot_open_file) << F.Filename << EIB.message(); }); LinkModules.clear(); return nullptr; } LinkModules.push_back({std::move(ModuleOrErr.get()), F.PropagateAttrs, F.Internalize, F.LinkFlags}); } CoverageSourceInfo *CoverageInfo = nullptr; // Add the preprocessor callback only when the coverage mapping is generated. if (CI.getCodeGenOpts().CoverageMapping) { CoverageInfo = new CoverageSourceInfo; CI.getPreprocessor().addPPCallbacks( std::unique_ptr<PPCallbacks>(CoverageInfo)); } std::unique_ptr<BackendConsumer> Result(new BackendConsumer( BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(), CI.getPreprocessorOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(), CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, InFile, std::move(LinkModules), std::move(OS), *VMContext, CoverageInfo)); BEConsumer = Result.get(); // Enable generating macro debug info only when debug info is not disabled and // also macro debug info is enabled. 
if (CI.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo && CI.getCodeGenOpts().MacroDebugInfo) { std::unique_ptr<PPCallbacks> Callbacks = llvm::make_unique<MacroPPCallbacks>(BEConsumer->getCodeGenerator(), CI.getPreprocessor()); CI.getPreprocessor().addPPCallbacks(std::move(Callbacks)); } return std::move(Result); } static void BitcodeInlineAsmDiagHandler(const llvm::SMDiagnostic &SM, void *Context, unsigned LocCookie) { SM.print(nullptr, llvm::errs()); auto Diags = static_cast<DiagnosticsEngine *>(Context); unsigned DiagID; switch (SM.getKind()) { case llvm::SourceMgr::DK_Error: DiagID = diag::err_fe_inline_asm; break; case llvm::SourceMgr::DK_Warning: DiagID = diag::warn_fe_inline_asm; break; case llvm::SourceMgr::DK_Note: DiagID = diag::note_fe_inline_asm; break; case llvm::SourceMgr::DK_Remark: llvm_unreachable("remarks unexpected"); } Diags->Report(DiagID).AddString("cannot compile inline asm"); } std::unique_ptr<llvm::Module> CodeGenAction::loadModule(MemoryBufferRef MBRef) { CompilerInstance &CI = getCompilerInstance(); SourceManager &SM = CI.getSourceManager(); // For ThinLTO backend invocations, ensure that the context // merges types based on ODR identifiers. We also need to read // the correct module out of a multi-module bitcode file. if (!CI.getCodeGenOpts().ThinLTOIndexFile.empty()) { VMContext->enableDebugTypeODRUniquing(); auto DiagErrors = [&](Error E) -> std::unique_ptr<llvm::Module> { unsigned DiagID = CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0"); handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) { CI.getDiagnostics().Report(DiagID) << EIB.message(); }); return {}; }; Expected<std::vector<BitcodeModule>> BMsOrErr = getBitcodeModuleList(MBRef); if (!BMsOrErr) return DiagErrors(BMsOrErr.takeError()); BitcodeModule *Bm = FindThinLTOModule(*BMsOrErr); // We have nothing to do if the file contains no ThinLTO module. This is // possible if ThinLTO compilation was not able to split module. 
Content of // the file was already processed by indexing and will be passed to the // linker using merged object file. if (!Bm) { auto M = llvm::make_unique<llvm::Module>("empty", *VMContext); M->setTargetTriple(CI.getTargetOpts().Triple); return M; } Expected<std::unique_ptr<llvm::Module>> MOrErr = Bm->parseModule(*VMContext); if (!MOrErr) return DiagErrors(MOrErr.takeError()); return std::move(*MOrErr); } llvm::SMDiagnostic Err; if (std::unique_ptr<llvm::Module> M = parseIR(MBRef, Err, *VMContext)) return M; // Translate from the diagnostic info to the SourceManager location if // available. // TODO: Unify this with ConvertBackendLocation() SourceLocation Loc; if (Err.getLineNo() > 0) { assert(Err.getColumnNo() >= 0); Loc = SM.translateFileLineCol(SM.getFileEntryForID(SM.getMainFileID()), Err.getLineNo(), Err.getColumnNo() + 1); } // Strip off a leading diagnostic code if there is one. StringRef Msg = Err.getMessage(); if (Msg.startswith("error: ")) Msg = Msg.substr(7); unsigned DiagID = CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0"); CI.getDiagnostics().Report(Loc, DiagID) << Msg; return {}; } void CodeGenAction::ExecuteAction() { // If this is an IR file, we have to treat it specially. 
if (getCurrentFileKind().getLanguage() == InputKind::LLVM_IR) { BackendAction BA = static_cast<BackendAction>(Act); CompilerInstance &CI = getCompilerInstance(); std::unique_ptr<raw_pwrite_stream> OS = GetOutputStream(CI, getCurrentFile(), BA); if (BA != Backend_EmitNothing && !OS) return; bool Invalid; SourceManager &SM = CI.getSourceManager(); FileID FID = SM.getMainFileID(); llvm::MemoryBuffer *MainFile = SM.getBuffer(FID, &Invalid); if (Invalid) return; TheModule = loadModule(*MainFile); if (!TheModule) return; const TargetOptions &TargetOpts = CI.getTargetOpts(); if (TheModule->getTargetTriple() != TargetOpts.Triple) { CI.getDiagnostics().Report(SourceLocation(), diag::warn_fe_override_module) << TargetOpts.Triple; TheModule->setTargetTriple(TargetOpts.Triple); } EmbedBitcode(TheModule.get(), CI.getCodeGenOpts(), MainFile->getMemBufferRef()); LLVMContext &Ctx = TheModule->getContext(); Ctx.setInlineAsmDiagnosticHandler(BitcodeInlineAsmDiagHandler, &CI.getDiagnostics()); EmitBackendOutput(CI.getDiagnostics(), CI.getHeaderSearchOpts(), CI.getCodeGenOpts(), TargetOpts, CI.getLangOpts(), CI.getTarget().getDataLayout(), TheModule.get(), BA, std::move(OS)); return; } // Otherwise follow the normal AST path. 
this->ASTFrontendAction::ExecuteAction(); } // void EmitAssemblyAction::anchor() { } EmitAssemblyAction::EmitAssemblyAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitAssembly, _VMContext) {} void EmitBCAction::anchor() { } EmitBCAction::EmitBCAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitBC, _VMContext) {} void EmitLLVMAction::anchor() { } EmitLLVMAction::EmitLLVMAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitLL, _VMContext) {} void EmitLLVMOnlyAction::anchor() { } EmitLLVMOnlyAction::EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitNothing, _VMContext) {} void EmitCodeGenOnlyAction::anchor() { } EmitCodeGenOnlyAction::EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitMCNull, _VMContext) {} void EmitObjAction::anchor() { } EmitObjAction::EmitObjAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitObj, _VMContext) {}
Java
# frozen_string_literal: true
module Stupidedi
  using Refinements

  module Schema
    #
    # The 5010 X12 "data segment requirement designator"s include
    #   M - Mandatory
    #   O - Optional
    #
    # @see X222 B.1.3.12.6 Data Segment Requirement Designators
    #
    # The HIPAA implementation guides "industry usage" include
    #   SITUATIONAL
    #   REQUIRED
    #
    class SegmentReq
      def initialize(required, forbidden, to_s)
        @required  = required
        @forbidden = forbidden
        @to_s      = to_s
      end

      # True when the segment designator marks the segment as mandatory.
      def required?
        @required
      end

      # True when the segment designator marks the segment as forbidden.
      def forbidden?
        @forbidden
      end

      # A segment is optional when it is neither required nor forbidden.
      def optional?
        !(@required || @forbidden)
      end

      # @return [void]
      def pretty_print(q)
        q.text(@to_s)
      end

      # @return [String]
      def inspect
        @to_s
      end
    end
  end
end
Java
// Copyright 2010-2021, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "prediction/dictionary_predictor.h" #include <algorithm> #include <cctype> #include <climits> // INT_MAX #include <cmath> #include <cstdint> #include <list> #include <map> #include <set> #include <string> #include <utility> #include <vector> #include "base/japanese_util.h" #include "base/logging.h" #include "base/number_util.h" #include "base/util.h" #include "composer/composer.h" #include "converter/connector.h" #include "converter/converter_interface.h" #include "converter/immutable_converter_interface.h" #include "converter/node_list_builder.h" #include "converter/segmenter.h" #include "converter/segments.h" #include "dictionary/dictionary_interface.h" #include "dictionary/pos_matcher.h" #include "prediction/predictor_interface.h" #include "prediction/suggestion_filter.h" #include "prediction/zero_query_dict.h" #include "protocol/commands.pb.h" #include "protocol/config.pb.h" #include "request/conversion_request.h" #include "usage_stats/usage_stats.h" #include "absl/container/flat_hash_map.h" #include "absl/flags/flag.h" #include "absl/strings/match.h" #include "absl/strings/string_view.h" #ifndef NDEBUG #define MOZC_DEBUG #define MOZC_WORD_LOG_MESSAGE(message) \ absl::StrCat(__FILE__, ":", __LINE__, " ", message, "\n") #define MOZC_WORD_LOG(result, message) \ (result).log.append(MOZC_WORD_LOG_MESSAGE(message)) #else // NDEBUG #define MOZC_WORD_LOG(result, message) \ {} #endif // NDEBUG namespace mozc { namespace { using ::mozc::commands::Request; using ::mozc::dictionary::DictionaryInterface; using ::mozc::dictionary::PosMatcher; using ::mozc::dictionary::Token; using ::mozc::usage_stats::UsageStats; // Used to emulate positive infinity for cost. This value is set for those // candidates that are thought to be aggressive; thus we can eliminate such // candidates from suggestion or prediction. 
// Note that for this purpose we don't
// want to use INT_MAX because someone might further add penalty after cost is
// set to INT_MAX, which leads to overflow and consequently aggressive
// candidates would appear in the top results.
constexpr int kInfinity = (2 << 20);

// Note that PREDICTION mode is much slower than SUGGESTION.
// Number of prediction calls should be minimized.
constexpr size_t kSuggestionMaxResultsSize = 256;
constexpr size_t kPredictionMaxResultsSize = 100000;

// Whether the "new spatial scoring" decoder experiment is enabled for this
// request (read from the request's decoder experiment params).
bool IsEnableNewSpatialScoring(const ConversionRequest &request) {
  return request.request()
      .decoder_experiment_params()
      .enable_new_spatial_scoring();
}

// Returns true if the |target| may be a redundant result: i.e. |target|
// starts with the already-present |reference| string.
bool MaybeRedundant(const std::string &reference, const std::string &target) {
  return absl::StartsWith(target, reference);
}

// True when the composer is in a Latin (half- or full-width ASCII) input
// mode.  Requests without a composer are never considered Latin mode.
bool IsLatinInputMode(const ConversionRequest &request) {
  return (request.has_composer() &&
          (request.composer().GetInputMode() == transliteration::HALF_ASCII ||
           request.composer().GetInputMode() == transliteration::FULL_ASCII));
}

// True when the request uses one of the QWERTY mobile romanji tables.
bool IsQwertyMobileTable(const ConversionRequest &request) {
  const auto table = request.request().special_romanji_table();
  return (table == commands::Request::QWERTY_MOBILE_TO_HIRAGANA ||
          table == commands::Request::QWERTY_MOBILE_TO_HALFWIDTHASCII);
}

// True when language-aware suggestion is enabled for this request.
bool IsLanguageAwareInputEnabled(const ConversionRequest &request) {
  const auto lang_aware = request.request().language_aware_input();
  return lang_aware == commands::Request::LANGUAGE_AWARE_SUGGESTION;
}

// Returns true if |segments| contains number history.
// Normalized number will be set to |number_key|
// Note:
//  Now this function supports arabic number candidates only and
//  we don't support kanji number candidates for now.
//  This is because We have several kanji number styles, for example,
//  "一二", "十二", "壱拾弐", etc for 12.
// TODO(toshiyuki): Define the spec and support Kanji.
// Extracts an Arabic-number history value (candidate 0 of the last history
// segment) and stores its half-width form into |number_key|.
// Returns false when there is no history or it is not an Arabic number.
bool GetNumberHistory(const Segments &segments, std::string *number_key) {
  DCHECK(number_key);
  const size_t history_size = segments.history_segments_size();
  if (history_size <= 0) {
    return false;
  }

  const Segment &last_segment = segments.history_segment(history_size - 1);
  DCHECK_GT(last_segment.candidates_size(), 0);
  const std::string &history_value = last_segment.candidate(0).value;
  if (!NumberUtil::IsArabicNumber(history_value)) {
    return false;
  }

  // Normalize full-width digits (e.g. "１２") to half-width ("12").
  japanese_util::FullWidthToHalfWidth(history_value, number_key);
  return true;
}

// Mixed conversion mixes prediction and conversion results (mobile mode).
bool IsMixedConversionEnabled(const commands::Request &request) {
  return request.mixed_conversion();
}

bool IsTypingCorrectionEnabled(const ConversionRequest &request) {
  return request.config().use_typing_correction();
}

// True when the key of candidate 0 in the last history segment has at least
// |utf8_len| characters (measured in Unicode characters, not bytes).
bool HasHistoryKeyLongerThanOrEqualTo(const Segments &segments,
                                      size_t utf8_len) {
  const size_t history_segments_size = segments.history_segments_size();
  if (history_segments_size == 0) {
    return false;
  }
  const Segment &history_segment =
      segments.history_segment(history_segments_size - 1);
  if (history_segment.candidates_size() == 0) {
    return false;
  }
  return Util::CharsLen(history_segment.candidate(0).key) >= utf8_len;
}

// A key with >= 8 characters is considered "long"; fewer realtime candidates
// are generated for long keys (see GetMaxSizeForRealtimeCandidates).
bool IsLongKeyForRealtimeCandidates(const Segments &segments) {
  constexpr int kFewResultThreshold = 8;
  return (segments.segments_size() > 0 &&
          Util::CharsLen(segments.segment(0).key()) >= kFewResultThreshold);
}

// Remaining candidate budget for realtime conversion, capped at 8 when the
// key is long.
size_t GetMaxSizeForRealtimeCandidates(const ConversionRequest &request,
                                       const Segments &segments,
                                       bool is_long_key) {
  const auto &segment = segments.conversion_segment(0);
  const size_t size = (request.max_dictionary_prediction_candidates_size() -
                       segment.candidates_size());
  return is_long_key ? std::min<size_t>(size, 8) : size;
}

size_t GetDefaultSizeForRealtimeCandidates(bool is_long_key) {
  return is_long_key ? 5 : 10;
}

// Copies |request| with the max conversion candidate size raised so that
// |realtime_candidates_size| more candidates fit on top of the current ones.
ConversionRequest GetConversionRequestForRealtimeCandidates(
    const ConversionRequest &request, size_t realtime_candidates_size,
    size_t current_candidates_size) {
  ConversionRequest ret = request;
  ret.set_max_conversion_candidates_size(current_candidates_size +
                                         realtime_candidates_size);
  return ret;
}

}  // namespace

// Dictionary traversal callback used for predictive (prefix) lookup.
// Collects matching tokens into |results| until |limit| is reached, adding a
// spatial-cost penalty for keys reached only through ambiguity expansion.
// Note: the dictionary calls OnActualKey before the OnToken calls for that
// key, so |penalty_| computed there applies to the subsequent tokens.
class DictionaryPredictor::PredictiveLookupCallback
    : public DictionaryInterface::Callback {
 public:
  PredictiveLookupCallback(DictionaryPredictor::PredictionTypes types,
                           size_t limit, size_t original_key_len,
                           const std::set<std::string> *subsequent_chars,
                           Segment::Candidate::SourceInfo source_info,
                           int unknown_id,
                           absl::string_view non_expanded_original_key,
                           const SpatialCostParams &spatial_cost_params,
                           std::vector<DictionaryPredictor::Result> *results)
      : penalty_(0),
        types_(types),
        limit_(limit),
        original_key_len_(original_key_len),
        subsequent_chars_(subsequent_chars),
        source_info_(source_info),
        unknown_id_(unknown_id),
        non_expanded_original_key_(non_expanded_original_key),
        spatial_cost_params_(spatial_cost_params),
        results_(results) {}

  PredictiveLookupCallback(const PredictiveLookupCallback &) = delete;
  PredictiveLookupCallback &operator=(const PredictiveLookupCallback &) =
      delete;

  ResultType OnKey(absl::string_view key) override {
    if (subsequent_chars_ == nullptr) {
      return TRAVERSE_CONTINUE;
    }
    // If |subsequent_chars_| was provided, check if the substring of |key|
    // obtained by removing the original lookup key starts with a string in the
    // set.  For example, if original key is "he" and "hello" was found,
    // continue traversing only when one of "l", "ll", or "llo" is in
    // |subsequent_chars_|.
    // Implementation note: Although absl::StartsWith is called at most N times
    // where N = subsequent_chars_.size(), N is very small in practice, less
    // than 10.  Thus, this linear order algorithm is fast enough.
    // Theoretically, we can construct a trie of strings in |subsequent_chars_|
    // to get more performance but it's overkill here.
    // TODO(noriyukit): std::vector<string> would be better than set<string>.
    // To this end, we need to fix Composer as well.
    const absl::string_view rest = absl::ClippedSubstr(key, original_key_len_);
    for (const std::string &chr : *subsequent_chars_) {
      if (absl::StartsWith(rest, chr)) {
        return TRAVERSE_CONTINUE;
      }
    }
    return TRAVERSE_NEXT_KEY;
  }

  ResultType OnActualKey(absl::string_view key, absl::string_view actual_key,
                         int num_expanded) override {
    // Apply a spatial penalty when the actual key was reached via ambiguity
    // expansion (num_expanded > 0) or deviates from the unexpanded original.
    penalty_ = 0;
    if (num_expanded > 0 ||
        (!non_expanded_original_key_.empty() &&
         !absl::StartsWith(actual_key, non_expanded_original_key_))) {
      penalty_ = spatial_cost_params_.GetPenalty(key);
    }
    return TRAVERSE_CONTINUE;
  }

  ResultType OnToken(absl::string_view key, absl::string_view actual_key,
                     const Token &token) override {
    // If the token is from user dictionary and its POS is unknown, it is
    // suggest-only words.  Such words are looked up only when their keys
    // exactly match |key|.  Otherwise, unigram suggestion can be annoying.
    // For example, suppose a user registers their email address as める.
    // Then, we don't want to show the email address from め but exactly
    // from める.
    if ((token.attributes & Token::USER_DICTIONARY) != 0 &&
        token.lid == unknown_id_) {
      const auto orig_key = absl::ClippedSubstr(key, 0, original_key_len_);
      if (token.key != orig_key) {
        return TRAVERSE_CONTINUE;
      }
    }
    results_->push_back(Result());
    results_->back().InitializeByTokenAndTypes(token, types_);
    results_->back().wcost += penalty_;
    results_->back().source_info |= source_info_;
    return (results_->size() < limit_) ? TRAVERSE_CONTINUE : TRAVERSE_DONE;
  }

 protected:
  int32_t penalty_;
  const DictionaryPredictor::PredictionTypes types_;
  const size_t limit_;
  const size_t original_key_len_;
  const std::set<std::string> *subsequent_chars_;
  const Segment::Candidate::SourceInfo source_info_;
  const int unknown_id_;
  absl::string_view non_expanded_original_key_;
  const SpatialCostParams spatial_cost_params_;
  std::vector<DictionaryPredictor::Result> *results_;
};

// Variant of PredictiveLookupCallback for bigram lookup: only keeps tokens
// whose value strictly extends the previous user input |history_value_|.
class DictionaryPredictor::PredictiveBigramLookupCallback
    : public PredictiveLookupCallback {
 public:
  PredictiveBigramLookupCallback(
      DictionaryPredictor::PredictionTypes types, size_t limit,
      size_t original_key_len, const std::set<std::string> *subsequent_chars,
      absl::string_view history_value,
      Segment::Candidate::SourceInfo source_info, int unknown_id,
      absl::string_view non_expanded_original_key,
      const SpatialCostParams spatial_cost_params,
      std::vector<DictionaryPredictor::Result> *results)
      : PredictiveLookupCallback(types, limit, original_key_len,
                                 subsequent_chars, source_info, unknown_id,
                                 non_expanded_original_key,
                                 spatial_cost_params, results),
        history_value_(history_value) {}

  PredictiveBigramLookupCallback(const PredictiveBigramLookupCallback &) =
      delete;
  PredictiveBigramLookupCallback &operator=(
      const PredictiveBigramLookupCallback &) = delete;

  ResultType OnToken(absl::string_view key, absl::string_view expanded_key,
                     const Token &token) override {
    // Skip the token if its value doesn't start with the previous user input,
    // |history_value_|.
    if (!absl::StartsWith(token.value, history_value_) ||
        token.value.size() <= history_value_.size()) {
      return TRAVERSE_CONTINUE;
    }
    ResultType result_type =
        PredictiveLookupCallback::OnToken(key, expanded_key, token);
    return result_type;
  }

 private:
  absl::string_view history_value_;
};

// Comparator for sorting prediction candidates.
// If we have words A and AB, for example "六本木" and "六本木ヒルズ",
// assume that cost(A) < cost(AB).
class DictionaryPredictor::ResultWCostLess { public: bool operator()(const DictionaryPredictor::Result &lhs, const DictionaryPredictor::Result &rhs) const { return lhs.wcost < rhs.wcost; } }; class DictionaryPredictor::ResultCostLess { public: bool operator()(const DictionaryPredictor::Result &lhs, const DictionaryPredictor::Result &rhs) const { return lhs.cost > rhs.cost; } }; DictionaryPredictor::DictionaryPredictor( const DataManagerInterface &data_manager, const ConverterInterface *converter, const ImmutableConverterInterface *immutable_converter, const DictionaryInterface *dictionary, const DictionaryInterface *suffix_dictionary, const Connector *connector, const Segmenter *segmenter, const PosMatcher *pos_matcher, const SuggestionFilter *suggestion_filter) : converter_(converter), immutable_converter_(immutable_converter), dictionary_(dictionary), suffix_dictionary_(suffix_dictionary), connector_(connector), segmenter_(segmenter), suggestion_filter_(suggestion_filter), counter_suffix_word_id_(pos_matcher->GetCounterSuffixWordId()), general_symbol_id_(pos_matcher->GetGeneralSymbolId()), unknown_id_(pos_matcher->GetUnknownId()), predictor_name_("DictionaryPredictor") { absl::string_view zero_query_token_array_data; absl::string_view zero_query_string_array_data; absl::string_view zero_query_number_token_array_data; absl::string_view zero_query_number_string_array_data; data_manager.GetZeroQueryData(&zero_query_token_array_data, &zero_query_string_array_data, &zero_query_number_token_array_data, &zero_query_number_string_array_data); zero_query_dict_.Init(zero_query_token_array_data, zero_query_string_array_data); zero_query_number_dict_.Init(zero_query_number_token_array_data, zero_query_number_string_array_data); } DictionaryPredictor::~DictionaryPredictor() {} void DictionaryPredictor::Finish(const ConversionRequest &request, Segments *segments) { if (segments->request_type() == Segments::REVERSE_CONVERSION) { // Do nothing for REVERSE_CONVERSION. 
return; } const Segment &segment = segments->conversion_segment(0); if (segment.candidates_size() < 1) { VLOG(2) << "candidates size < 1"; return; } const Segment::Candidate &candidate = segment.candidate(0); if (segment.segment_type() != Segment::FIXED_VALUE) { VLOG(2) << "segment is not FIXED_VALUE" << candidate.value; return; } MaybeRecordUsageStats(candidate); } void DictionaryPredictor::MaybeRecordUsageStats( const Segment::Candidate &candidate) const { if (candidate.source_info & Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_NONE) { UsageStats::IncrementCount("CommitDictionaryPredictorZeroQueryTypeNone"); } if (candidate.source_info & Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_NUMBER_SUFFIX) { UsageStats::IncrementCount( "CommitDictionaryPredictorZeroQueryTypeNumberSuffix"); } if (candidate.source_info & Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_EMOTICON) { UsageStats::IncrementCount( "CommitDictionaryPredictorZeroQueryTypeEmoticon"); } if (candidate.source_info & Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_EMOJI) { UsageStats::IncrementCount("CommitDictionaryPredictorZeroQueryTypeEmoji"); } if (candidate.source_info & Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_BIGRAM) { UsageStats::IncrementCount("CommitDictionaryPredictorZeroQueryTypeBigram"); } if (candidate.source_info & Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_SUFFIX) { UsageStats::IncrementCount("CommitDictionaryPredictorZeroQueryTypeSuffix"); } } bool DictionaryPredictor::PredictForRequest(const ConversionRequest &request, Segments *segments) const { if (segments == nullptr) { return false; } if (segments->request_type() == Segments::CONVERSION) { VLOG(2) << "request type is CONVERSION"; return false; } if (segments->conversion_segments_size() < 1) { VLOG(2) << "segment size < 1"; return false; } std::vector<Result> results; // Mixed conversion is the feature that mixes prediction and // conversion, meaning that results may include the candidates 
whose // key is exactly the same as the composition. This mode is used in mobile. const bool is_mixed_conversion = IsMixedConversionEnabled(request.request()); AggregatePredictionForRequest(request, segments, &results); if (results.empty()) { return false; } if (is_mixed_conversion) { SetPredictionCostForMixedConversion(*segments, &results); if (!IsEnableNewSpatialScoring(request)) { ApplyPenaltyForKeyExpansion(*segments, &results); } // Currently, we don't have spelling correction feature when in // the mixed conversion mode, so RemoveMissSpelledCandidates() is // not called. return AddPredictionToCandidates( request, true, // Include exact key result even if it's a bad suggestion. segments, &results); } // Normal prediction. SetPredictionCost(*segments, &results); if (!IsEnableNewSpatialScoring(request)) { ApplyPenaltyForKeyExpansion(*segments, &results); } const std::string &input_key = segments->conversion_segment(0).key(); const size_t input_key_len = Util::CharsLen(input_key); RemoveMissSpelledCandidates(input_key_len, &results); return AddPredictionToCandidates(request, false, // Remove exact key result. segments, &results); } DictionaryPredictor::PredictionTypes DictionaryPredictor::AggregatePredictionForRequest( const ConversionRequest &request, Segments *segments, std::vector<Result> *results) const { const bool is_mixed_conversion = IsMixedConversionEnabled(request.request()); // In mixed conversion mode, the number of real time candidates is increased. 
const size_t realtime_max_size = GetRealtimeCandidateMaxSize(request, *segments, is_mixed_conversion); const auto &unigram_config = GetUnigramConfig(request, *segments); return AggregatePrediction(request, realtime_max_size, unigram_config, segments, results); } DictionaryPredictor::UnigramConfig DictionaryPredictor::GetUnigramConfig( const ConversionRequest &request, const Segments &segments) const { const bool is_mixed_conversion = IsMixedConversionEnabled(request.request()); if (IsLatinInputMode(request)) { // For SUGGESTION request in Desktop, We don't look up English words when // key length is one. const size_t min_key_len_for_latin_input = (is_mixed_conversion || segments.request_type() == Segments::PREDICTION) ? 1 : 2; return {&DictionaryPredictor::AggregateUnigramCandidateForLatinInput, min_key_len_for_latin_input}; } if (is_mixed_conversion) { // In mixed conversion mode, we want to show unigram candidates even for // short keys to emulate PREDICTION mode. constexpr size_t kMinUnigramKeyLen = 1; return {&DictionaryPredictor::AggregateUnigramCandidateForMixedConversion, kMinUnigramKeyLen}; } // Normal prediction. const size_t min_unigram_key_len = (segments.request_type() == Segments::PREDICTION) ? 1 : 3; return {&DictionaryPredictor::AggregateUnigramCandidate, min_unigram_key_len}; } DictionaryPredictor::PredictionTypes DictionaryPredictor::AggregatePrediction( const ConversionRequest &request, size_t realtime_max_size, const UnigramConfig &unigram_config, Segments *segments, std::vector<Result> *results) const { DCHECK(segments); DCHECK(results); // Zero query prediction. if (segments->conversion_segment(0).key().empty()) { return AggregatePredictionForZeroQuery(request, segments, results); } const std::string &key = segments->conversion_segment(0).key(); const size_t key_len = Util::CharsLen(key); // TODO(toshiyuki): Check if we can remove this SUGGESTION check. // i.e. can we return NO_PREDICTION here for both of SUGGESTION and // PREDICTION? 
if (segments->request_type() == Segments::SUGGESTION) { if (!request.config().use_dictionary_suggest()) { VLOG(2) << "no_dictionary_suggest"; return NO_PREDICTION; } // Never trigger prediction if the key looks like zip code. if (DictionaryPredictor::IsZipCodeRequest(key) && key_len < 6) { return NO_PREDICTION; } } PredictionTypes selected_types = NO_PREDICTION; if (ShouldAggregateRealTimeConversionResults(request, *segments)) { AggregateRealtimeConversion(request, realtime_max_size, segments, results); selected_types |= REALTIME; } // In partial suggestion or prediction, only realtime candidates are used. if (segments->request_type() == Segments::PARTIAL_SUGGESTION || segments->request_type() == Segments::PARTIAL_PREDICTION) { return selected_types; } // Add unigram candidates. const size_t min_unigram_key_len = unigram_config.min_key_len; if (key_len >= min_unigram_key_len) { const auto &unigram_fn = unigram_config.unigram_fn; PredictionType type = (this->*unigram_fn)(request, *segments, results); selected_types |= type; } // Add bigram candidates. constexpr int kMinHistoryKeyLen = 3; if (HasHistoryKeyLongerThanOrEqualTo(*segments, kMinHistoryKeyLen)) { AggregateBigramPrediction(request, *segments, Segment::Candidate::SOURCE_INFO_NONE, results); selected_types |= BIGRAM; } // Add english candidates. if (IsLanguageAwareInputEnabled(request) && IsQwertyMobileTable(request) && key_len >= min_unigram_key_len) { AggregateEnglishPredictionUsingRawInput(request, *segments, results); selected_types |= ENGLISH; } // Add typing correction candidates. 
constexpr int kMinTypingCorrectionKeyLen = 3; if (IsTypingCorrectionEnabled(request) && key_len >= kMinTypingCorrectionKeyLen) { AggregateTypeCorrectingPrediction(request, *segments, results); selected_types |= TYPING_CORRECTION; } return selected_types; } bool DictionaryPredictor::AddPredictionToCandidates( const ConversionRequest &request, bool include_exact_key, Segments *segments, std::vector<Result> *results) const { DCHECK(segments); DCHECK(results); const std::string &input_key = segments->conversion_segment(0).key(); const size_t input_key_len = Util::CharsLen(input_key); std::string history_key, history_value; GetHistoryKeyAndValue(*segments, &history_key, &history_value); // exact_bigram_key does not contain ambiguity expansion, because // this is used for exact matching for the key. const std::string exact_bigram_key = history_key + input_key; Segment *segment = segments->mutable_conversion_segment(0); DCHECK(segment); // Instead of sorting all the results, we construct a heap. // This is done in linear time and // we can pop as many results as we need efficiently. std::make_heap(results->begin(), results->end(), ResultCostLess()); const size_t size = std::min( request.max_dictionary_prediction_candidates_size(), results->size()); int added = 0; std::set<std::string> seen; int added_suffix = 0; bool cursor_at_tail = request.has_composer() && request.composer().GetCursor() == request.composer().GetLength(); absl::flat_hash_map<std::string, int32_t> merged_types; #ifndef NDEBUG const bool is_debug = true; #else // NDEBUG // TODO(taku): Sets more advanced debug info depending on the verbose_level. 
const bool is_debug = request.config().verbose_level() >= 1; #endif // NDEBUG if (is_debug) { for (const auto &result : *results) { if (!result.removed) { merged_types[result.value] |= result.types; } } } auto add_candidate = [&](const Result &result, const std::string &key, const std::string &value, Segment::Candidate *candidate) { DCHECK(candidate); candidate->Init(); candidate->content_key = key; candidate->content_value = value; candidate->key = key; candidate->value = value; candidate->lid = result.lid; candidate->rid = result.rid; candidate->wcost = result.wcost; candidate->cost = result.cost; candidate->attributes = result.candidate_attributes; if ((!(candidate->attributes & Segment::Candidate::SPELLING_CORRECTION) && IsLatinInputMode(request)) || (result.types & SUFFIX)) { candidate->attributes |= Segment::Candidate::NO_VARIANTS_EXPANSION; candidate->attributes |= Segment::Candidate::NO_EXTRA_DESCRIPTION; } if (candidate->attributes & Segment::Candidate::PARTIALLY_KEY_CONSUMED) { candidate->consumed_key_size = result.consumed_key_size; // There are two scenarios to reach here. // 1. Auto partial suggestion. // e.g. composition わたしのなまえ| -> candidate 私の // 2. Partial suggestion. // e.g. composition わたしの|なまえ -> candidate 私の // To distinguish auto partial suggestion from (non-auto) partial // suggestion, see the cursor position. If the cursor is at the tail // of the composition, this is auto partial suggestion. 
if (cursor_at_tail) { candidate->attributes |= Segment::Candidate::AUTO_PARTIAL_SUGGESTION; } } candidate->source_info = result.source_info; if (result.types & REALTIME) { candidate->inner_segment_boundary = result.inner_segment_boundary; } if (result.types & TYPING_CORRECTION) { candidate->attributes |= Segment::Candidate::TYPING_CORRECTION; } SetDescription(result.types, candidate->attributes, &candidate->description); if (is_debug) { SetDebugDescription(merged_types[result.value], &candidate->description); } #ifdef MOZC_DEBUG candidate->log += "\n" + result.log; #endif // MOZC_DEBUG }; #ifdef MOZC_DEBUG auto add_debug_candidate = [&](Result result, const std::string &log) { std::string key, value; if (result.types & BIGRAM) { // remove the prefix of history key and history value. key = result.key.substr(history_key.size(), result.key.size() - history_key.size()); value = result.value.substr(history_value.size(), result.value.size() - history_value.size()); } else { key = result.key; value = result.value; } result.log.append(log); Segment::Candidate candidate; add_candidate(result, key, value, &candidate); segment->removed_candidates_for_debug_.push_back(std::move(candidate)); }; #define MOZC_ADD_DEBUG_CANDIDATE(result, log) \ add_debug_candidate(result, MOZC_WORD_LOG_MESSAGE(log)) #else // MOZC_DEBUG #define MOZC_ADD_DEBUG_CANDIDATE(result, log) \ {} #endif // MOZC_DEBUG for (size_t i = 0; i < results->size(); ++i) { // Pop a result from a heap. Please pay attention not to use results->at(i). std::pop_heap(results->begin(), results->end() - i, ResultCostLess()); const Result &result = results->at(results->size() - i - 1); if (added >= size || result.cost >= kInfinity) { break; } if (result.removed) { MOZC_ADD_DEBUG_CANDIDATE(result, "Removed flag is on"); continue; } // When |include_exact_key| is true, we don't filter the results // which have the exactly same key as the input even if it's a bad // suggestion. 
if (!(include_exact_key && (result.key == input_key)) && suggestion_filter_->IsBadSuggestion(result.value)) { MOZC_ADD_DEBUG_CANDIDATE(result, "Bad suggestion"); continue; } // Don't suggest exactly the same candidate as key. // if |include_exact_key| is true, that's not the case. if (!include_exact_key && !(result.types & REALTIME) && (((result.types & BIGRAM) && exact_bigram_key == result.value) || (!(result.types & BIGRAM) && input_key == result.value))) { MOZC_ADD_DEBUG_CANDIDATE(result, "Key == candidate"); continue; } std::string key, value; if (result.types & BIGRAM) { // remove the prefix of history key and history value. key = result.key.substr(history_key.size(), result.key.size() - history_key.size()); value = result.value.substr(history_value.size(), result.value.size() - history_value.size()); } else { key = result.key; value = result.value; } if (!seen.insert(value).second) { MOZC_ADD_DEBUG_CANDIDATE(result, "Duplicated"); continue; } // User input: "おーすとり" (len = 5) // key/value: "おーすとりら" "オーストラリア" (miss match pos = 4) if ((result.candidate_attributes & Segment::Candidate::SPELLING_CORRECTION) && key != input_key && input_key_len <= GetMissSpelledPosition(key, value) + 1) { MOZC_ADD_DEBUG_CANDIDATE(result, "Spelling correction"); continue; } if (result.types == SUFFIX && added_suffix++ >= 20) { // TODO(toshiyuki): Need refactoring for controlling suffix // prediction number after we will fix the appropriate number. 
MOZC_ADD_DEBUG_CANDIDATE(result, "Added suffix >= 20"); continue; } Segment::Candidate *candidate = segment->push_back_candidate(); add_candidate(result, key, value, candidate); ++added; } return added > 0; #undef MOZC_ADD_DEBUG_CANDIDATE } DictionaryPredictor::PredictionTypes DictionaryPredictor::AggregatePredictionForZeroQuery( const ConversionRequest &request, Segments *segments, std::vector<Result> *results) const { DCHECK(segments); DCHECK(results); if (!request.request().zero_query_suggestion()) { // Zero query is disabled by request. return NO_PREDICTION; } PredictionTypes selected_types = NO_PREDICTION; constexpr int kMinHistoryKeyLenForZeroQuery = 2; if (HasHistoryKeyLongerThanOrEqualTo(*segments, kMinHistoryKeyLenForZeroQuery)) { AggregateBigramPrediction( request, *segments, Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_BIGRAM, results); selected_types |= BIGRAM; } if (segments->history_segments_size() > 0) { AggregateZeroQuerySuffixPrediction(request, *segments, results); selected_types |= SUFFIX; } return selected_types; } DictionaryPredictor::PredictionType DictionaryPredictor::AggregateUnigramCandidateForLatinInput( const ConversionRequest &request, const Segments &segments, std::vector<Result> *results) const { AggregateEnglishPrediction(request, segments, results); return ENGLISH; } void DictionaryPredictor::SetDescription(PredictionTypes types, uint32_t attributes, std::string *description) { if (types & TYPING_CORRECTION) { Util::AppendStringWithDelimiter(" ", "補正", description); } if (attributes & Segment::Candidate::AUTO_PARTIAL_SUGGESTION) { Util::AppendStringWithDelimiter(" ", "部分", description); } } void DictionaryPredictor::SetDebugDescription(PredictionTypes types, std::string *description) { std::string debug_desc; if (types & UNIGRAM) { debug_desc.append(1, 'U'); } if (types & BIGRAM) { debug_desc.append(1, 'B'); } if (types & REALTIME_TOP) { debug_desc.append("R1"); } else if (types & REALTIME) { debug_desc.append(1, 'R'); } if 
(types & SUFFIX) { debug_desc.append(1, 'S'); } if (types & ENGLISH) { debug_desc.append(1, 'E'); } // Note that description for TYPING_CORRECTION is omitted // because it is appended by SetDescription. if (!debug_desc.empty()) { Util::AppendStringWithDelimiter(" ", debug_desc, description); } } // Returns cost for |result| when it's transitioned from |rid|. Suffix penalty // is also added for non-realtime results. int DictionaryPredictor::GetLMCost(const Result &result, int rid) const { const int cost_with_context = connector_->GetTransitionCost(rid, result.lid); int lm_cost = 0; if (result.types & SUFFIX) { // We always respect the previous context to calculate the cost of SUFFIX. // Otherwise, the suffix that doesn't match the context will be promoted. lm_cost = cost_with_context + result.wcost; } else { // Sometimes transition cost is too high and causes a bug like b/18112966. // For example, "接続詞 が" -> "始まる 動詞,五段活用,基本形" has very large // cost and "始まる" is demoted. To prevent such cases, ImmutableConverter // computes transition from BOS/EOS too; see // ImmutableConverterImpl::MakeLatticeNodesForHistorySegments(). // Here, taking the minimum of |cost1| and |cost2| has a similar effect. const int cost_without_context = connector_->GetTransitionCost(0, result.lid); lm_cost = std::min(cost_with_context, cost_without_context) + result.wcost; } if (!(result.types & REALTIME)) { // Relatime conversion already adds perfix/suffix penalties to the result. // Note that we don't add prefix penalty the role of "bunsetsu" is // ambiguous on zero-query suggestion. 
lm_cost += segmenter_->GetSuffixPenalty(result.rid); } return lm_cost; } namespace { class FindValueCallback : public DictionaryInterface::Callback { public: explicit FindValueCallback(absl::string_view target_value) : target_value_(target_value), found_(false) {} ResultType OnToken(absl::string_view, // key absl::string_view, // actual_key const Token &token) override { if (token.value != target_value_) { return TRAVERSE_CONTINUE; } found_ = true; token_ = token; return TRAVERSE_DONE; } bool found() const { return found_; } const Token &token() const { return token_; } private: absl::string_view target_value_; bool found_; Token token_; DISALLOW_COPY_AND_ASSIGN(FindValueCallback); }; } // namespace void DictionaryPredictor::Result::InitializeByTokenAndTypes( const Token &token, PredictionTypes types) { SetTypesAndTokenAttributes(types, token.attributes); key = token.key; value = token.value; wcost = token.cost; lid = token.lid; rid = token.rid; } void DictionaryPredictor::Result::SetTypesAndTokenAttributes( PredictionTypes prediction_types, Token::AttributesBitfield token_attr) { types = prediction_types; candidate_attributes = 0; if (types & TYPING_CORRECTION) { candidate_attributes |= Segment::Candidate::TYPING_CORRECTION; } if (types & (REALTIME | REALTIME_TOP)) { candidate_attributes |= Segment::Candidate::REALTIME_CONVERSION; } if (token_attr & Token::SPELLING_CORRECTION) { candidate_attributes |= Segment::Candidate::SPELLING_CORRECTION; } if (token_attr & Token::USER_DICTIONARY) { candidate_attributes |= (Segment::Candidate::USER_DICTIONARY | Segment::Candidate::NO_VARIANTS_EXPANSION); } } void DictionaryPredictor::Result::SetSourceInfoForZeroQuery( ZeroQueryType type) { switch (type) { case ZERO_QUERY_NONE: source_info |= Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_NONE; return; case ZERO_QUERY_NUMBER_SUFFIX: source_info |= Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_NUMBER_SUFFIX; return; case ZERO_QUERY_EMOTICON: source_info |= 
Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_EMOTICON; return; case ZERO_QUERY_EMOJI: source_info |= Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_EMOJI; return; case ZERO_QUERY_BIGRAM: source_info |= Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_BIGRAM; return; case ZERO_QUERY_SUFFIX: source_info |= Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_SUFFIX; return; default: LOG(ERROR) << "Should not come here"; return; } } bool DictionaryPredictor::Result::IsUserDictionaryResult() const { return (candidate_attributes & Segment::Candidate::USER_DICTIONARY) != 0; } // Here, we treat the word as English when its key consists of Latin // characters. bool DictionaryPredictor::Result::IsEnglishEntryResult() const { return Util::IsEnglishTransliteration(key); } bool DictionaryPredictor::GetHistoryKeyAndValue(const Segments &segments, std::string *key, std::string *value) const { DCHECK(key); DCHECK(value); if (segments.history_segments_size() == 0) { return false; } const Segment &history_segment = segments.history_segment(segments.history_segments_size() - 1); if (history_segment.candidates_size() == 0) { return false; } key->assign(history_segment.candidate(0).key); value->assign(history_segment.candidate(0).value); return true; } void DictionaryPredictor::SetPredictionCost( const Segments &segments, std::vector<Result> *results) const { DCHECK(results); int rid = 0; // 0 (BOS) is default if (segments.history_segments_size() > 0) { const Segment &history_segment = segments.history_segment(segments.history_segments_size() - 1); if (history_segment.candidates_size() > 0) { rid = history_segment.candidate(0).rid; // use history segment's id } } const std::string &input_key = segments.conversion_segment(0).key(); std::string history_key, history_value; GetHistoryKeyAndValue(segments, &history_key, &history_value); const std::string bigram_key = history_key + input_key; const bool is_suggestion = (segments.request_type() == Segments::SUGGESTION); // use 
the same scoring function for both unigram/bigram.
  // Bigram will be boosted because we pass the previous
  // key as a context information.
  const size_t bigram_key_len = Util::CharsLen(bigram_key);
  const size_t unigram_key_len = Util::CharsLen(input_key);

  // In the loop below, we track the minimum cost among those REALTIME
  // candidates that have the same key length as |input_key| so that we can set
  // a slightly smaller cost to REALTIME_TOP than these.
  int realtime_cost_min = kInfinity;
  Result *realtime_top_result = nullptr;

  for (size_t i = 0; i < results->size(); ++i) {
    const Result &result = results->at(i);

    // The cost of REALTIME_TOP is determined after the loop based on the
    // minimum cost for REALTIME. Just remember the pointer of result.
    if (result.types & REALTIME_TOP) {
      realtime_top_result = &results->at(i);
      continue;
    }

    const int cost = GetLMCost(result, rid);
    const size_t query_len =
        (result.types & BIGRAM) ? bigram_key_len : unigram_key_len;
    const size_t key_len = Util::CharsLen(result.key);

    if (IsAggressiveSuggestion(query_len, key_len, cost, is_suggestion,
                               results->size())) {
      // Effectively drop over-aggressive long suggestions by pushing their
      // cost to infinity.
      results->at(i).cost = kInfinity;
      continue;
    }

    // cost = -500 * log(lang_prob(w) * (1 + remain_length))    -- (1)
    // where lang_prob(w) is a language model probability of the word "w", and
    // remain_length the length of key user must type to input "w".
    //
    // Example:
    // key/value = "とうきょう/東京"
    // user_input = "とう"
    // remain_length = len("とうきょう") - len("とう") = 3
    //
    // By taking the log of (1),
    // cost  = -500 [log(lang_prob(w)) + log(1 + remain_length)]
    //       = -500 * log(lang_prob(w)) + 500 * log(1 + remain_length)
    //       = cost - 500 * log(1 + remain_length)
    // Because 500 * log(lang_prob(w)) = -cost.
    //
    // lang_prob(w) * (1 + remain_length) represents how user can reduce
    // the total types by choosing this candidate.
    // Before this simple algorithm, we have been using an SVM-based scoring,
    // but we stop using it with the following reasons.
    // 1) Hard to maintain the ranking.
    // 2) Hard to control the final results of SVM.
    // 3) Hard to debug.
    // 4) Since we used the log(remain_length) as a feature,
    //    the new ranking algorithm and SVM algorithm was essentially
    //    the same.
    // 5) Since we used the length of value as a feature, we find
    //    inconsistencies between the conversion and the prediction
    //    -- the results of top prediction and the top conversion
    //    (the candidate shown after the space key) may differ.
    //
    // The new function brings consistent results. If two candidates
    // have the same reading (key), they should have the same cost bonus
    // from the length part. This implies that the result is reranked by
    // the language model probability as long as the key part is the same.
    // This behavior is basically the same as the converter.
    //
    // TODO(team): want to find the best parameter instead of kCostFactor.
    // NOTE(review): |key_len - query_len| is a size_t subtraction; when
    // query_len > key_len it wraps to a huge unsigned value before the
    // conversion to int inside std::max<int> — the result is then
    // implementation-defined rather than a clean clamp to 0. Confirm
    // callers guarantee key_len >= query_len here.
    constexpr int kCostFactor = 500;
    results->at(i).cost =
        cost - kCostFactor * log(1.0 + std::max<int>(0, key_len - query_len));

    // Update the minimum cost for REALTIME candidates that have the same key
    // length as input_key.
    if (result.types & REALTIME && result.cost < realtime_cost_min &&
        result.key.size() == input_key.size()) {
      realtime_cost_min = result.cost;
    }
  }

  // Ensure that the REALTIME_TOP candidate has relatively smaller cost than
  // those of REALTIME candidates.
  if (realtime_top_result != nullptr) {
    realtime_top_result->cost = std::max(0, realtime_cost_min - 10);
  }
}

// Mobile (mixed conversion) variant of cost assignment: applies bad-suggestion
// demotion, non-exact-key penalties, bigram adjustment using the history
// candidate's cost, and user-dictionary promotion.
void DictionaryPredictor::SetPredictionCostForMixedConversion(
    const Segments &segments, std::vector<Result> *results) const {
  DCHECK(results);

  // ranking for mobile
  int rid = 0;        // 0 (BOS) is default
  int prev_cost = 0;  // cost of the last history candidate.
  if (segments.history_segments_size() > 0) {
    const Segment &history_segment =
        segments.history_segment(segments.history_segments_size() - 1);
    if (history_segment.candidates_size() > 0) {
      rid = history_segment.candidate(0).rid;  // use history segment's id
      prev_cost = history_segment.candidate(0).cost;
      if (prev_cost == 0) {
        // if prev_cost is set to be 0 for some reason, use default cost.
        prev_cost = 5000;
      }
    }
  }
  const size_t input_key_len =
      Util::CharsLen(segments.conversion_segment(0).key());
  for (Result &result : *results) {
    int cost = GetLMCost(result, rid);
    MOZC_WORD_LOG(result, absl::StrCat("GetLMCost: ", cost));

    // Demote filtered word here, because they are not filtered for exact
    // match. Even for exact match, we don't want to show aggressive words
    // with high ranking.
    if (suggestion_filter_->IsBadSuggestion(result.value)) {
      // Cost penalty means for bad suggestion.
      // 3453 = 500 * log(1000)
      constexpr int kBadSuggestionPenalty = 3453;
      cost += kBadSuggestionPenalty;
      MOZC_WORD_LOG(result, absl::StrCat("BadSuggestionPenalty: ", cost));
    }

    // Make exact candidates to have higher ranking.
    // Because for mobile, suggestion is the main candidates and
    // users expect the candidates for the input key on the candidates.
    if (result.types & (UNIGRAM | TYPING_CORRECTION)) {
      const size_t key_len = Util::CharsLen(result.key);
      if (key_len > input_key_len) {
        // Cost penalty means that exact candidates are evaluated
        // 50 times bigger in frequency.
        // Note that the cost is calculated by cost = -500 * log(prob)
        // 1956 = 500 * log(50)
        constexpr int kNotExactPenalty = 1956;
        cost += kNotExactPenalty;
        MOZC_WORD_LOG(result,
                      absl::StrCat("Unigram | Typing correction: ", cost));
      }
    }
    if (result.types & BIGRAM) {
      // When user inputs "六本木" and there is an entry
      // "六本木ヒルズ" in the dictionary, we can suggest
      // "ヒルズ" as a ZeroQuery suggestion. In this case,
      // We can't calculate the transition cost between "六本木"
      // and "ヒルズ". If we ignore the transition cost,
      // bigram-based suggestion will be overestimated.
      // Here we use kDefaultTransitionCost as an
      // transition cost between "六本木" and "ヒルズ". Currently,
      // the cost is basically the same as the cost between
      // "名詞,一般" and "名詞,一般".
      // TODO(taku): Adjust these parameters.
      // Seems the bigram is overestimated.
      constexpr int kDefaultTransitionCost = 1347;
      // Promoting bigram candidates.
      constexpr int kBigramBonus = 800;  // ~= 500*ln(5)
      cost += (kDefaultTransitionCost - kBigramBonus - prev_cost);
      MOZC_WORD_LOG(result, absl::StrCat("Bigram: ", cost));
    }
    if (result.candidate_attributes & Segment::Candidate::USER_DICTIONARY &&
        result.lid != general_symbol_id_) {
      // Decrease cost for words from user dictionary in order to promote them,
      // provided that it is not a general symbol (Note: emoticons are mapped
      // to general symbol). Currently user dictionary words are evaluated 5
      // times bigger in frequency, being capped by 1000 (this number is adhoc,
      // so feel free to adjust).
      constexpr int kUserDictionaryPromotionFactor = 804;  // 804 = 500 * log(5)
      constexpr int kUserDictionaryCostUpperLimit = 1000;
      cost = std::min(cost - kUserDictionaryPromotionFactor,
                      kUserDictionaryCostUpperLimit);
      MOZC_WORD_LOG(result, absl::StrCat("User dictionary: ", cost));
    }

    // Note that the cost is defined as -500 * log(prob).
    // Even after the ad hoc manipulations, cost must remain larger than 0.
    result.cost = std::max(1, cost);
    MOZC_WORD_LOG(result, absl::StrCat("SetLMCost: ", result.cost));
  }
}

// This method should be deprecated, as it is unintentionally adding extra
// spatial penalty to the candidate.
void DictionaryPredictor::ApplyPenaltyForKeyExpansion(
    const Segments &segments, std::vector<Result> *results) const {
  if (segments.conversion_segments_size() == 0) {
    return;
  }
  // Cost penalty 1151 means that expanded candidates are evaluated
  // 10 times smaller in frequency. 
// Note that the cost is calcurated by cost = -500 * log(prob) // 1151 = 500 * log(10) constexpr int kKeyExpansionPenalty = 1151; const std::string &conversion_key = segments.conversion_segment(0).key(); for (size_t i = 0; i < results->size(); ++i) { Result &result = results->at(i); if (result.types & TYPING_CORRECTION) { continue; } if (!absl::StartsWith(result.key, conversion_key)) { result.cost += kKeyExpansionPenalty; MOZC_WORD_LOG(result, absl::StrCat("KeyExpansionPenalty: ", result.cost)); } } } size_t DictionaryPredictor::GetMissSpelledPosition( const std::string &key, const std::string &value) const { std::string hiragana_value; japanese_util::KatakanaToHiragana(value, &hiragana_value); // value is mixed type. return true if key == request_key. if (Util::GetScriptType(hiragana_value) != Util::HIRAGANA) { return Util::CharsLen(key); } // Find the first position of character where miss spell occurs. int position = 0; ConstChar32Iterator key_iter(key); for (ConstChar32Iterator hiragana_iter(hiragana_value); !hiragana_iter.Done() && !key_iter.Done(); hiragana_iter.Next(), key_iter.Next(), ++position) { if (hiragana_iter.Get() != key_iter.Get()) { return position; } } // not find. return the length of key. while (!key_iter.Done()) { ++position; key_iter.Next(); } return position; } void DictionaryPredictor::RemoveMissSpelledCandidates( size_t request_key_len, std::vector<Result> *results) const { DCHECK(results); if (results->size() <= 1) { return; } int spelling_correction_size = 5; for (size_t i = 0; i < results->size(); ++i) { const Result &result = (*results)[i]; if (!(result.candidate_attributes & Segment::Candidate::SPELLING_CORRECTION)) { continue; } // Only checks at most 5 spelling corrections to avoid the case // like all candidates have SPELLING_CORRECTION. 
if (--spelling_correction_size == 0) { return; } std::vector<size_t> same_key_index, same_value_index; for (size_t j = 0; j < results->size(); ++j) { if (i == j) { continue; } const Result &target_result = (*results)[j]; if (target_result.candidate_attributes & Segment::Candidate::SPELLING_CORRECTION) { continue; } if (target_result.key == result.key) { same_key_index.push_back(j); } if (target_result.value == result.value) { same_value_index.push_back(j); } } // delete same_key_index and same_value_index if (!same_key_index.empty() && !same_value_index.empty()) { results->at(i).removed = true; MOZC_WORD_LOG(results->at(i), "Removed. same_(key|value)_index."); for (size_t k = 0; k < same_key_index.size(); ++k) { results->at(same_key_index[k]).removed = true; MOZC_WORD_LOG(results->at(i), "Removed. same_(key|value)_index."); } } else if (same_key_index.empty() && !same_value_index.empty()) { results->at(i).removed = true; MOZC_WORD_LOG(results->at(i), "Removed. same_value_index."); } else if (!same_key_index.empty() && same_value_index.empty()) { for (size_t k = 0; k < same_key_index.size(); ++k) { results->at(same_key_index[k]).removed = true; MOZC_WORD_LOG(results->at(i), "Removed. same_key_index."); } if (request_key_len <= GetMissSpelledPosition(result.key, result.value)) { results->at(i).removed = true; MOZC_WORD_LOG(results->at(i), "Removed. Invalid MissSpelledPosition."); } } } } bool DictionaryPredictor::IsAggressiveSuggestion( size_t query_len, size_t key_len, int cost, bool is_suggestion, size_t total_candidates_size) const { // Temporal workaround for fixing the problem where longer sentence-like // suggestions are shown when user input is very short. // "ただしい" => "ただしいけめんにかぎる" // "それでもぼ" => "それでもぼくはやっていない". // If total_candidates_size is small enough, we don't perform // special filtering. e.g., "せんとち" has only two candidates, so // showing "千と千尋の神隠し" is OK. // Also, if the cost is too small (< 5000), we allow to display // long phrases. 
Examples include "よろしくおねがいします".
  if (is_suggestion && total_candidates_size >= 10 && key_len >= 8 &&
      cost >= 5000 && query_len <= static_cast<size_t>(0.4 * key_len)) {
    return true;
  }
  return false;
}

// Decides how many realtime-conversion candidates to generate for the
// current request type; returns 0 when there is no conversion segment.
size_t DictionaryPredictor::GetRealtimeCandidateMaxSize(
    const ConversionRequest &request, const Segments &segments,
    bool mixed_conversion) const {
  const Segments::RequestType request_type = segments.request_type();
  DCHECK(request_type == Segments::PREDICTION ||
         request_type == Segments::SUGGESTION ||
         request_type == Segments::PARTIAL_PREDICTION ||
         request_type == Segments::PARTIAL_SUGGESTION);
  if (segments.conversion_segments_size() == 0) {
    return 0;
  }
  const bool is_long_key = IsLongKeyForRealtimeCandidates(segments);
  const size_t max_size =
      GetMaxSizeForRealtimeCandidates(request, segments, is_long_key);
  const size_t default_size = GetDefaultSizeForRealtimeCandidates(is_long_key);
  size_t size = 0;
  switch (request_type) {
    case Segments::PREDICTION:
      size = mixed_conversion ? max_size : default_size;
      break;
    case Segments::SUGGESTION:
      // Fewer candidates are needed basically.
      // But on mixed_conversion mode we should behave like conversion mode.
      size = mixed_conversion ? default_size : 1;
      break;
    case Segments::PARTIAL_PREDICTION:
      // This is kind of prediction so richer result than PARTIAL_SUGGESTION
      // is needed.
      size = max_size;
      break;
    case Segments::PARTIAL_SUGGESTION:
      // PARTIAL_SUGGESTION works like conversion mode so returning
      // some candidates is needed.
      size = default_size;
      break;
    default:
      size = 0;  // Never reach here
  }
  return std::min(max_size, size);
}

// Runs the actual converter on the whole input and appends its top result as
// a single REALTIME | REALTIME_TOP candidate. Returns false when the
// conversion itself fails.
bool DictionaryPredictor::PushBackTopConversionResult(
    const ConversionRequest &request, const Segments &segments,
    std::vector<Result> *results) const {
  DCHECK_EQ(1, segments.conversion_segments_size());

  Segments tmp_segments = segments;
  ConversionRequest tmp_request = request;
  tmp_request.set_max_conversion_candidates_size(20);
  tmp_request.set_composer_key_selection(ConversionRequest::PREDICTION_KEY);
  // Some rewriters cause significant performance loss. So we skip them.
  tmp_request.set_skip_slow_rewriters(true);
  // This method emulates usual converter's behavior so here disable
  // partial candidates.
  tmp_request.set_create_partial_candidates(false);
  if (!converter_->StartConversionForRequest(tmp_request, &tmp_segments)) {
    return false;
  }

  results->push_back(Result());
  Result *result = &results->back();
  result->key = segments.conversion_segment(0).key();
  // lid comes from the first converted segment, rid from the last one, so the
  // concatenated candidate carries the correct boundary POS ids.
  result->lid = tmp_segments.conversion_segment(0).candidate(0).lid;
  result->rid =
      tmp_segments
          .conversion_segment(tmp_segments.conversion_segments_size() - 1)
          .candidate(0)
          .rid;
  result->SetTypesAndTokenAttributes(REALTIME | REALTIME_TOP, Token::NONE);
  result->candidate_attributes |= Segment::Candidate::NO_VARIANTS_EXPANSION;

  // Concatenate the top candidates.
  // Note that since StartConversionForRequest() runs in conversion mode, the
  // resulting |tmp_segments| doesn't have inner_segment_boundary. We need to
  // construct it manually here.
  // TODO(noriyukit): This is code duplicate in converter/nbest_generator.cc and
  // we should refactor code after finding more good design. 
  bool inner_segment_boundary_success = true;
  for (size_t i = 0; i < tmp_segments.conversion_segments_size(); ++i) {
    const Segment &segment = tmp_segments.conversion_segment(i);
    const Segment::Candidate &candidate = segment.candidate(0);
    result->value.append(candidate.value);
    result->wcost += candidate.cost;

    uint32_t encoded_lengths;
    if (inner_segment_boundary_success &&
        Segment::Candidate::EncodeLengths(
            candidate.key.size(), candidate.value.size(),
            candidate.content_key.size(), candidate.content_value.size(),
            &encoded_lengths)) {
      result->inner_segment_boundary.push_back(encoded_lengths);
    } else {
      inner_segment_boundary_success = false;
    }
  }
  if (!inner_segment_boundary_success) {
    // A partially-built boundary list would be misleading, so drop it
    // entirely on any encoding failure.
    LOG(WARNING) << "Failed to construct inner segment boundary";
    result->inner_segment_boundary.clear();
  }
  return true;
}

// Collects realtime (as-you-type) conversion candidates: optionally the real
// converter's top result, then up to |realtime_candidates_size| results from
// the immutable converter. Candidates temporarily added to |segments| are
// erased again before returning.
void DictionaryPredictor::AggregateRealtimeConversion(
    const ConversionRequest &request, size_t realtime_candidates_size,
    Segments *segments, std::vector<Result> *results) const {
  DCHECK(converter_);
  DCHECK(immutable_converter_);
  DCHECK(segments);
  DCHECK(results);

  // TODO(noriyukit): Currently, |segments| is abused as a temporary output from
  // the immutable converter. Therefore, the first segment needs to be mutable.
  // Fix this bad abuse.
  Segment *segment = segments->mutable_conversion_segment(0);
  DCHECK(!segment->key().empty());

  // First insert a top conversion result.
  if (request.use_actual_converter_for_realtime_conversion()) {
    if (!PushBackTopConversionResult(request, *segments, results)) {
      LOG(WARNING) << "Realtime conversion with converter failed";
    }
  }

  if (realtime_candidates_size == 0) {
    return;
  }
  // In what follows, add results from immutable converter.
  // TODO(noriyukit): The |immutable_converter_| used below can be replaced by
  // |converter_| in principle. There's a problem of ranking when we get
  // multiple segments, i.e., how to concatenate candidates in each segment.
  // Currently, immutable converter handles such ranking in prediction mode to
  // generate single segment results. So we want to share that code.

  // Preserve the current candidates_size to restore segments at the end of
  // this method.
  const size_t prev_candidates_size = segment->candidates_size();

  const ConversionRequest request_for_realtime =
      GetConversionRequestForRealtimeCandidates(
          request, realtime_candidates_size, prev_candidates_size);
  if (!immutable_converter_->ConvertForRequest(request_for_realtime,
                                               segments) ||
      prev_candidates_size >= segment->candidates_size()) {
    LOG(WARNING) << "Convert failed";
    return;
  }

  // A little tricky treatment:
  // Since ImmutableConverter::Convert creates a set of new candidates,
  // copy them into the array of Results.
  for (size_t i = prev_candidates_size; i < segment->candidates_size(); ++i) {
    const Segment::Candidate &candidate = segment->candidate(i);
    results->push_back(Result());
    Result *result = &results->back();
    result->key = candidate.key;
    result->value = candidate.value;
    result->wcost = candidate.wcost;
    result->lid = candidate.lid;
    result->rid = candidate.rid;
    result->inner_segment_boundary = candidate.inner_segment_boundary;
    result->SetTypesAndTokenAttributes(REALTIME, Token::NONE);
    result->candidate_attributes |= candidate.attributes;
    result->consumed_key_size = candidate.consumed_key_size;
  }
  // Remove candidates created by ImmutableConverter.
  segment->erase_candidates(prev_candidates_size,
                            segment->candidates_size() - prev_candidates_size);
}

// Returns the per-backend lookup cutoff: larger for PREDICTION than for
// SUGGESTION.
size_t DictionaryPredictor::GetCandidateCutoffThreshold(
    const Segments &segments) const {
  DCHECK(segments.request_type() == Segments::PREDICTION ||
         segments.request_type() == Segments::SUGGESTION);
  if (segments.request_type() == Segments::PREDICTION) {
    // If PREDICTION, more candidates are needed than for SUGGESTION. 
    return kPredictionMaxResultsSize;
  }
  return kSuggestionMaxResultsSize;
}

// Looks up unigram (no-history) predictive candidates from the system
// dictionary; discards the batch entirely when it hits the cutoff, since
// disambiguating that many candidates is not useful.
DictionaryPredictor::PredictionType
DictionaryPredictor::AggregateUnigramCandidate(
    const ConversionRequest &request, const Segments &segments,
    std::vector<Result> *results) const {
  DCHECK(results);
  DCHECK(dictionary_);
  DCHECK(segments.request_type() == Segments::PREDICTION ||
         segments.request_type() == Segments::SUGGESTION);

  const size_t cutoff_threshold = GetCandidateCutoffThreshold(segments);
  const size_t prev_results_size = results->size();

  GetPredictiveResults(*dictionary_, "", request, segments, UNIGRAM,
                       cutoff_threshold, Segment::Candidate::SOURCE_INFO_NONE,
                       unknown_id_, results);
  const size_t unigram_results_size = results->size() - prev_results_size;

  // If size reaches max_results_size (== cutoff_threshold),
  // we don't show the candidates, since disambiguation from
  // 256 candidates is hard. (It may exceed max_results_size, because this is
  // just a limit for each backend, so total number may be larger)
  if (unigram_results_size >= cutoff_threshold) {
    results->resize(prev_results_size);
  }
  return UNIGRAM;
}

// Member wrapper that forwards to the static overload with this predictor's
// dictionary and unknown POS id.
DictionaryPredictor::PredictionType
DictionaryPredictor::AggregateUnigramCandidateForMixedConversion(
    const ConversionRequest &request, const Segments &segments,
    std::vector<Result> *results) const {
  DCHECK(segments.request_type() == Segments::PREDICTION ||
         segments.request_type() == Segments::SUGGESTION);
  AggregateUnigramCandidateForMixedConversion(*dictionary_, request, segments,
                                              unknown_id_, results);
  return UNIGRAM;
}

// Static implementation: looks up unigram candidates, then prunes candidates
// that look redundant relative to the cheapest results (see the in-loop
// comments), reviving up to five of the pruned ones by cost order.
void DictionaryPredictor::AggregateUnigramCandidateForMixedConversion(
    const dictionary::DictionaryInterface &dictionary,
    const ConversionRequest &request, const Segments &segments, int unknown_id,
    std::vector<Result> *results) {
  const size_t cutoff_threshold = kPredictionMaxResultsSize;

  std::vector<Result> raw_result;
  // No history key
  GetPredictiveResults(dictionary, "", request, segments, UNIGRAM,
                       cutoff_threshold, Segment::Candidate::SOURCE_INFO_NONE,
                       unknown_id, &raw_result);

  // Hereafter, we split "Needed Results" and "(maybe) Unneeded Results."
  // The algorithm is:
  // 1) Take the Result with minimum cost.
  // 2) Remove results which is "redundant" (defined by MaybeRedundant),
  //    from remaining results.
  // 3) Repeat 1) and 2) five times.
  // Note: to reduce the number of memory allocation, we swap out the
  //   "redundant" results to the end of the |results| vector.
  constexpr size_t kDeleteTrialNum = 5;

  // min_iter is the beginning of the remaining results (inclusive), and
  // max_iter is the end of the remaining results (exclusive).
  typedef std::vector<Result>::iterator Iter;
  Iter min_iter = raw_result.begin();
  Iter max_iter = raw_result.end();
  for (size_t i = 0; i < kDeleteTrialNum; ++i) {
    if (min_iter == max_iter) {
      break;
    }

    // Find the Result with minimum cost. Swap it with the beginning element.
    std::iter_swap(min_iter,
                   std::min_element(min_iter, max_iter, ResultWCostLess()));

    const Result &reference_result = *min_iter;

    // Preserve the reference result.
    ++min_iter;

    // Traverse all remaining elements and check if each result is redundant.
    for (Iter iter = min_iter; iter != max_iter;) {
      // - We do not filter user dictionary word.
      const bool should_check_redundant = !iter->IsUserDictionaryResult();
      if (should_check_redundant &&
          MaybeRedundant(reference_result.value, iter->value)) {
        // Swap out the redundant result.
        --max_iter;
        std::iter_swap(iter, max_iter);
      } else {
        ++iter;
      }
    }
  }

  // Then the |raw_result| contains;
  // [begin, min_iter): reference results in the above loop.
  // [max_iter, end): (maybe) redundant results.
  // [min_iter, max_iter): remaining results.
  // Here, we revive the redundant results up to five in the result cost order.
  constexpr size_t kDoNotDeleteNum = 5;
  if (std::distance(max_iter, raw_result.end()) >= kDoNotDeleteNum) {
    std::partial_sort(max_iter, max_iter + kDoNotDeleteNum, raw_result.end(),
                      ResultWCostLess());
    max_iter += kDoNotDeleteNum;
  } else {
    max_iter = raw_result.end();
  }

  // Finally output the result.
  results->insert(results->end(), raw_result.begin(), max_iter);
}

// Aggregates bigram candidates based on the last history segment's top
// candidate; no-op when there is no usable history.
void DictionaryPredictor::AggregateBigramPrediction(
    const ConversionRequest &request, const Segments &segments,
    Segment::Candidate::SourceInfo source_info,
    std::vector<Result> *results) const {
  DCHECK(results);
  DCHECK(dictionary_);

  // TODO(toshiyuki): Support suggestion from the last 2 histories.
  //  ex) "六本木"+"ヒルズ"->"レジデンス"
  std::string history_key, history_value;
  if (!GetHistoryKeyAndValue(segments, &history_key, &history_value)) {
    return;
  }
  AddBigramResultsFromHistory(history_key, history_value, request, segments,
                              source_info, results);
}

// Looks up bigram candidates prefixed by the history key/value and filters
// them through CheckBigramResult().
void DictionaryPredictor::AddBigramResultsFromHistory(
    const std::string &history_key, const std::string &history_value,
    const ConversionRequest &request, const Segments &segments,
    Segment::Candidate::SourceInfo source_info,
    std::vector<Result> *results) const {
  // Check that history_key/history_value are in the dictionary.
  FindValueCallback find_history_callback(history_value);
  dictionary_->LookupPrefix(history_key, request, &find_history_callback);

  // History value is not found in the dictionary.
  // User may create this the history candidate from T13N or segment
  // expand/shrink operations. 
  if (!find_history_callback.found()) {
    return;
  }

  const size_t cutoff_threshold = GetCandidateCutoffThreshold(segments);
  const size_t prev_results_size = results->size();
  GetPredictiveResultsForBigram(*dictionary_, history_key, history_value,
                                request, segments, BIGRAM, cutoff_threshold,
                                source_info, unknown_id_, results);
  const size_t bigram_results_size = results->size() - prev_results_size;

  // if size reaches max_results_size,
  // we don't show the candidates, since disambiguation from
  // 256 candidates is hard. (It may exceed max_results_size, because this is
  // just a limit for each backend, so total number may be larger)
  if (bigram_results_size >= cutoff_threshold) {
    results->resize(prev_results_size);
    return;
  }

  // Obtain the character type of the last history value.
  const size_t history_value_size = Util::CharsLen(history_value);
  if (history_value_size == 0) {
    return;
  }

  const Util::ScriptType history_ctype = Util::GetScriptType(history_value);
  const Util::ScriptType last_history_ctype = Util::GetScriptType(
      Util::Utf8SubString(history_value, history_value_size - 1, 1));
  for (size_t i = prev_results_size; i < results->size(); ++i) {
    CheckBigramResult(find_history_callback.token(), history_ctype,
                      last_history_ctype, request, &(*results)[i]);
  }
}

// Filter out irrelevant bigrams. For example, we don't want to
// suggest "リカ" from the history "アメ".
void DictionaryPredictor::CheckBigramResult(
    const Token &history_token, const Util::ScriptType history_ctype,
    const Util::ScriptType last_history_ctype, const ConversionRequest &request,
    Result *result) const {
  DCHECK(result);

  const std::string &history_key = history_token.key;
  const std::string &history_value = history_token.value;
  // |key|/|value| are the suffix parts after stripping the history prefix.
  const std::string key(result->key, history_key.size(),
                        result->key.size() - history_key.size());
  const std::string value(result->value, history_value.size(),
                          result->value.size() - history_value.size());

  // Don't suggest 0-length key/value.
  if (key.empty() || value.empty()) {
    result->removed = true;
    MOZC_WORD_LOG(*result, "Removed. key, value or both are empty.");
    return;
  }

  const Util::ScriptType ctype =
      Util::GetScriptType(Util::Utf8SubString(value, 0, 1));

  if (history_ctype == Util::KANJI && ctype == Util::KATAKANA) {
    // Do not filter "六本木ヒルズ"
    MOZC_WORD_LOG(*result, "Valid bigram. Kanji + Katakana pattern.");
    return;
  }

  // If freq("アメ") < freq("アメリカ"), we don't
  // need to suggest it. As "アメリカ" should already be
  // suggested when user type "アメ".
  // Note that wcost = -500 * log(prob).
  if (ctype != Util::KANJI && history_token.cost > result->wcost) {
    result->removed = true;
    MOZC_WORD_LOG(*result,
                  "Removed. The prefix's score is lower than the whole.");
    return;
  }

  // If character type doesn't change, this boundary might NOT
  // be a word boundary. Only use it if the entire key is reasonably long.
  const size_t key_len = Util::CharsLen(result->key);
  if (ctype == last_history_ctype &&
      ((ctype == Util::HIRAGANA && key_len <= 9) ||
       (ctype == Util::KATAKANA && key_len <= 5))) {
    result->removed = true;
    MOZC_WORD_LOG(*result, "Removed. Short Hiragana (<= 9) or Katakana (<= 5)");
    return;
  }

  // The suggested key/value pair must exist in the dictionary.
  // For example, we don't want to suggest "ターネット" from
  // the history "イン".
  // If character type is Kanji and the suggestion is not a
  // zero_query_suggestion, we relax this condition, as there are
  // many Kanji-compounds which may not in the dictionary. For example,
  // we want to suggest "霊長類研究所" from the history "京都大学".
  if (ctype == Util::KANJI && Util::CharsLen(value) >= 2) {
    // Do not filter this.
    // TODO(toshiyuki): one-length kanji prediction may be annoying other than
    // some exceptions, "駅", "口", etc
    MOZC_WORD_LOG(*result, "Valid bigram. Kanji suffix (>= 2).");
    return;
  }

  // Check if the word is in the dictionary or not.
  // For Hiragana words, check if that word is in a key of values.
  // This is for a situation that
  // ありがとうございました is not in the dictionary, but
  // ありがとう御座いました is in the dictionary.
  if (ctype == Util::HIRAGANA) {
    if (!dictionary_->HasKey(key)) {
      result->removed = true;
      MOZC_WORD_LOG(*result, "Removed. No keys are found.");
      return;
    }
  } else {
    FindValueCallback callback(value);
    dictionary_->LookupPrefix(key, request, &callback);
    if (!callback.found()) {
      result->removed = true;
      MOZC_WORD_LOG(*result, "Removed. No prefix found.");
      return;
    }
  }

  MOZC_WORD_LOG(*result, "Valid bigram.");
}

// Performs a predictive dictionary lookup for |history_key| + the current
// conversion key, handling composer-based key expansion (kana ambiguity)
// when a composer is available.
void DictionaryPredictor::GetPredictiveResults(
    const DictionaryInterface &dictionary, const std::string &history_key,
    const ConversionRequest &request, const Segments &segments,
    PredictionTypes types, size_t lookup_limit,
    Segment::Candidate::SourceInfo source_info, int unknown_id_,
    std::vector<Result> *results) {
  if (!request.has_composer()) {
    std::string input_key = history_key;
    input_key.append(segments.conversion_segment(0).key());
    PredictiveLookupCallback callback(types, lookup_limit, input_key.size(),
                                      nullptr, source_info, unknown_id_, "",
                                      GetSpatialCostParams(request), results);
    dictionary.LookupPredictive(input_key, request, &callback);
    return;
  }

  // If we have ambiguity for the input, get expanded key.
  // Example1 roman input: for "あk", we will get |base|, "あ" and |expanded|,
  // "か", "き", etc
  // Example2 kana input: for "あか", we will get |base|, "あ" and |expanded|,
  // "か", and "が".
  std::string base;
  std::set<std::string> expanded;
  request.composer().GetQueriesForPrediction(&base, &expanded);
  std::string input_key;
  if (expanded.empty()) {
    input_key.assign(history_key).append(base);
    PredictiveLookupCallback callback(types, lookup_limit, input_key.size(),
                                      nullptr, source_info, unknown_id_, "",
                                      GetSpatialCostParams(request), results);
    dictionary.LookupPredictive(input_key, request, &callback);
    return;
  }

  // `non_expanded_original_key` keeps the original key request before
  // key expansions. 
This key is passed to the callback so that it can
  // identify whether the key is actually expanded or not.
  const std::string non_expanded_original_key =
      IsEnableNewSpatialScoring(request)
          ? history_key + segments.conversion_segment(0).key()
          : "";

  // |expanded| is a very small set, so calling LookupPredictive multiple
  // times is not so expensive. Also, the number of lookup results is limited
  // by |lookup_limit|.
  for (const std::string &expanded_char : expanded) {
    input_key.assign(history_key).append(base).append(expanded_char);
    PredictiveLookupCallback callback(types, lookup_limit, input_key.size(),
                                      nullptr, source_info, unknown_id_,
                                      non_expanded_original_key,
                                      GetSpatialCostParams(request), results);
    dictionary.LookupPredictive(input_key, request, &callback);
  }
}

// Bigram variant of GetPredictiveResults(): looks up |history_key| + current
// key and lets PredictiveBigramLookupCallback match against |history_value|.
void DictionaryPredictor::GetPredictiveResultsForBigram(
    const DictionaryInterface &dictionary, const std::string &history_key,
    const std::string &history_value, const ConversionRequest &request,
    const Segments &segments, PredictionTypes types, size_t lookup_limit,
    Segment::Candidate::SourceInfo source_info, int unknown_id_,
    std::vector<Result> *results) const {
  if (!request.has_composer()) {
    std::string input_key = history_key;
    input_key.append(segments.conversion_segment(0).key());
    PredictiveBigramLookupCallback callback(
        types, lookup_limit, input_key.size(), nullptr, history_value,
        source_info, unknown_id_, "", GetSpatialCostParams(request), results);
    dictionary.LookupPredictive(input_key, request, &callback);
    return;
  }

  // If we have ambiguity for the input, get expanded key.
  // Example1 roman input: for "あk", we will get |base|, "あ" and |expanded|,
  // "か", "き", etc
  // Example2 kana input: for "あか", we will get |base|, "あ" and |expanded|,
  // "か", and "が".
  std::string base;
  std::set<std::string> expanded;
  request.composer().GetQueriesForPrediction(&base, &expanded);
  const std::string input_key = history_key + base;
  const std::string non_expanded_original_key =
      IsEnableNewSpatialScoring(request)
          ? history_key + segments.conversion_segment(0).key()
          : "";
  PredictiveBigramLookupCallback callback(
      types, lookup_limit, input_key.size(),
      expanded.empty() ? nullptr : &expanded, history_value, source_info,
      unknown_id_, non_expanded_original_key, GetSpatialCostParams(request),
      results);
  dictionary.LookupPredictive(input_key, request, &callback);
}

// English-key lookup: normalizes ALL-UPPER and Capitalized keys to lower case
// for the lookup and re-applies the original casing to the results.
void DictionaryPredictor::GetPredictiveResultsForEnglishKey(
    const DictionaryInterface &dictionary, const ConversionRequest &request,
    const std::string &input_key, PredictionTypes types, size_t lookup_limit,
    std::vector<Result> *results) const {
  const size_t prev_results_size = results->size();
  if (Util::IsUpperAscii(input_key)) {
    // For upper case key, look up its lower case version and then transform
    // the results to upper case.
    std::string key(input_key);
    Util::LowerString(&key);
    PredictiveLookupCallback callback(types, lookup_limit, key.size(), nullptr,
                                      Segment::Candidate::SOURCE_INFO_NONE,
                                      unknown_id_, "",
                                      GetSpatialCostParams(request), results);
    dictionary.LookupPredictive(key, request, &callback);
    for (size_t i = prev_results_size; i < results->size(); ++i) {
      Util::UpperString(&(*results)[i].value);
    }
  } else if (Util::IsCapitalizedAscii(input_key)) {
    // For capitalized key, look up its lower case version and then transform
    // the results to capital.
    std::string key(input_key);
    Util::LowerString(&key);
    PredictiveLookupCallback callback(types, lookup_limit, key.size(), nullptr,
                                      Segment::Candidate::SOURCE_INFO_NONE,
                                      unknown_id_, "",
                                      GetSpatialCostParams(request), results);
    dictionary.LookupPredictive(key, request, &callback);
    for (size_t i = prev_results_size; i < results->size(); ++i) {
      Util::CapitalizeString(&(*results)[i].value);
    }
  } else {
    // For other cases (lower and as-is), just look up directly. 
    PredictiveLookupCallback callback(
        types, lookup_limit, input_key.size(), nullptr,
        Segment::Candidate::SOURCE_INFO_NONE, unknown_id_, "",
        GetSpatialCostParams(request), results);
    dictionary.LookupPredictive(input_key, request, &callback);
  }

  // If input mode is FULL_ASCII, then convert the results to full-width.
  if (request.has_composer() &&
      request.composer().GetInputMode() == transliteration::FULL_ASCII) {
    std::string tmp;
    for (size_t i = prev_results_size; i < results->size(); ++i) {
      tmp.assign((*results)[i].value);
      japanese_util::HalfWidthAsciiToFullWidthAscii(tmp, &(*results)[i].value);
    }
  }
}

// Looks up predictive results for each composer-supplied typing-corrected
// query, adding each query's correction cost to the wcost of its results.
void DictionaryPredictor::GetPredictiveResultsUsingTypingCorrection(
    const DictionaryInterface &dictionary, const std::string &history_key,
    const ConversionRequest &request, const Segments &segments,
    PredictionTypes types, size_t lookup_limit,
    std::vector<Result> *results) const {
  if (!request.has_composer()) {
    return;
  }

  std::vector<composer::TypeCorrectedQuery> queries;
  request.composer().GetTypeCorrectedQueriesForPrediction(&queries);
  for (size_t query_index = 0; query_index < queries.size(); ++query_index) {
    const composer::TypeCorrectedQuery &query = queries[query_index];
    const std::string input_key = history_key + query.base;
    const size_t previous_results_size = results->size();
    PredictiveLookupCallback callback(
        types, lookup_limit, input_key.size(),
        query.expanded.empty() ? nullptr : &query.expanded,
        Segment::Candidate::SOURCE_INFO_NONE, unknown_id_, "",
        GetSpatialCostParams(request), results);
    dictionary.LookupPredictive(input_key, request, &callback);

    for (size_t i = previous_results_size; i < results->size(); ++i) {
      (*results)[i].wcost += query.cost;
    }
    // NOTE(review): if |lookup_limit| is an unsigned type here, "<= 0" only
    // triggers at exactly 0 and the subtraction above could wrap if a lookup
    // ever returned more than the remaining limit — confirm the callback
    // enforces the limit strictly.
    lookup_limit -= results->size() - previous_results_size;
    if (lookup_limit <= 0) {
      break;
    }
  }
}

// static
// Collects zero-query candidates for |key| from |dict|, filtering emoji
// entries by the carriers available in the request. Returns true when at
// least one candidate is produced.
bool DictionaryPredictor::GetZeroQueryCandidatesForKey(
    const ConversionRequest &request, const std::string &key,
    const ZeroQueryDict &dict, std::vector<ZeroQueryResult> *results) {
  const int32_t available_emoji_carrier =
      request.request().available_emoji_carrier();

  DCHECK(results);
  results->clear();

  auto range = dict.equal_range(key);
  if (range.first == range.second) {
    return false;
  }
  for (; range.first != range.second; ++range.first) {
    const auto &entry = range.first;
    // Non-emoji entries are always usable.
    if (entry.type() != ZERO_QUERY_EMOJI) {
      results->push_back(
          std::make_pair(std::string(entry.value()), entry.type()));
      continue;
    }
    if (available_emoji_carrier & Request::UNICODE_EMOJI &&
        entry.emoji_type() & EMOJI_UNICODE) {
      results->push_back(
          std::make_pair(std::string(entry.value()), entry.type()));
      continue;
    }

    // Carrier-specific emoji are emitted via their Android PUA codepoint.
    if ((available_emoji_carrier & Request::DOCOMO_EMOJI &&
         entry.emoji_type() & EMOJI_DOCOMO) ||
        (available_emoji_carrier & Request::SOFTBANK_EMOJI &&
         entry.emoji_type() & EMOJI_SOFTBANK) ||
        (available_emoji_carrier & Request::KDDI_EMOJI &&
         entry.emoji_type() & EMOJI_KDDI)) {
      std::string android_pua;
      Util::Ucs4ToUtf8(entry.emoji_android_pua(), &android_pua);
      results->push_back(std::make_pair(android_pua, entry.type()));
    }
  }
  return !results->empty();
}

// static
// Converts zero-query candidate pairs into SUFFIX Results with the given POS
// ids; wcost increases with position so original ordering is preserved.
void DictionaryPredictor::AppendZeroQueryToResults(
    const std::vector<ZeroQueryResult> &candidates, uint16_t lid, uint16_t rid,
    std::vector<Result> *results) {
  int cost = 0;

  for (size_t i = 0; i < candidates.size(); ++i) {
    // Increment cost to show the candidates in order. 
constexpr int kSuffixPenalty = 10; results->push_back(Result()); Result *result = &results->back(); result->SetTypesAndTokenAttributes(SUFFIX, Token::NONE); result->SetSourceInfoForZeroQuery(candidates[i].second); result->key = candidates[i].first; result->value = candidates[i].first; result->wcost = cost; result->lid = lid; result->rid = rid; cost += kSuffixPenalty; } } // Returns true if we add zero query result. bool DictionaryPredictor::AggregateNumberZeroQueryPrediction( const ConversionRequest &request, const Segments &segments, std::vector<Result> *results) const { std::string number_key; if (!GetNumberHistory(segments, &number_key)) { return false; } std::vector<ZeroQueryResult> candidates_for_number_key; GetZeroQueryCandidatesForKey(request, number_key, zero_query_number_dict_, &candidates_for_number_key); std::vector<ZeroQueryResult> default_candidates_for_number; GetZeroQueryCandidatesForKey(request, "default", zero_query_number_dict_, &default_candidates_for_number); DCHECK(!default_candidates_for_number.empty()); AppendZeroQueryToResults(candidates_for_number_key, counter_suffix_word_id_, counter_suffix_word_id_, results); AppendZeroQueryToResults(default_candidates_for_number, counter_suffix_word_id_, counter_suffix_word_id_, results); return true; } // Returns true if we add zero query result. 
// Aggregates zero-query predictions keyed by the value of the top candidate
// of the last history segment (suggestions that follow committed text).
// Returns true if at least one zero-query result was added.
bool DictionaryPredictor::AggregateZeroQueryPrediction(
    const ConversionRequest &request, const Segments &segments,
    std::vector<Result> *results) const {
  const size_t history_size = segments.history_segments_size();
  if (history_size <= 0) {
    // Zero-query prediction needs a preceding (history) segment.
    return false;
  }
  const Segment &last_segment = segments.history_segment(history_size - 1);
  DCHECK_GT(last_segment.candidates_size(), 0);
  const std::string &history_value = last_segment.candidate(0).value;
  std::vector<ZeroQueryResult> candidates;
  if (!GetZeroQueryCandidatesForKey(request, history_value, zero_query_dict_,
                                    &candidates)) {
    return false;  // No zero-query entries keyed by the history value.
  }
  const uint16_t kId = 0;  // EOS
  AppendZeroQueryToResults(candidates, kId, kId, results);
  return true;
}

// Aggregates predictions from the suffix dictionary for a non-empty
// conversion key.
void DictionaryPredictor::AggregateSuffixPrediction(
    const ConversionRequest &request, const Segments &segments,
    std::vector<Result> *results) const {
  DCHECK_GT(segments.conversion_segments_size(), 0);
  DCHECK(!segments.conversion_segment(0).key().empty());  // Not zero query
  // Uses larger cutoff (kPredictionMaxResultsSize) in order to consider
  // all suffix entries.
  const size_t cutoff_threshold = kPredictionMaxResultsSize;
  const std::string kEmptyHistoryKey = "";
  GetPredictiveResults(*suffix_dictionary_, kEmptyHistoryKey, request,
                       segments, SUFFIX, cutoff_threshold,
                       Segment::Candidate::SOURCE_INFO_NONE, unknown_id_,
                       results);
}

// Zero-query flavor of suffix aggregation.  Number-specific zero-query
// results take precedence; otherwise generic zero-query results are added,
// then suffix-dictionary entries (except in Latin input mode).
void DictionaryPredictor::AggregateZeroQuerySuffixPrediction(
    const ConversionRequest &request, const Segments &segments,
    std::vector<Result> *results) const {
  DCHECK_GT(segments.conversion_segments_size(), 0);
  DCHECK(segments.conversion_segment(0).key().empty());
  if (AggregateNumberZeroQueryPrediction(request, segments, results)) {
    // Number zero-query results supersede the generic ones.
    return;
  }
  AggregateZeroQueryPrediction(request, segments, results);
  if (IsLatinInputMode(request)) {
    // We do not want zero query results from suffix dictionary for Latin
    // input mode. For example, we do not need "です", "。" just after "when".
    return;
  }
  // Uses larger cutoff (kPredictionMaxResultsSize) in order to consider
  // all suffix entries.
  const size_t cutoff_threshold = kPredictionMaxResultsSize;
  const std::string kEmptyHistoryKey = "";
  GetPredictiveResults(
      *suffix_dictionary_, kEmptyHistoryKey, request, segments, SUFFIX,
      cutoff_threshold,
      Segment::Candidate::DICTIONARY_PREDICTOR_ZERO_QUERY_SUFFIX, unknown_id_,
      results);
}

// Aggregates English predictions for the conversion key as typed.
// The whole batch is dropped if it reaches the cutoff threshold.
void DictionaryPredictor::AggregateEnglishPrediction(
    const ConversionRequest &request, const Segments &segments,
    std::vector<Result> *results) const {
  DCHECK(results);
  DCHECK(dictionary_);
  const size_t cutoff_threshold = GetCandidateCutoffThreshold(segments);
  const size_t prev_results_size = results->size();
  const std::string &input_key = segments.conversion_segment(0).key();
  GetPredictiveResultsForEnglishKey(*dictionary_, request, input_key, ENGLISH,
                                    cutoff_threshold, results);
  size_t unigram_results_size = results->size() - prev_results_size;
  if (unigram_results_size >= cutoff_threshold) {
    // Hitting the cutoff means the key was too generic; discard the batch.
    results->resize(prev_results_size);
    return;
  }
}

// Same as AggregateEnglishPrediction, but looks up using the raw input
// string obtained from the composer instead of the composed key.
void DictionaryPredictor::AggregateEnglishPredictionUsingRawInput(
    const ConversionRequest &request, const Segments &segments,
    std::vector<Result> *results) const {
  DCHECK(results);
  DCHECK(dictionary_);
  if (!request.has_composer()) {
    return;  // Raw input is only available through the composer.
  }
  const size_t cutoff_threshold = GetCandidateCutoffThreshold(segments);
  const size_t prev_results_size = results->size();
  std::string input_key;
  request.composer().GetRawString(&input_key);
  GetPredictiveResultsForEnglishKey(*dictionary_, request, input_key, ENGLISH,
                                    cutoff_threshold, results);
  size_t unigram_results_size = results->size() - prev_results_size;
  if (unigram_results_size >= cutoff_threshold) {
    // Hitting the cutoff means the key was too generic; discard the batch.
    results->resize(prev_results_size);
    return;
  }
}

// Aggregates typing-correction predictions.
void DictionaryPredictor::AggregateTypeCorrectingPrediction(
    const ConversionRequest &request, const Segments &segments,
    std::vector<Result> *results) const {
  DCHECK(results);
  DCHECK(dictionary_);
  const size_t prev_results_size = results->size();
  if (prev_results_size > 10000) {
    // NOTE(review): 10000 looks like an ad-hoc safety bound against a
    // pathologically large accumulated result set -- confirm intent.
    return;
  }
  const size_t cutoff_threshold = GetCandidateCutoffThreshold(segments);
  // Currently, history key is never utilized.
  const std::string kEmptyHistoryKey = "";
  GetPredictiveResultsUsingTypingCorrection(
      *dictionary_, kEmptyHistoryKey, request, segments, TYPING_CORRECTION,
      cutoff_threshold, results);
  if (results->size() - prev_results_size >= cutoff_threshold) {
    // Too many corrections implies the query was not informative; drop them.
    results->resize(prev_results_size);
    return;
  }
}

// Returns true when realtime (as-you-type) conversion results should be
// aggregated for this request/segments combination.
bool DictionaryPredictor::ShouldAggregateRealTimeConversionResults(
    const ConversionRequest &request, const Segments &segments) {
  constexpr size_t kMaxRealtimeKeySize = 300;  // 300 bytes in UTF8
  const std::string &key = segments.conversion_segment(0).key();
  if (key.empty() || key.size() >= kMaxRealtimeKeySize) {
    // 1) If key is empty, realtime conversion doesn't work.
    // 2) If the key is too long, we'll hit a performance issue.
    return false;
  }
  return (segments.request_type() == Segments::PARTIAL_SUGGESTION ||
          request.config().use_realtime_conversion() ||
          IsMixedConversionEnabled(request.request()));
}

// Returns true if the key looks like a zip-code query: non-empty and made
// up solely of ASCII digits and '-'.
bool DictionaryPredictor::IsZipCodeRequest(const std::string &key) {
  if (key.empty()) {
    return false;
  }
  for (ConstChar32Iterator iter(key); !iter.Done(); iter.Next()) {
    const char32 c = iter.Get();
    if (!('0' <= c && c <= '9') && (c != '-')) {
      return false;
    }
  }
  return true;
}

}  // namespace mozc

#undef MOZC_WORD_LOG_MESSAGE
#undef MOZC_WORD_LOG
Java
//===-- HostThreadMacOSX.h --------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef lldb_Host_macosx_HostThreadMacOSX_h_ #define lldb_Host_macosx_HostThreadMacOSX_h_ #include "lldb/Host/posix/HostThreadPosix.h" namespace lldb_private { class HostThreadMacOSX : public HostThreadPosix { friend class ThreadLauncher; public: HostThreadMacOSX(); HostThreadMacOSX(lldb::thread_t thread); protected: static lldb::thread_result_t ThreadCreateTrampoline(lldb::thread_arg_t arg); }; } #endif
Java
'use strict';

// Registers the /checkout route on the shopnx application.
angular.module('shopnxApp')
  .config(function ($stateProvider) {
    // State definition kept in a named object for readability.
    var checkoutState = {
      title: 'Checkout with the items you selected',
      url: '/checkout',
      templateUrl: 'app/checkout/checkout.html',
      controller: 'CheckoutCtrl',
      authenticate: true // route requires an authenticated user
    };
    $stateProvider.state('checkout', checkoutState);
  });
Java
package org.hisp.dhis.email;

/*
 * Copyright (c) 2004-2018, University of Oslo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * Neither the name of the HISP project nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * Outcome of an attempt to send an email.
 *
 * @author Zubair <[email protected]>
 */
public enum EmailResponse
{
    /** The email was sent successfully. */
    SENT( "success" ),

    /** Sending was attempted but failed. */
    FAILED( "failed" ),

    /** Sending was aborted before completion. */
    ABORTED( "aborted" ),

    /** No email configuration exists, so nothing was attempted. */
    NOT_CONFIGURED( "no configuration found" );

    // Human-readable message describing this outcome.
    // NOTE(review): this field is mutable and enum constants are process-wide
    // singletons, so setResponseMessage changes the message for every caller;
    // confirm that mutation is really needed.
    private String responseMessage;

    EmailResponse( String responseMessage )
    {
        this.responseMessage = responseMessage;
    }

    public String getResponseMessage()
    {
        return responseMessage;
    }

    public void setResponseMessage( String responseMessage )
    {
        this.responseMessage = responseMessage;
    }
}
Java
package cromwell.core.callcaching

import org.scalatest.{FlatSpec, Matchers}

/**
 * Verifies that HashKey renders its string `key` deterministically:
 * components appear joined with ": ", and constructor flags such as
 * checkForHitOrMiss do not leak into the rendered key.
 */
class HashKeySpec extends FlatSpec with Matchers {

  "HashKey" should "produce consistent key value" in {
    // Representative mix of single- and multi-component keys, including one
    // built with checkForHitOrMiss = false (should render the same way).
    val keys = Set(
      HashKey("command template"),
      HashKey("backend name"),
      HashKey("input count"),
      HashKey("output count"),
      HashKey("runtime attribute", "failOnStderr"),
      HashKey(checkForHitOrMiss = false, "runtime attribute", "cpu"),
      HashKey("runtime attribute", "continueOnReturnCode"),
      HashKey("input", "String stringInput"),
      HashKey("output", "String myOutput"),
      HashKey("runtime attribute", "docker")
    )

    // Expected rendering: components joined with ": ".
    keys map { _.key } should contain theSameElementsAs Set(
      "command template",
      "backend name",
      "input count",
      "output count",
      "runtime attribute: failOnStderr",
      "runtime attribute: cpu",
      "runtime attribute: continueOnReturnCode",
      "input: String stringInput",
      "output: String myOutput",
      "runtime attribute: docker"
    )
  }
}
Java
/** * SAHARA Scheduling Server * * Schedules and assigns local laboratory rigs. * * @license See LICENSE in the top level directory for complete license terms. * * Copyright (c) 2009, University of Technology, Sydney * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the University of Technology, Sydney nor the names * of its contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * @author Tania Machet (tmachet) * @date 13 December 2010 */ package au.edu.uts.eng.remotelabs.schedserver.reports.intf.types; import java.io.Serializable; import java.util.ArrayList; import java.util.Calendar; import javax.xml.namespace.QName; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; import javax.xml.stream.XMLStreamWriter; import org.apache.axiom.om.OMConstants; import org.apache.axiom.om.OMDataSource; import org.apache.axiom.om.OMElement; import org.apache.axiom.om.OMFactory; import org.apache.axiom.om.impl.llom.OMSourcedElementImpl; import org.apache.axis2.databinding.ADBBean; import org.apache.axis2.databinding.ADBDataSource; import org.apache.axis2.databinding.ADBException; import org.apache.axis2.databinding.utils.BeanUtil; import org.apache.axis2.databinding.utils.ConverterUtil; import org.apache.axis2.databinding.utils.reader.ADBXMLStreamReaderImpl; import org.apache.axis2.databinding.utils.writer.MTOMAwareXMLStreamWriter; /** * QuerySessionReportType bean class. 
*/ public class QuerySessionReportType implements ADBBean { /* * This type was generated from the piece of schema that had * name = QuerySessionReportType * Namespace URI = http://remotelabs.eng.uts.edu.au/schedserver/reports * Namespace Prefix = ns1 */ private static final long serialVersionUID = -5121246029757741056L; private static String generatePrefix(final String namespace) { if (namespace.equals("http://remotelabs.eng.uts.edu.au/schedserver/reports")) { return "ns1"; } return BeanUtil.getUniquePrefix(); } protected RequestorType requestor; public RequestorType getRequestor() { return this.requestor; } public void setRequestor(final RequestorType param) { this.requestor = param; } protected QueryFilterType querySelect; public QueryFilterType getQuerySelect() { return this.querySelect; } public void setQuerySelect(final QueryFilterType param) { this.querySelect = param; } protected QueryFilterType queryConstraints; protected boolean queryConstraintsTracker = false; public QueryFilterType getQueryConstraints() { return this.queryConstraints; } public void setQueryConstraints(final QueryFilterType param) { this.queryConstraints = param; this.queryConstraintsTracker = param != null; } protected Calendar startTime; protected boolean startTimeTracker = false; public Calendar getStartTime() { return this.startTime; } public void setStartTime(final Calendar param) { this.startTime = param; this.startTimeTracker = param != null; } protected Calendar endTime; protected boolean endTimeTracker = false; public Calendar getEndTime() { return this.endTime; } public void setEndTime(final Calendar param) { this.endTime = param; this.endTimeTracker = param != null; } protected PaginationType pagination; protected boolean paginationTracker = false; public PaginationType getPagination() { return this.pagination; } public void setPagination(final PaginationType param) { this.pagination = param; this.paginationTracker = param != null; } public static boolean 
isReaderMTOMAware(final XMLStreamReader reader) { boolean isReaderMTOMAware = false; try { isReaderMTOMAware = Boolean.TRUE.equals(reader.getProperty(OMConstants.IS_DATA_HANDLERS_AWARE)); } catch (final IllegalArgumentException e) { isReaderMTOMAware = false; } return isReaderMTOMAware; } public OMElement getOMElement(final QName parentQName, final OMFactory factory) throws ADBException { final OMDataSource dataSource = new ADBDataSource(this, parentQName) { @Override public void serialize(final MTOMAwareXMLStreamWriter xmlWriter) throws XMLStreamException { QuerySessionReportType.this.serialize(this.parentQName, factory, xmlWriter); } }; return new OMSourcedElementImpl(parentQName, factory, dataSource); } @Override public void serialize(final QName parentQName, final OMFactory factory, final MTOMAwareXMLStreamWriter xmlWriter) throws XMLStreamException, ADBException { this.serialize(parentQName, factory, xmlWriter, false); } @Override public void serialize(final QName parentQName, final OMFactory factory, final MTOMAwareXMLStreamWriter xmlWriter, final boolean serializeType) throws XMLStreamException, ADBException { String prefix = parentQName.getPrefix(); String namespace = parentQName.getNamespaceURI(); if ((namespace != null) && (namespace.trim().length() > 0)) { final String writerPrefix = xmlWriter.getPrefix(namespace); if (writerPrefix != null) { xmlWriter.writeStartElement(namespace, parentQName.getLocalPart()); } else { if (prefix == null) { prefix = QuerySessionReportType.generatePrefix(namespace); } xmlWriter.writeStartElement(prefix, parentQName.getLocalPart(), namespace); xmlWriter.writeNamespace(prefix, namespace); xmlWriter.setPrefix(prefix, namespace); } } else { xmlWriter.writeStartElement(parentQName.getLocalPart()); } if (serializeType) { final String namespacePrefix = this.registerPrefix(xmlWriter, "http://remotelabs.eng.uts.edu.au/schedserver/reports"); if ((namespacePrefix != null) && (namespacePrefix.trim().length() > 0)) { 
this.writeAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance", "type", namespacePrefix + ":QuerySessionReportType", xmlWriter); } else { this.writeAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance", "type", "QuerySessionReportType", xmlWriter); } } if (this.requestor == null) { throw new ADBException("requestor cannot be null!!"); } this.requestor.serialize(new QName("", "requestor"), factory, xmlWriter); if (this.querySelect == null) { throw new ADBException("querySelect cannot be null!!"); } this.querySelect.serialize(new QName("", "querySelect"), factory, xmlWriter); if (this.queryConstraintsTracker) { if (this.queryConstraints == null) { throw new ADBException("queryConstraints cannot be null!!"); } this.queryConstraints.serialize(new QName("", "queryConstraints"), factory, xmlWriter); } if (this.startTimeTracker) { namespace = ""; if (!namespace.equals("")) { prefix = xmlWriter.getPrefix(namespace); if (prefix == null) { prefix = QuerySessionReportType.generatePrefix(namespace); xmlWriter.writeStartElement(prefix, "startTime", namespace); xmlWriter.writeNamespace(prefix, namespace); xmlWriter.setPrefix(prefix, namespace); } else { xmlWriter.writeStartElement(namespace, "startTime"); } } else { xmlWriter.writeStartElement("startTime"); } if (this.startTime == null) { throw new ADBException("startTime cannot be null!!"); } else { xmlWriter.writeCharacters(ConverterUtil.convertToString(this.startTime)); } xmlWriter.writeEndElement(); } if (this.endTimeTracker) { namespace = ""; if (!namespace.equals("")) { prefix = xmlWriter.getPrefix(namespace); if (prefix == null) { prefix = QuerySessionReportType.generatePrefix(namespace); xmlWriter.writeStartElement(prefix, "endTime", namespace); xmlWriter.writeNamespace(prefix, namespace); xmlWriter.setPrefix(prefix, namespace); } else { xmlWriter.writeStartElement(namespace, "endTime"); } } else { xmlWriter.writeStartElement("endTime"); } if (this.endTime == null) { throw new ADBException("endTime cannot 
be null!!"); } else { xmlWriter.writeCharacters(ConverterUtil.convertToString(this.endTime)); } xmlWriter.writeEndElement(); } if (this.paginationTracker) { if (this.pagination == null) { throw new ADBException("pagination cannot be null!!"); } this.pagination.serialize(new QName("", "pagination"), factory, xmlWriter); } xmlWriter.writeEndElement(); } private void writeAttribute(final String prefix, final String namespace, final String attName, final String attValue, final XMLStreamWriter xmlWriter) throws XMLStreamException { if (xmlWriter.getPrefix(namespace) == null) { xmlWriter.writeNamespace(prefix, namespace); xmlWriter.setPrefix(prefix, namespace); } xmlWriter.writeAttribute(namespace, attName, attValue); } private String registerPrefix(final XMLStreamWriter xmlWriter, final String namespace) throws XMLStreamException { String prefix = xmlWriter.getPrefix(namespace); if (prefix == null) { prefix = QuerySessionReportType.generatePrefix(namespace); while (xmlWriter.getNamespaceContext().getNamespaceURI(prefix) != null) { prefix = BeanUtil.getUniquePrefix(); } xmlWriter.writeNamespace(prefix, namespace); xmlWriter.setPrefix(prefix, namespace); } return prefix; } @Override public XMLStreamReader getPullParser(final QName qName) throws ADBException { final ArrayList<Serializable> elementList = new ArrayList<Serializable>(); elementList.add(new QName("", "requestor")); if (this.requestor == null) { throw new ADBException("requestor cannot be null!!"); } elementList.add(this.requestor); elementList.add(new QName("", "querySelect")); if (this.querySelect == null) { throw new ADBException("querySelect cannot be null!!"); } elementList.add(this.querySelect); if (this.queryConstraintsTracker) { elementList.add(new QName("", "queryConstraints")); if (this.queryConstraints == null) { throw new ADBException("queryConstraints cannot be null!!"); } elementList.add(this.queryConstraints); } if (this.startTimeTracker) { elementList.add(new QName("", "startTime")); if 
(this.startTime != null) { elementList.add(ConverterUtil.convertToString(this.startTime)); } else { throw new ADBException("startTime cannot be null!!"); } } if (this.endTimeTracker) { elementList.add(new QName("", "endTime")); if (this.endTime != null) { elementList.add(ConverterUtil.convertToString(this.endTime)); } else { throw new ADBException("endTime cannot be null!!"); } } if (this.paginationTracker) { elementList.add(new QName("", "pagination")); if (this.pagination == null) { throw new ADBException("pagination cannot be null!!"); } elementList.add(this.pagination); } return new ADBXMLStreamReaderImpl(qName, elementList.toArray(), new Object[0]); } public static class Factory { public static QuerySessionReportType parse(final XMLStreamReader reader) throws Exception { final QuerySessionReportType object = new QuerySessionReportType(); try { while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.getAttributeValue("http://www.w3.org/2001/XMLSchema-instance", "type") != null) { final String fullTypeName = reader.getAttributeValue("http://www.w3.org/2001/XMLSchema-instance", "type"); if (fullTypeName != null) { String nsPrefix = null; if (fullTypeName.indexOf(":") > -1) { nsPrefix = fullTypeName.substring(0, fullTypeName.indexOf(":")); } nsPrefix = nsPrefix == null ? 
"" : nsPrefix; final String type = fullTypeName.substring(fullTypeName.indexOf(":") + 1); if (!"QuerySessionReportType".equals(type)) { final String nsUri = reader.getNamespaceContext().getNamespaceURI(nsPrefix); return (QuerySessionReportType) ExtensionMapper.getTypeObject(nsUri, type, reader); } } } reader.next(); while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "requestor").equals(reader.getName())) { object.setRequestor(RequestorType.Factory.parse(reader)); reader.next(); } else { throw new ADBException("Unexpected subelement " + reader.getLocalName()); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "querySelect").equals(reader.getName())) { object.setQuerySelect(QueryFilterType.Factory.parse(reader)); reader.next(); } else { throw new ADBException("Unexpected subelement " + reader.getLocalName()); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "queryConstraints").equals(reader.getName())) { object.setQueryConstraints(QueryFilterType.Factory.parse(reader)); reader.next(); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "startTime").equals(reader.getName())) { final String content = reader.getElementText(); object.setStartTime(ConverterUtil.convertToDateTime(content)); reader.next(); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "endTime").equals(reader.getName())) { final String content = reader.getElementText(); object.setEndTime(ConverterUtil.convertToDateTime(content)); reader.next(); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "pagination").equals(reader.getName())) { 
object.setPagination(PaginationType.Factory.parse(reader)); reader.next(); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement()) { throw new ADBException("Unexpected subelement " + reader.getLocalName()); } } catch (final XMLStreamException e) { throw new Exception(e); } return object; } } }
Java
/** * Copyright (c) 2016, The National Archives <[email protected]> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following * conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of the The National Archives nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // // This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.1-b02-fcs // See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a> // Any modifications to this file will be lost upon recompilation of the source schema. 
// Generated on: 2010.03.22 at 11:40:59 AM GMT // package uk.gov.nationalarchives.droid.report.planets.domain; import java.math.BigDecimal; import java.math.BigInteger; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlSchemaType; import javax.xml.bind.annotation.XmlType; import javax.xml.datatype.XMLGregorianCalendar; /** * <p>Java class for YearItemType complex type. * * <p>The following schema fragment specifies the expected content contained within this class. * * <pre> * &lt;complexType name="YearItemType"&gt; * &lt;complexContent&gt; * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"&gt; * &lt;sequence&gt; * &lt;element name="year" type="{http://www.w3.org/2001/XMLSchema}gYear"/&gt; * &lt;element name="numFiles" type="{http://www.w3.org/2001/XMLSchema}integer"/&gt; * &lt;element name="totalFileSize" type="{http://www.w3.org/2001/XMLSchema}decimal"/&gt; * &lt;/sequence&gt; * &lt;/restriction&gt; * &lt;/complexContent&gt; * &lt;/complexType&gt; * </pre> * * @deprecated PLANETS XML is now generated using XSLT over normal report xml files. */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "YearItemType", propOrder = { "year", "numFiles", "totalFileSize" }) @Deprecated public class YearItemType { @XmlElement(required = true) @XmlSchemaType(name = "gYear") protected XMLGregorianCalendar year; @XmlElement(required = true) protected BigInteger numFiles; @XmlElement(required = true) protected BigDecimal totalFileSize; /** * Gets the value of the year property. * * @return * possible object is * {@link XMLGregorianCalendar } * */ public XMLGregorianCalendar getYear() { return year; } /** * Sets the value of the year property. * * @param value * allowed object is * {@link XMLGregorianCalendar } * */ public void setYear(XMLGregorianCalendar value) { this.year = value; } /** * Gets the value of the numFiles property. 
* * @return * possible object is * {@link BigInteger } * */ public BigInteger getNumFiles() { return numFiles; } /** * Sets the value of the numFiles property. * * @param value * allowed object is * {@link BigInteger } * */ public void setNumFiles(BigInteger value) { this.numFiles = value; } /** * Gets the value of the totalFileSize property. * * @return * possible object is * {@link BigDecimal } * */ public BigDecimal getTotalFileSize() { return totalFileSize; } /** * Sets the value of the totalFileSize property. * * @param value * allowed object is * {@link BigDecimal } * */ public void setTotalFileSize(BigDecimal value) { this.totalFileSize = value; } }
Java
/**
 * Copyright (c) 2016, The National Archives <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following
 * conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 *  * Neither the name of the The National Archives nor the
 *    names of its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package uk.gov.nationalarchives.droid.command.action;

import java.io.PrintWriter;
import java.util.Map;

import uk.gov.nationalarchives.droid.command.i18n.I18N;
import uk.gov.nationalarchives.droid.core.interfaces.signature.SignatureFileException;
import uk.gov.nationalarchives.droid.core.interfaces.signature.SignatureFileInfo;
import uk.gov.nationalarchives.droid.core.interfaces.signature.SignatureManager;
import uk.gov.nationalarchives.droid.core.interfaces.signature.SignatureType;

/**
 * Command that prints, one line per signature type, the version information
 * of each default signature file known to the {@link SignatureManager}.
 *
 * @author rflitcroft
 *
 */
public class DisplayDefaultSignatureFileVersionCommand implements DroidCommand {

    // Destination for the formatted version lines; injected via setter.
    private PrintWriter printWriter;
    // Source of default signature file information; injected via setter.
    private SignatureManager signatureManager;

    /**
     * {@inheritDoc}
     *
     * Writes one localised line per default signature file (type, version,
     * file name).
     *
     * @throws CommandExecutionException if the signature files cannot be read
     */
    @Override
    public void execute() throws CommandExecutionException {
        try {
            Map<SignatureType, SignatureFileInfo> sigFileInfos = signatureManager.getDefaultSignatures();
            for (SignatureFileInfo info : sigFileInfos.values()) {
                printWriter.println(I18N.getResource(I18N.DEFAULT_SIGNATURE_VERSION,
                        info.getType(), info.getVersion(), info.getFile().getName()));
            }
        } catch (SignatureFileException e) {
            // Wrap so callers only need to handle CommandExecutionException.
            throw new CommandExecutionException(e);
        }
    }

    /**
     * @param printWriter the printWriter to set
     */
    public void setPrintWriter(PrintWriter printWriter) {
        this.printWriter = printWriter;
    }

    /**
     * @param signatureManager the signatureManager to set
     */
    public void setSignatureManager(SignatureManager signatureManager) {
        this.signatureManager = signatureManager;
    }
}
Java
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE762_Mismatched_Memory_Management_Routines__delete_int64_t_calloc_52c.cpp
Label Definition File: CWE762_Mismatched_Memory_Management_Routines__delete.label.xml
Template File: sources-sinks-52c.tmpl.cpp */
/*
 * @description
 * CWE: 762 Mismatched Memory Management Routines
 * BadSource: calloc Allocate data using calloc()
 * GoodSource: Allocate data using new
 * Sinks:
 *    GoodSink: Deallocate data using free()
 *    BadSink : Deallocate data using delete
 * Flow Variant: 52 Data flow: data passed as an argument from one function to another to another in three different source files
 *
 * */

// NOTE(review): this is a Juliet/CWE benchmark file.  The "bad" sink below
// contains an INTENTIONAL defect (delete on memory the bad source obtained
// from calloc) that analysis tools are expected to flag -- do not "fix" it.

#include "std_testcase.h"

namespace CWE762_Mismatched_Memory_Management_Routines__delete_int64_t_calloc_52
{

#ifndef OMITBAD

// Bad sink: in the bad flow, data was allocated with calloc(), so delete is
// the wrong deallocation routine (the defect under test).
void badSink_c(int64_t * data)
{
    /* POTENTIAL FLAW: Deallocate memory using delete - the source memory allocation function may
     * require a call to free() to deallocate the memory */
    delete data;
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
// In this flow the source allocated with new, so delete matches correctly.
void goodG2BSink_c(int64_t * data)
{
    /* POTENTIAL FLAW: Deallocate memory using delete - the source memory allocation function may
     * require a call to free() to deallocate the memory */
    delete data;
}

/* goodB2G uses the BadSource with the GoodSink */
// In this flow the source allocated with calloc, so free() matches correctly.
void goodB2GSink_c(int64_t * data)
{
    /* FIX: Deallocate the memory using free() */
    free(data);
}

#endif /* OMITGOOD */

} /* close namespace */
Java
# ###########################################################################
#
# $Id: Makefile,v 1.3 2008/03/26 16:35:00 cnepveu Exp $
#
# Copyright (c) 2007 Hexago Inc. All rights reserved.
#
#  For license information refer to CLIENT-LICENSE.TXT
#
# Description: Makefile for module library gw6c-pal
#              (Gateway6 Client Platform Abstraction Layer)
#
# Author: Charles Nepveu
#
# Date: August 2007
#
# ###########################################################################
#
# Per-platform subdirectory lists: $(${PLATFORM}_CDIR) selects which
# directories below are built for the current ${PLATFORM} value.
#
# Target directory definition.
#
linux_CDIR=common unix-common
freebsd_CDIR=common unix-common
openbsd_CDIR=common unix-common
netbsd_CDIR=common unix-common netbsd
darwin_CDIR=common unix-common
sunos_CDIR=common unix-common
dongle6_CDIR=common unix-common

#
# ###########################################################################
#
.PHONY: env-check platform-obj platform-inc

# This makefile target will check the execution context and environment.
# Fails fast with exit status 1 if any of the required variables
# (PLATFORM, PLATFORM_DIR, OBJS_DIR, DEFS_DIR, OUT_INC_DIR) is unset.
#
env-check:
	@[ -n "${PLATFORM}" ] || { echo "Error: Invalid environment." ; exit 1 ; }
	@[ -n "${PLATFORM_DIR}" ] || { echo "Error: Invalid environment." ; exit 1 ; }
	@[ -n "${OBJS_DIR}" ] || { echo "Error: Invalid environment." ; exit 1 ; }
	@[ -n "${DEFS_DIR}" ] || { echo "Error: Invalid environment." ; exit 1 ; }
	@[ -n "${OUT_INC_DIR}" ] || { echo "Error: Invalid environment." ; exit 1 ; }


# This makefile target will compile the platform PAL objects.
# Recurses into each platform directory with the same target name.
#
platform-obj: env-check
	@for dir in $(${PLATFORM}_CDIR) ; do \
	  $(MAKE) -C $$dir platform-obj ; \
	done


# This makefile target will copy the platform includes to out_inc.
# Recurses into each platform directory with the same target name.
#
platform-inc: env-check
	@for dir in $(${PLATFORM}_CDIR) ; do \
	  $(MAKE) -C $$dir platform-inc ; \
	done

# ###########################################################################
Java
/* * Copyright (c) 2010-2011 Mark Allen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
 */
package com.restfb;

import static com.restfb.json.JsonObject.NULL;
import static com.restfb.util.ReflectionUtils.findFieldsWithAnnotation;
import static com.restfb.util.ReflectionUtils.getFirstParameterizedTypeArgument;
import static com.restfb.util.ReflectionUtils.isPrimitive;
import static com.restfb.util.StringUtils.isBlank;
import static com.restfb.util.StringUtils.trimToEmpty;
import static java.util.Collections.unmodifiableList;
import static java.util.Collections.unmodifiableSet;
import static java.util.logging.Level.FINE;
import static java.util.logging.Level.FINER;
import static java.util.logging.Level.FINEST;

import java.lang.reflect.Field;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.logging.Logger;

import com.restfb.exception.FacebookJsonMappingException;
import com.restfb.json.JsonArray;
import com.restfb.json.JsonException;
import com.restfb.json.JsonObject;
import com.restfb.types.Post.Comments;
import com.restfb.util.ReflectionUtils.FieldWithAnnotation;

/**
 * Default implementation of a JSON-to-Java mapper.
 * <p>
 * Mapping is driven by reflection over fields annotated with {@code @Facebook};
 * several branches below are deliberate workarounds for non-standard JSON that
 * Facebook has been observed to return (see inline comments).
 * 
 * @author <a href="http://restfb.com">Mark Allen</a>
 */
public class DefaultJsonMapper implements JsonMapper {
  /**
   * Logger.
   */
  private static final Logger logger = Logger.getLogger(DefaultJsonMapper.class.getName());

  /**
   * @see com.restfb.JsonMapper#toJavaList(String, Class)
   */
  @Override
  public <T> List<T> toJavaList(String json, Class<T> type) {
    json = trimToEmpty(json);

    if (isBlank(json))
      throw new FacebookJsonMappingException("JSON is an empty string - can't map it.");
    if (type == null)
      throw new FacebookJsonMappingException("You must specify the Java type to map to.");

    if (json.startsWith("{")) {
      // Sometimes Facebook returns the empty object {} when it really should be
      // returning an empty list [] (example: do an FQL query for a user's
      // affiliations - it's a list except when there are none, then it turns
      // into an object). Check for that special case here.
      if (isEmptyObject(json)) {
        if (logger.isLoggable(FINER))
          logger.finer("Encountered {} when we should've seen []. "
              + "Mapping the {} as an empty list and moving on...");

        return new ArrayList<T>();
      }

      // Special case: if the only element of this object is an array called
      // "data", then treat it as a list. The Graph API uses this convention for
      // connections and in a few other places, e.g. comments on the Post
      // object.
      // Doing this simplifies mapping, so we don't have to worry about having a
      // little placeholder object that only has a "data" value.
      try {
        JsonObject jsonObject = new JsonObject(json);
        String[] fieldNames = JsonObject.getNames(jsonObject);

        if (fieldNames != null) {
          boolean hasSingleDataProperty = fieldNames.length == 1 && "data".equals(fieldNames[0]);
          // NOTE(review): get("data") throws JsonException when the key is
          // absent; that exception is caught below and re-wrapped.
          Object jsonDataObject = jsonObject.get("data");

          if (!hasSingleDataProperty && !(jsonDataObject instanceof JsonArray))
            throw new FacebookJsonMappingException("JSON is an object but is being mapped as a list "
                + "instead. Offending JSON is '" + json + "'.");

          // Unwrap the "data" array and fall through to the list mapping below.
          json = jsonDataObject.toString();
        }
      } catch (JsonException e) {
        // Should never get here, but just in case...
        throw new FacebookJsonMappingException("Unable to convert Facebook response " + "JSON to a list of "
            + type.getName() + " instances. Offending JSON is " + json, e);
      }
    }

    try {
      List<T> list = new ArrayList<T>();
      JsonArray jsonArray = new JsonArray(json);
      // Each array element is mapped individually via toJavaObject.
      for (int i = 0; i < jsonArray.length(); i++)
        list.add(toJavaObject(jsonArray.get(i).toString(), type));
      return unmodifiableList(list);
    } catch (FacebookJsonMappingException e) {
      throw e;
    } catch (Exception e) {
      throw new FacebookJsonMappingException("Unable to convert Facebook response " + "JSON to a list of "
          + type.getName() + " instances", e);
    }
  }

  /**
   * @see com.restfb.JsonMapper#toJavaObject(String, Class)
   */
  @Override
  @SuppressWarnings("unchecked")
  public <T> T toJavaObject(String json, Class<T> type) {
    verifyThatJsonIsOfObjectType(json);

    try {
      // Are we asked to map to JsonObject? If so, short-circuit right away.
      if (type.equals(JsonObject.class))
        return (T) new JsonObject(json);

      List<FieldWithAnnotation<Facebook>> fieldsWithAnnotation = findFieldsWithAnnotation(type, Facebook.class);
      Set<String> facebookFieldNamesWithMultipleMappings = facebookFieldNamesWithMultipleMappings(fieldsWithAnnotation);

      // If there are no annotated fields, assume we're mapping to a built-in
      // type. If this is actually the empty object, just return a new instance
      // of the corresponding Java type.
      if (fieldsWithAnnotation.size() == 0)
        if (isEmptyObject(json))
          return createInstance(type);
        else
          return toPrimitiveJavaType(json, type);

      // Facebook will sometimes return the string "null".
      // Check for that and bail early if we find it.
      if ("null".equals(json))
        return null;

      // Facebook will sometimes return the string "false" to mean null.
      // Check for that and bail early if we find it.
      if ("false".equals(json)) {
        if (logger.isLoggable(FINE))
          logger.fine("Encountered 'false' from Facebook when trying to map to " + type.getSimpleName()
              + " - mapping null instead.");
        return null;
      }

      JsonObject jsonObject = new JsonObject(json);
      T instance = createInstance(type);

      if (instance instanceof JsonObject)
        return (T) jsonObject;

      // For each Facebook-annotated field on the current Java object, pull data
      // out of the JSON object and put it in the Java object
      for (FieldWithAnnotation<Facebook> fieldWithAnnotation : fieldsWithAnnotation) {
        String facebookFieldName = getFacebookFieldName(fieldWithAnnotation);

        if (!jsonObject.has(facebookFieldName)) {
          if (logger.isLoggable(FINER))
            logger.finer("No JSON value present for '" + facebookFieldName + "', skipping. JSON is '" + json + "'.");
          continue;
        }

        fieldWithAnnotation.getField().setAccessible(true);

        // Set the Java field's value.
        //
        // If we notice that this Facebook field name is mapped more than once,
        // go into a special mode where we swallow any exceptions that occur
        // when mapping to the Java field. This is because Facebook will
        // sometimes return data in different formats for the same field name.
        // See issues 56 and 90 for examples of this behavior and discussion.
        if (facebookFieldNamesWithMultipleMappings.contains(facebookFieldName)) {
          try {
            fieldWithAnnotation.getField()
              .set(instance, toJavaType(fieldWithAnnotation, jsonObject, facebookFieldName));
          } catch (FacebookJsonMappingException e) {
            logMultipleMappingFailedForField(facebookFieldName, fieldWithAnnotation, json);
          } catch (JsonException e) {
            logMultipleMappingFailedForField(facebookFieldName, fieldWithAnnotation, json);
          }
        } else {
          fieldWithAnnotation.getField().set(instance, toJavaType(fieldWithAnnotation, jsonObject, facebookFieldName));
        }
      }

      return instance;
    } catch (FacebookJsonMappingException e) {
      throw e;
    } catch (Exception e) {
      throw new FacebookJsonMappingException("Unable to map JSON to Java. Offending JSON is '" + json + "'.", e);
    }
  }

  /**
   * Dumps out a log message when one of a multiple-mapped Facebook field name
   * JSON-to-Java mapping operation fails.
   * 
   * @param facebookFieldName
   *          The Facebook field name.
   * @param fieldWithAnnotation
   *          The Java field to map to and its annotation.
   * @param json
   *          The JSON that failed to map to the Java field.
   */
  protected void logMultipleMappingFailedForField(String facebookFieldName,
      FieldWithAnnotation<Facebook> fieldWithAnnotation, String json) {
    if (!logger.isLoggable(FINER))
      return;

    Field field = fieldWithAnnotation.getField();

    if (logger.isLoggable(FINER))
      logger.finer("Could not map '" + facebookFieldName + "' to " + field.getDeclaringClass().getSimpleName() + "."
          + field.getName() + ", but continuing on because '" + facebookFieldName
          + "' is mapped to multiple fields in " + field.getDeclaringClass().getSimpleName() + ". JSON is " + json);
  }

  /**
   * For a Java field annotated with the {@code Facebook} annotation, figure out
   * what the corresponding Facebook JSON field name to map to it is.
   * 
   * @param fieldWithAnnotation
   *          A Java field annotated with the {@code Facebook} annotation.
   * @return The Facebook JSON field name that should be mapped to this Java
   *         field.
   */
  protected String getFacebookFieldName(FieldWithAnnotation<Facebook> fieldWithAnnotation) {
    String facebookFieldName = fieldWithAnnotation.getAnnotation().value();
    Field field = fieldWithAnnotation.getField();

    // If no Facebook field name was specified in the annotation, assume
    // it's the same name as the Java field
    if (isBlank(facebookFieldName)) {
      if (logger.isLoggable(FINEST))
        logger.finest("No explicit Facebook field name found for " + field
            + ", so defaulting to the field name itself (" + field.getName() + ")");
      facebookFieldName = field.getName();
    }

    return facebookFieldName;
  }

  /**
   * Finds any Facebook JSON fields that are mapped to more than 1 Java field.
   * 
   * @param fieldsWithAnnotation
   *          Java fields annotated with the {@code Facebook} annotation.
   * @return Any Facebook JSON fields that are mapped to more than 1 Java field.
   */
  protected Set<String> facebookFieldNamesWithMultipleMappings(List<FieldWithAnnotation<Facebook>> fieldsWithAnnotation) {
    Map<String, Integer> facebookFieldsNamesWithOccurrenceCount = new HashMap<String, Integer>();
    Set<String> facebookFieldNamesWithMultipleMappings = new HashSet<String>();

    // Get a count of Facebook field name occurrences for each
    // @Facebook-annotated field
    for (FieldWithAnnotation<Facebook> fieldWithAnnotation : fieldsWithAnnotation) {
      String fieldName = getFacebookFieldName(fieldWithAnnotation);
      int occurrenceCount =
          facebookFieldsNamesWithOccurrenceCount.containsKey(fieldName) ? facebookFieldsNamesWithOccurrenceCount
            .get(fieldName) : 0;
      facebookFieldsNamesWithOccurrenceCount.put(fieldName, occurrenceCount + 1);
    }

    // Pull out only those field names with multiple mappings
    for (Entry<String, Integer> entry : facebookFieldsNamesWithOccurrenceCount.entrySet())
      if (entry.getValue() > 1)
        facebookFieldNamesWithMultipleMappings.add(entry.getKey());

    return unmodifiableSet(facebookFieldNamesWithMultipleMappings);
  }

  /**
   * @see com.restfb.JsonMapper#toJson(Object)
   */
  @Override
  public String toJson(Object object) {
    // Delegate to recursive method
    return toJsonInternal(object).toString();
  }

  /**
   * Is the given {@code json} a valid JSON object?
   * 
   * @param json
   *          The JSON to check.
   * @throws FacebookJsonMappingException
   *           If {@code json} is not a valid JSON object.
   */
  protected void verifyThatJsonIsOfObjectType(String json) {
    if (isBlank(json))
      throw new FacebookJsonMappingException("JSON is an empty string - can't map it.");

    if (json.startsWith("["))
      throw new FacebookJsonMappingException("JSON is an array but is being mapped as an object "
          + "- you should map it as a List instead. Offending JSON is '" + json + "'.");
  }

  /**
   * Recursively marshal the given {@code object} to JSON.
   * <p>
   * Used by {@link #toJson(Object)}.
   * 
   * @param object
   *          The object to marshal.
   * @return JSON representation of the given {@code object}.
   * @throws FacebookJsonMappingException
   *           If an error occurs while marshaling to JSON.
   */
  protected Object toJsonInternal(Object object) {
    if (object == null)
      return NULL;

    if (object instanceof List<?>) {
      JsonArray jsonArray = new JsonArray();
      for (Object o : (List<?>) object)
        jsonArray.put(toJsonInternal(o));

      return jsonArray;
    }

    if (object instanceof Map<?, ?>) {
      JsonObject jsonObject = new JsonObject();
      for (Entry<?, ?> entry : ((Map<?, ?>) object).entrySet()) {
        if (!(entry.getKey() instanceof String))
          throw new FacebookJsonMappingException("Your Map keys must be of type " + String.class
              + " in order to be converted to JSON. Offending map is " + object);

        try {
          jsonObject.put((String) entry.getKey(), toJsonInternal(entry.getValue()));
        } catch (JsonException e) {
          throw new FacebookJsonMappingException("Unable to process value '" + entry.getValue() + "' for key '"
              + entry.getKey() + "' in Map " + object, e);
        }
      }

      return jsonObject;
    }

    if (isPrimitive(object))
      return object;

    // Narrow big numbers to their primitive equivalents for the JSON layer.
    if (object instanceof BigInteger)
      return ((BigInteger) object).longValue();

    if (object instanceof BigDecimal)
      return ((BigDecimal) object).doubleValue();

    // We've passed the special-case bits, so let's try to marshal this as a
    // plain old Javabean...

    List<FieldWithAnnotation<Facebook>> fieldsWithAnnotation =
        findFieldsWithAnnotation(object.getClass(), Facebook.class);

    JsonObject jsonObject = new JsonObject();

    Set<String> facebookFieldNamesWithMultipleMappings = facebookFieldNamesWithMultipleMappings(fieldsWithAnnotation);
    if (facebookFieldNamesWithMultipleMappings.size() > 0)
      throw new FacebookJsonMappingException("Unable to convert to JSON because multiple @"
          + Facebook.class.getSimpleName() + " annotations for the same name are present: "
          + facebookFieldNamesWithMultipleMappings);

    for (FieldWithAnnotation<Facebook> fieldWithAnnotation : fieldsWithAnnotation) {
      String facebookFieldName = getFacebookFieldName(fieldWithAnnotation);
      fieldWithAnnotation.getField().setAccessible(true);

      try {
        jsonObject.put(facebookFieldName, toJsonInternal(fieldWithAnnotation.getField().get(object)));
      } catch (Exception e) {
        throw new FacebookJsonMappingException("Unable to process field '" + facebookFieldName + "' for "
            + object.getClass(), e);
      }
    }

    return jsonObject;
  }

  /**
   * Given a {@code json} value of something like {@code MyValue} or {@code 123}
   * , return a representation of that value of type {@code type}.
   * <p>
   * This is to support non-legal JSON served up by Facebook for API calls like
   * {@code Friends.get} (example result: {@code [222333,1240079]}).
   * 
   * @param <T>
   *          The Java type to map to.
   * @param json
   *          The non-legal JSON to map to the Java type.
   * @param type
   *          Type token.
   * @return Java representation of {@code json}.
   * @throws FacebookJsonMappingException
   *           If an error occurs while mapping JSON to Java.
   */
  @SuppressWarnings("unchecked")
  protected <T> T toPrimitiveJavaType(String json, Class<T> type) {

    if (String.class.equals(type)) {
      // If the string starts and ends with quotes, remove them, since Facebook
      // can serve up strings surrounded by quotes.
      if (json.length() > 1 && json.startsWith("\"") && json.endsWith("\"")) {
        json = json.replaceFirst("\"", "");
        json = json.substring(0, json.length() - 1);
      }

      return (T) json;
    }

    if (Integer.class.equals(type) || Integer.TYPE.equals(type))
      return (T) new Integer(json);
    if (Boolean.class.equals(type) || Boolean.TYPE.equals(type))
      return (T) new Boolean(json);
    if (Long.class.equals(type) || Long.TYPE.equals(type))
      return (T) new Long(json);
    if (Double.class.equals(type) || Double.TYPE.equals(type))
      return (T) new Double(json);
    if (Float.class.equals(type) || Float.TYPE.equals(type))
      return (T) new Float(json);
    if (BigInteger.class.equals(type))
      return (T) new BigInteger(json);
    if (BigDecimal.class.equals(type))
      return (T) new BigDecimal(json);

    throw new FacebookJsonMappingException("Don't know how to map JSON to " + type
        + ". Are you sure you're mapping to the right class? " + "Offending JSON is '" + json + "'.");
  }

  /**
   * Extracts JSON data for a field according to its {@code Facebook} annotation
   * and returns it converted to the proper Java type.
   * 
   * @param fieldWithAnnotation
   *          The field/annotation pair which specifies what Java type to
   *          convert to.
   * @param jsonObject
   *          "Raw" JSON object to pull data from.
   * @param facebookFieldName
   *          Specifies what JSON field to pull "raw" data from.
   * @return A
   * @throws JsonException
   *           If an error occurs while mapping JSON to Java.
   * @throws FacebookJsonMappingException
   *           If an error occurs while mapping JSON to Java.
   */
  protected Object toJavaType(FieldWithAnnotation<Facebook> fieldWithAnnotation, JsonObject jsonObject,
      String facebookFieldName) throws JsonException, FacebookJsonMappingException {
    Class<?> type = fieldWithAnnotation.getField().getType();
    Object rawValue = jsonObject.get(facebookFieldName);

    // Short-circuit right off the bat if we've got a null value.
    if (NULL.equals(rawValue))
      return null;

    if (String.class.equals(type)) {
      // Special handling here for better error checking.
      // Since JsonObject.getString() will return literal JSON text even if it's
      // _not_ a JSON string, we check the marshaled type and bail if needed.
      // For example, calling JsonObject.getString("results") on the below
      // JSON...
      // {"results":[{"name":"Mark Allen"}]}
      // ... would return the string "[{"name":"Mark Allen"}]" instead of
      // throwing an error. So we throw the error ourselves.

      // Per Antonello Naccarato, sometimes FB will return an empty JSON array
      // instead of an empty string. Look for that here.
      if (rawValue instanceof JsonArray)
        if (((JsonArray) rawValue).length() == 0) {
          if (logger.isLoggable(FINER))
            logger.finer("Coercing an empty JSON array " + "to an empty string for " + fieldWithAnnotation);

          return "";
        }

      // If the user wants a string, _always_ give her a string.
      // This is useful if, for example, you've got a @Facebook-annotated string
      // field that you'd like to have a numeric type shoved into.
      // User beware: this will turn *anything* into a string, which might lead
      // to results you don't expect.
      return rawValue.toString();
    }

    if (Integer.class.equals(type) || Integer.TYPE.equals(type))
      return new Integer(jsonObject.getInt(facebookFieldName));
    if (Boolean.class.equals(type) || Boolean.TYPE.equals(type))
      return new Boolean(jsonObject.getBoolean(facebookFieldName));
    if (Long.class.equals(type) || Long.TYPE.equals(type))
      return new Long(jsonObject.getLong(facebookFieldName));
    if (Double.class.equals(type) || Double.TYPE.equals(type))
      return new Double(jsonObject.getDouble(facebookFieldName));
    // NOTE(review): floats are routed through BigDecimal on the raw string
    // rather than getDouble() — presumably to control rounding; confirm before
    // changing.
    if (Float.class.equals(type) || Float.TYPE.equals(type))
      return new BigDecimal(jsonObject.getString(facebookFieldName)).floatValue();
    if (BigInteger.class.equals(type))
      return new BigInteger(jsonObject.getString(facebookFieldName));
    if (BigDecimal.class.equals(type))
      return new BigDecimal(jsonObject.getString(facebookFieldName));
    if (List.class.equals(type))
      return toJavaList(rawValue.toString(), getFirstParameterizedTypeArgument(fieldWithAnnotation.getField()));

    String rawValueAsString = rawValue.toString();

    // Hack for issue 76 where FB will sometimes return a Post's Comments as
    // "[]" instead of an object type (wtf)
    if (Comments.class.isAssignableFrom(type) && rawValue instanceof JsonArray) {
      if (logger.isLoggable(FINE))
        logger.fine("Encountered comment array '" + rawValueAsString + "' but expected a "
            + Comments.class.getSimpleName() + " object instead. Working around that " + "by coercing into an empty "
            + Comments.class.getSimpleName() + " instance...");

      JsonObject workaroundJsonObject = new JsonObject();
      workaroundJsonObject.put("count", 0);
      workaroundJsonObject.put("data", new JsonArray());
      rawValueAsString = workaroundJsonObject.toString();
    }

    // Some other type - recurse into it
    return toJavaObject(rawValueAsString, type);
  }

  /**
   * Creates a new instance of the given {@code type}.
   * 
   * @param <T>
   *          Java type to map to.
   * @param type
   *          Type token.
   * @return A new instance of {@code type}.
   * @throws FacebookJsonMappingException
   *           If an error occurs when creating a new instance ({@code type} is
   *           inaccessible, doesn't have a public no-arg constructor, etc.)
   */
  protected <T> T createInstance(Class<T> type) {
    String errorMessage =
        "Unable to create an instance of " + type + ". Please make sure that it's marked 'public' "
            + "and, if it's a nested class, is marked 'static'. " + "It should have a public, no-argument constructor.";

    try {
      return type.newInstance();
    } catch (IllegalAccessException e) {
      throw new FacebookJsonMappingException(errorMessage, e);
    } catch (InstantiationException e) {
      throw new FacebookJsonMappingException(errorMessage, e);
    }
  }

  /**
   * Is the given JSON equivalent to the empty object (<code>{}</code>)?
   * 
   * @param json
   *          The JSON to check.
   * @return {@code true} if the JSON is equivalent to the empty object,
   *         {@code false} otherwise.
   */
  protected boolean isEmptyObject(String json) {
    return "{}".equals(json);
  }
}
Java
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_PUBLIC_BROWSER_DOWNLOAD_DANGER_TYPE_H_ #define CONTENT_PUBLIC_BROWSER_DOWNLOAD_DANGER_TYPE_H_ #pragma once namespace content { // This enum is also used by histograms. Do not change the ordering or remove // items. enum DownloadDangerType { // The download is safe. DOWNLOAD_DANGER_TYPE_NOT_DANGEROUS = 0, // A dangerous file to the system (e.g.: a pdf or extension from // places other than gallery). DOWNLOAD_DANGER_TYPE_DANGEROUS_FILE, // Safebrowsing download service shows this URL leads to malicious file // download. DOWNLOAD_DANGER_TYPE_DANGEROUS_URL, // SafeBrowsing download service shows this file content as being malicious. DOWNLOAD_DANGER_TYPE_DANGEROUS_CONTENT, // The content of this download may be malicious (e.g., extension is exe but // SafeBrowsing has not finished checking the content). DOWNLOAD_DANGER_TYPE_MAYBE_DANGEROUS_CONTENT, // Memory space for histograms is determined by the max. // ALWAYS ADD NEW VALUES BEFORE THIS ONE. DOWNLOAD_DANGER_TYPE_MAX }; } #endif // CONTENT_PUBLIC_BROWSER_DOWNLOAD_DANGER_TYPE_H_
Java
// Copyright NVIDIA Corporation 2012 // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <dp/ui/RenderTarget.h> namespace dp { namespace ui { RenderTarget::~RenderTarget() { } bool RenderTarget::beginRendering() { return true; } void RenderTarget::endRendering() { } bool RenderTarget::isStereoEnabled() const { return false; } bool RenderTarget::setStereoTarget( StereoTarget target ) { return target == LEFT; } RenderTarget::StereoTarget RenderTarget::getStereoTarget() const { return LEFT; } } // namespace ui } // namespace dp
Java
import PromiseRouter from '../PromiseRouter';
import * as middleware from "../middlewares";
import { Parse } from "parse/node";

/**
 * Router for the push REST endpoint. Requires the master key and delegates
 * the actual delivery to the configured push controller.
 */
export class PushRouter extends PromiseRouter {

  mountRoutes() {
    this.route("POST", "/push",
               middleware.promiseEnforceMasterKeyAccess,
               PushRouter.handlePOST);
  }

  /**
   * Handle a POST /push request: resolve the installation query, hand the
   * payload to the push controller, and report success.
   */
  static handlePOST(req) {
    const pushController = req.config.pushController;
    if (!pushController) {
      throw new Parse.Error(Parse.Error.PUSH_MISCONFIGURED,
                            'Push controller is not set');
    }

    const where = PushRouter.getQueryCondition(req);
    pushController.sendPush(req.body, where, req.config, req.auth);
    return Promise.resolve({
      response: {
        'result': true
      }
    });
  }

  /**
   * Get query condition from the request body.
   * Exactly one of `where` / `channels` must be supplied.
   * @param {Object} req A request object
   * @returns {Object} The query condition, the where field in a query api call
   */
  static getQueryCondition(req) {
    const body = req.body || {};
    const hasWhere = typeof body.where !== 'undefined';
    const hasChannels = typeof body.channels !== 'undefined';

    if (hasWhere && hasChannels) {
      throw new Parse.Error(Parse.Error.PUSH_MISCONFIGURED,
                            'Channels and query can not be set at the same time.');
    }
    if (hasWhere) {
      return body.where;
    }
    if (hasChannels) {
      return {
        "channels": {
          "$in": body.channels
        }
      };
    }
    throw new Parse.Error(Parse.Error.PUSH_MISCONFIGURED,
                          'Channels and query should be set at least one.');
  }
}

export default PushRouter;
Java
// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Enumerate the various item subtypes that are supported by sync.
// Each sync object is expected to have an immutable object type.
// An object's type is inferred from the type of data it holds.

#ifndef SYNC_INTERNAL_API_PUBLIC_BASE_MODEL_TYPE_H_
#define SYNC_INTERNAL_API_PUBLIC_BASE_MODEL_TYPE_H_

#include <map>
#include <set>
#include <string>

#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/base/enum_set.h"

namespace base {
class ListValue;
class StringValue;
class Value;
}

namespace sync_pb {
class EntitySpecifics;
class SyncEntity;
}

namespace syncer {

// TODO(akalin): Move the non-exported functions in this file to a
// private header.

// A Java counterpart will be generated for this enum.
// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.sync

// |kModelTypeInfoMap| struct entries are in the same order as their definition
// in ModelType enum. Don't forget to update the |kModelTypeInfoMap| struct in
// model_type.cc when you make changes in ModelType enum.
//
// NOTE: ordering is part of the sync protocol and of UMA histograms — never
// reorder or remove values; add new ones only where the sentinel comments
// below allow.
enum ModelType {
  // Object type unknown.  Objects may transition through
  // the unknown state during their initial creation, before
  // their properties are set.  After deletion, object types
  // are generally preserved.
  UNSPECIFIED,
  // A permanent folder whose children may be of mixed
  // datatypes (e.g. the "Google Chrome" folder).
  TOP_LEVEL_FOLDER,

  // ------------------------------------ Start of "real" model types.
  // The model types declared before here are somewhat special, as they
  // they do not correspond to any browser data model.  The remaining types
  // are bona fide model types; all have a related browser data model and
  // can be represented in the protocol using a specific Message type in the
  // EntitySpecifics protocol buffer.
  //
  // A bookmark folder or a bookmark URL object.
  BOOKMARKS,
  FIRST_USER_MODEL_TYPE = BOOKMARKS,  // Declared 2nd, for debugger prettiness.
  FIRST_REAL_MODEL_TYPE = FIRST_USER_MODEL_TYPE,

  // A preference object.
  PREFERENCES,
  // A password object.
  PASSWORDS,
  // An AutofillProfile Object
  AUTOFILL_PROFILE,
  // An autofill object.
  AUTOFILL,
  // Credit cards and addresses synced from the user's account. These are
  // read-only on the client.
  AUTOFILL_WALLET_DATA,
  // Usage counts and last use dates for Wallet cards and addresses. This data
  // is both readable and writable.
  AUTOFILL_WALLET_METADATA,
  // A themes object.
  THEMES,
  // A typed_url object.
  TYPED_URLS,
  // An extension object.
  EXTENSIONS,
  // An object representing a custom search engine.
  SEARCH_ENGINES,
  // An object representing a browser session.
  SESSIONS,
  // An app object.
  APPS,
  // An app setting from the extension settings API.
  APP_SETTINGS,
  // An extension setting from the extension settings API.
  EXTENSION_SETTINGS,
  // App notifications.
  APP_NOTIFICATIONS,  // Deprecated.
  // History delete directives.
  HISTORY_DELETE_DIRECTIVES,
  // Synced push notifications.
  SYNCED_NOTIFICATIONS,  // Deprecated.
  // Synced Notification app info.
  SYNCED_NOTIFICATION_APP_INFO,  // Deprecated.
  // Custom spelling dictionary.
  DICTIONARY,
  // Favicon images.
  FAVICON_IMAGES,
  // Favicon tracking information.
  FAVICON_TRACKING,
  // Client-specific metadata, synced before other user types.
  DEVICE_INFO,
  // These preferences are synced before other user types and are never
  // encrypted.
  PRIORITY_PREFERENCES,
  // Supervised user settings.
  SUPERVISED_USER_SETTINGS,
  // Supervised users. Every supervised user is a profile that is configured
  // remotely by this user and can have restrictions applied. SUPERVISED_USERS
  // and SUPERVISED_USER_SETTINGS can not be encrypted.
  SUPERVISED_USERS,
  // Supervised user shared settings. Shared settings can be modified both by
  // the manager and the supervised user.
  SUPERVISED_USER_SHARED_SETTINGS,
  // Distilled articles.
  ARTICLES,
  // App List items
  APP_LIST,
  // WiFi credentials. Each item contains the information for connecting to one
  // WiFi network. This includes, e.g., network name and password.
  WIFI_CREDENTIALS,
  // Supervised user whitelists. Each item contains a CRX ID (like an extension
  // ID) and a name.
  SUPERVISED_USER_WHITELISTS,

  // ---- Proxy types ----
  // Proxy types are excluded from the sync protocol, but are still considered
  // real user types. By convention, we prefix them with 'PROXY_' to distinguish
  // them from normal protocol types.

  // Tab sync. This is a placeholder type, so that Sessions can be implicitly
  // enabled for history sync and tabs sync.
  PROXY_TABS,

  FIRST_PROXY_TYPE = PROXY_TABS,
  LAST_PROXY_TYPE = PROXY_TABS,
  LAST_USER_MODEL_TYPE = PROXY_TABS,

  // ---- Control Types ----
  // An object representing a set of Nigori keys.
  NIGORI,
  FIRST_CONTROL_MODEL_TYPE = NIGORI,
  // Flags to enable experimental features.
  EXPERIMENTS,
  LAST_CONTROL_MODEL_TYPE = EXPERIMENTS,

  LAST_REAL_MODEL_TYPE = LAST_CONTROL_MODEL_TYPE,

  // If you are adding a new sync datatype that is exposed to the user via the
  // sync preferences UI, be sure to update the list in
  // components/sync_driver/user_selectable_sync_type.h so that the UMA
  // histograms for sync include your new type.  In this case, be sure to also
  // update the UserSelectableTypes() definition in
  // sync/syncable/model_type.cc.
  MODEL_TYPE_COUNT,
};

// Set of "real" types (excludes UNSPECIFIED / TOP_LEVEL_FOLDER).
typedef EnumSet<ModelType, FIRST_REAL_MODEL_TYPE, LAST_REAL_MODEL_TYPE>
    ModelTypeSet;
// Set spanning every ModelType value, including the special pre-real ones.
typedef EnumSet<ModelType, UNSPECIFIED, LAST_REAL_MODEL_TYPE>
    FullModelTypeSet;
typedef std::map<syncer::ModelType, const char*> ModelTypeNameMap;

// Casts an int to a ModelType, DCHECK-ing that it is a valid enum value.
inline ModelType ModelTypeFromInt(int i) {
  DCHECK_GE(i, 0);
  DCHECK_LT(i, MODEL_TYPE_COUNT);
  return static_cast<ModelType>(i);
}

// Used by tests outside of sync/.
SYNC_EXPORT void AddDefaultFieldValue(ModelType datatype,
                                      sync_pb::EntitySpecifics* specifics);

// Extract the model type of a SyncEntity protocol buffer.
ModelType is a // local concept: the enum is not in the protocol. The SyncEntity's ModelType // is inferred from the presence of particular datatype field in the // entity specifics. SYNC_EXPORT_PRIVATE ModelType GetModelType( const sync_pb::SyncEntity& sync_entity); // Extract the model type from an EntitySpecifics field. Note that there // are some ModelTypes (like TOP_LEVEL_FOLDER) that can't be inferred this way; // prefer using GetModelType where possible. SYNC_EXPORT ModelType GetModelTypeFromSpecifics( const sync_pb::EntitySpecifics& specifics); // Protocol types are those types that have actual protocol buffer // representations. This distinguishes them from Proxy types, which have no // protocol representation and are never sent to the server. SYNC_EXPORT ModelTypeSet ProtocolTypes(); // These are the normal user-controlled types. This is to distinguish from // ControlTypes which are always enabled. Note that some of these share a // preference flag, so not all of them are individually user-selectable. SYNC_EXPORT ModelTypeSet UserTypes(); // These are the user-selectable data types. SYNC_EXPORT ModelTypeSet UserSelectableTypes(); SYNC_EXPORT bool IsUserSelectableType(ModelType model_type); SYNC_EXPORT ModelTypeNameMap GetUserSelectableTypeNameMap(); // This is the subset of UserTypes() that can be encrypted. SYNC_EXPORT_PRIVATE ModelTypeSet EncryptableUserTypes(); // This is the subset of UserTypes() that have priority over other types. These // types are synced before other user types and are never encrypted. SYNC_EXPORT ModelTypeSet PriorityUserTypes(); // Proxy types are placeholder types for handling implicitly enabling real // types. They do not exist at the server, and are simply used for // UI/Configuration logic. SYNC_EXPORT ModelTypeSet ProxyTypes(); // Returns a list of all control types. // // The control types are intended to contain metadata nodes that are essential // for the normal operation of the syncer. 
As such, they have the following // special properties: // - They are downloaded early during SyncBackend initialization. // - They are always enabled. Users may not disable these types. // - Their contents are not encrypted automatically. // - They support custom update application and conflict resolution logic. // - All change processing occurs on the sync thread (GROUP_PASSIVE). SYNC_EXPORT ModelTypeSet ControlTypes(); // Returns true if this is a control type. // // See comment above for more information on what makes these types special. SYNC_EXPORT bool IsControlType(ModelType model_type); // Core types are those data types used by sync's core functionality (i.e. not // user data types). These types are always enabled, and include ControlTypes(). // // The set of all core types. SYNC_EXPORT ModelTypeSet CoreTypes(); // Those core types that have high priority (includes ControlTypes()). SYNC_EXPORT ModelTypeSet PriorityCoreTypes(); // Determine a model type from the field number of its associated // EntitySpecifics field. Returns UNSPECIFIED if the field number is // not recognized. // // If you're putting the result in a ModelTypeSet, you should use the // following pattern: // // ModelTypeSet model_types; // // Say we're looping through a list of items, each of which has a // // field number. // for (...) { // int field_number = ...; // ModelType model_type = // GetModelTypeFromSpecificsFieldNumber(field_number); // if (!IsRealDataType(model_type)) { // DLOG(WARNING) << "Unknown field number " << field_number; // continue; // } // model_types.Put(model_type); // } SYNC_EXPORT_PRIVATE ModelType GetModelTypeFromSpecificsFieldNumber( int field_number); // Return the field number of the EntitySpecifics field associated with // a model type. SYNC_EXPORT int GetSpecificsFieldNumberFromModelType( ModelType model_type); FullModelTypeSet ToFullModelTypeSet(ModelTypeSet in); // TODO(sync): The functions below badly need some cleanup. 
// Returns a pointer to a string with application lifetime that represents // the name of |model_type|. SYNC_EXPORT const char* ModelTypeToString(ModelType model_type); // Some histograms take an integer parameter that represents a model type. // The mapping from ModelType to integer is defined here. It should match // the mapping from integer to labels defined in histograms.xml. SYNC_EXPORT int ModelTypeToHistogramInt(ModelType model_type); // Handles all model types, and not just real ones. // // Caller takes ownership of returned value. SYNC_EXPORT_PRIVATE base::StringValue* ModelTypeToValue(ModelType model_type); // Converts a Value into a ModelType - complement to ModelTypeToValue(). SYNC_EXPORT_PRIVATE ModelType ModelTypeFromValue(const base::Value& value); // Returns the ModelType corresponding to the name |model_type_string|. SYNC_EXPORT ModelType ModelTypeFromString( const std::string& model_type_string); // Returns the comma-separated string representation of |model_types|. SYNC_EXPORT std::string ModelTypeSetToString(ModelTypeSet model_types); // Returns the set of comma-separated model types from |model_type_string|. SYNC_EXPORT ModelTypeSet ModelTypeSetFromString( const std::string& model_type_string); SYNC_EXPORT scoped_ptr<base::ListValue> ModelTypeSetToValue( ModelTypeSet model_types); SYNC_EXPORT ModelTypeSet ModelTypeSetFromValue(const base::ListValue& value); // Returns a string corresponding to the syncable tag for this datatype. SYNC_EXPORT std::string ModelTypeToRootTag(ModelType type); // Convert a real model type to a notification type (used for // subscribing to server-issued notifications). Returns true iff // |model_type| was a real model type and |notification_type| was // filled in. SYNC_EXPORT bool RealModelTypeToNotificationType( ModelType model_type, std::string* notification_type); // Converts a notification type to a real model type. 
Returns true // iff |notification_type| was the notification type of a real model // type and |model_type| was filled in. SYNC_EXPORT bool NotificationTypeToRealModelType( const std::string& notification_type, ModelType* model_type); // Returns true if |model_type| is a real datatype SYNC_EXPORT bool IsRealDataType(ModelType model_type); // Returns true if |model_type| is a proxy type SYNC_EXPORT bool IsProxyType(ModelType model_type); // Returns true if |model_type| is an act-once type. Act once types drop // entities after applying them. Drops are deletes that are not synced to other // clients. // TODO(haitaol): Make entries of act-once data types immutable. SYNC_EXPORT bool IsActOnceDataType(ModelType model_type); // Returns true if |model_type| requires its root folder to be explicitly // created on the server during initial sync. SYNC_EXPORT bool IsTypeWithServerGeneratedRoot(ModelType model_type); // Returns true if root folder for |model_type| is created on the client when // that type is initially synced. SYNC_EXPORT bool IsTypeWithClientGeneratedRoot(ModelType model_type); // Returns true if |model_type| supports parent-child hierarchy or entries. SYNC_EXPORT bool TypeSupportsHierarchy(ModelType model_type); // Returns true if |model_type| supports ordering of sibling entries. SYNC_EXPORT bool TypeSupportsOrdering(ModelType model_type); // Returns set of model types that should be backed up before first sync. SYNC_EXPORT ModelTypeSet BackupTypes(); } // namespace syncer #endif // SYNC_INTERNAL_API_PUBLIC_BASE_MODEL_TYPE_H_
Java
<?php
/**
 * @see https://github.com/zendframework/zend-di for the canonical source repository
 * @copyright Copyright (c) 2017 Zend Technologies USA Inc. (https://www.zend.com)
 * @license https://github.com/zendframework/zend-di/blob/master/LICENSE.md New BSD License
 */

namespace ZendTest\Di\TestAsset;

/**
 * Deliberately empty fixture class used by the zend-di test suite
 * (e.g. as a trivially instantiable dependency with no constructor).
 */
class A
{
}
Java
--- id: 587d7fac367417b2b2512bdb title: Establece un dominio y un rango en una escala challengeType: 6 forumTopicId: 301491 dashedName: set-a-domain-and-a-range-on-a-scale --- # --description-- Por defecto, las escalas usan la relación de identidad. Esto significa que el valor de entrada se asigna al valor de salida. Sin embargo, las escalas pueden ser mucho más flexibles e interesantes. Digamos que un conjunto de datos tiene valores entre 50 y 480. Esta es la información de entrada para una escala, también conocido como el <dfn>dominio</dfn>. Quieres trazar esos puntos a lo largo del eje `x` en el lienzo SVG, entre 10 unidades y 500 unidades. Esta es la información de salida, también conocida como el <dfn>rango</dfn>. Los métodos `domain()` y `range()` establecen estos valores para la escala. Ambos métodos toman un arreglo de al menos dos elementos como argumento. Aquí un ejemplo: ```js scale.domain([50, 480]); scale.range([10, 500]); scale(50) scale(480) scale(325) scale(750) d3.scaleLinear() ``` En orden, los siguientes valores se mostrarían en la consola: `10`, `500`, `323.37`, y `807.67`. Observa que la escala usa la relación lineal entre los valores del dominio y del rango para averiguar cuál debe ser la salida para un número dado. El valor mínimo en el dominio (50) se asigna al valor mínimo (10) en el rango. # --instructions-- Crea una escala y establece su dominio a `[250, 500]` y su rango a `[10, 150]`. **Nota:** Puedes encadenar los métodos `domain()` y `range()` a la variable `scale`. # --hints-- Tu código debe usar el método `domain()`. ```js assert(code.match(/\.domain/g)); ``` El `domain()` de `scale` (escala) debe ser establecido a `[250, 500]`. ```js assert(JSON.stringify(scale.domain()) == JSON.stringify([250, 500])); ``` Tu código debe usar el método `range()`. ```js assert(code.match(/\.range/g)); ``` El `range()` de `scale` (escala) debe ser establecido a `[10, 150]`. 
```js assert(JSON.stringify(scale.range()) == JSON.stringify([10, 150])); ``` El texto en el `h2` debe ser `-102`. ```js assert($('h2').text() == '-102'); ``` # --seed-- ## --seed-contents-- ```html <body> <script> // Add your code below this line const scale = d3.scaleLinear(); // Add your code above this line const output = scale(50); d3.select("body") .append("h2") .text(output); </script> </body> ``` # --solutions-- ```html <body> <script> const scale = d3.scaleLinear(); scale.domain([250, 500]) scale.range([10, 150]) const output = scale(50); d3.select("body") .append("h2") .text(output); </script> </body> ```
Java
module Verifier.SAW.SATQuery
( SATQuery(..)
, SATResult(..)
, satQueryAsTerm
) where

import Control.Monad (foldM)
import Data.Map (Map)
import Data.Set (Set)

import Verifier.SAW.Name
import Verifier.SAW.FiniteValue
import Verifier.SAW.SharedTerm

-- | This datatype represents a satisfiability query that might
--   be dispatched to a solver.  It carries a series of assertions
--   to be made to a solver, together with a collection of
--   variables we expect the solver to report models over,
--   and a collection of @VarIndex@ values identifying
--   subterms that should be considered uninterpreted.
--
--   All the @ExtCns@ values in the query should
--   appear either in @satVariables@ or @satUninterp@.
--   Constant values for which definitions are provided
--   may also appear in @satUninterp@, in which case
--   they will be treated as uninterpreted.  Otherwise,
--   their definitions will be unfolded.
--
--   Some solvers do not support uninterpreted values
--   and will fail if presented a query that requests them.
data SATQuery =
  SATQuery
  { satVariables :: Map (ExtCns Term) FirstOrderType
      -- ^ The variables in the query, for which we
      --   expect the solver to find values in satisfiable
      --   cases.  INVARIANT: The type of the @ExtCns@ keys
      --   should correspond to the @FirstOrderType@ values.

  , satUninterp  :: Set VarIndex
      -- ^ A set indicating which variables and constant
      --   values should be considered uninterpreted by
      --   the solver.  Models will not report values
      --   for uninterpreted values.

  , satAsserts   :: [Term]
      -- ^ A collection of assertions.  These should
      --   all be terms of type @Bool@.  The overall
      --   query should be understood as the conjunction
      --   of these terms.
  }
-- TODO, allow first-order propositions in addition to Boolean terms.

-- | The result of a sat query.  In the event a model is found,
--   return a mapping from the @ExtCns@ variables to values.
data SATResult
  = Unsatisfiable
  | Satisfiable (ExtCns Term -> IO FirstOrderValue)
  | Unknown

-- | Compute the conjunction of all the assertions
--   in this SAT query as a single term of type Bool.
--   An empty assertion list yields the constant @True@.
satQueryAsTerm :: SharedContext -> SATQuery -> IO Term
satQueryAsTerm sc satq =
  case satAsserts satq of
    [] -> scBool sc True
    (x:xs) -> foldM (scAnd sc) x xs
-- TODO, we may have to rethink this function
-- once we allow first-order statements.
Java
// // detail/pipe_select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2010 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP #define BOOST_ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include <boost/asio/detail/config.hpp> #if !defined(BOOST_WINDOWS) #if !defined(__CYGWIN__) #if !defined(__SYMBIAN32__) #if !defined(BOOST_ASIO_HAS_EVENTFD) #include <boost/asio/detail/push_options.hpp> namespace boost { namespace asio { namespace detail { class pipe_select_interrupter { public: // Constructor. BOOST_ASIO_DECL pipe_select_interrupter(); // Destructor. BOOST_ASIO_DECL ~pipe_select_interrupter(); // Interrupt the select call. BOOST_ASIO_DECL void interrupt(); // Reset the select interrupt. Returns true if the call was interrupted. BOOST_ASIO_DECL bool reset(); // Get the read descriptor to be passed to select. int read_descriptor() const { return read_descriptor_; } private: // The read end of a connection used to interrupt the select call. This file // descriptor is passed to select such that when it is time to stop, a single // byte will be written on the other end of the connection and this // descriptor will become readable. int read_descriptor_; // The write end of a connection used to interrupt the select call. A single // byte may be written to this to wake up the select which is waiting for the // other end to become readable. 
int write_descriptor_; }; } // namespace detail } // namespace asio } // namespace boost #include <boost/asio/detail/pop_options.hpp> #if defined(BOOST_ASIO_HEADER_ONLY) # include <boost/asio/detail/impl/pipe_select_interrupter.ipp> #endif // defined(BOOST_ASIO_HEADER_ONLY) #endif // !defined(BOOST_ASIO_HAS_EVENTFD) #endif // !defined(__SYMBIAN32__) #endif // !defined(__CYGWIN__) #endif // !defined(BOOST_WINDOWS) #endif // BOOST_ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP
Java
from lib.common import helpers


class Module:
    # Empire post-exploitation module: locks the target's interactive desktop
    # session by P/Invoking user32!LockWorkStation from a generated
    # PowerShell script.

    def __init__(self, mainMenu, params=[]):
        # Module metadata consumed by the Empire framework's menus/UI.
        self.info = {
            'Name': 'Invoke-LockWorkStation',

            'Author': ['@harmj0y'],

            'Description': ("Locks the workstation's display."),

            'Background' : False,

            'OutputExtension' : None,

            'NeedsAdmin' : False,

            'OpsecSafe' : False,

            'Language' : 'powershell',

            'MinLanguageVersion' : '2',

            'Comments': [
                'http://poshcode.org/1640'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # apply any caller-supplied option overrides; unknown names are ignored
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        # Return the PowerShell source to task the agent with.  The script is
        # delivered verbatim, so the embedded string must not be modified.
        script = """
Function Invoke-LockWorkStation {
    # region define P/Invoke types dynamically
    # stolen from PowerSploit https://github.com/mattifestation/PowerSploit/blob/master/Mayhem/Mayhem.psm1
    # thanks matt and chris :)
    $DynAssembly = New-Object System.Reflection.AssemblyName('Win32')
    $AssemblyBuilder = [AppDomain]::CurrentDomain.DefineDynamicAssembly($DynAssembly, [Reflection.Emit.AssemblyBuilderAccess]::Run)
    $ModuleBuilder = $AssemblyBuilder.DefineDynamicModule('Win32', $False)

    $TypeBuilder = $ModuleBuilder.DefineType('Win32.User32', 'Public, Class')
    $DllImportConstructor = [Runtime.InteropServices.DllImportAttribute].GetConstructor(@([String]))
    $SetLastError = [Runtime.InteropServices.DllImportAttribute].GetField('SetLastError')
    $SetLastErrorCustomAttribute = New-Object Reflection.Emit.CustomAttributeBuilder($DllImportConstructor, @('User32.dll'), [Reflection.FieldInfo[]]@($SetLastError), @($True))

    # Define [Win32.User32]::LockWorkStation()
    $PInvokeMethod = $TypeBuilder.DefinePInvokeMethod('LockWorkStation', 'User32.dll', ([Reflection.MethodAttributes]::Public -bor [Reflection.MethodAttributes]::Static), [Reflection.CallingConventions]::Standard, [Bool], [Type[]]@(), [Runtime.InteropServices.CallingConvention]::Winapi, [Runtime.InteropServices.CharSet]::Ansi)
    $PInvokeMethod.SetCustomAttribute($SetLastErrorCustomAttribute)

    $User32 = $TypeBuilder.CreateType()

    $Null = $User32::LockWorkStation()
}
Invoke-LockWorkStation; "Workstation locked."
"""
        return script
Java
/* Copyright 2021 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

/* MT SCP RV32i configuration */

#include "cache.h"
#include "csr.h"
#include "hooks.h"
#include "registers.h"

/* End of general-purpose SRAM: the IPC shared-object address rounded down
 * to a 1 KiB (0x400) boundary. */
#define SCP_SRAM_END (CONFIG_IPC_SHARED_OBJ_ADDR & (~(0x400 - 1)))

/* Static MPU region table applied at boot.  Regions are {start, end, attrs};
 * MPU_ATTR_C presumably enables caching for the region -- confirm against
 * the MPU register documentation. */
struct mpu_entry mpu_entries[NR_MPU_ENTRIES] = {
	/* SRAM (for most code, data) */
	{0, SCP_SRAM_END, MPU_ATTR_C | MPU_ATTR_W | MPU_ATTR_R},
	/* SRAM (for IPI shared buffer) -- uncached so both sides see writes */
	{SCP_SRAM_END, SCP_FW_END, MPU_ATTR_W | MPU_ATTR_R},
	/* For AP domain */
#ifdef CHIP_VARIANT_MT8195
	{0x60000000, 0x70000000, MPU_ATTR_W | MPU_ATTR_R | MPU_ATTR_P},
#else
	{0x60000000, 0x70000000, MPU_ATTR_W | MPU_ATTR_R},
#endif
	/* For SCP sys */
	{0x70000000, 0x80000000, MPU_ATTR_W | MPU_ATTR_R},
#ifdef CHIP_VARIANT_MT8195
	{0x10000000, 0x11400000, MPU_ATTR_C | MPU_ATTR_W | MPU_ATTR_R},
	/* Dedicated DRAM window for persisting panic data across reboots */
	{CONFIG_PANIC_DRAM_BASE,
	 CONFIG_PANIC_DRAM_BASE + CONFIG_PANIC_DRAM_SIZE,
	 MPU_ATTR_W | MPU_ATTR_R},
#else
	{0x10000000, 0x11400000, MPU_ATTR_W | MPU_ATTR_R},
#endif
};

#include "gpio_list.h"

#ifdef CONFIG_PANIC_CONSOLE_OUTPUT
/*
 * Print any panic information left over from the previous boot: saved
 * panic data if present, plus the core-0 PC/LR/SP latch registers.
 * Silently returns when there is neither panic data nor a latched PC.
 */
static void report_previous_panic(void)
{
	struct panic_data *panic = panic_get_data();

	if (panic == NULL && SCP_CORE0_MON_PC_LATCH == 0)
		return;

	ccprintf("[Previous Panic]\n");
	if (panic) {
		panic_data_ccprint(panic);
	} else {
		/* PC latch is nonzero but no structured panic record exists */
		ccprintf("No panic data\n");
	}
	ccprintf("Latch PC:%x LR:%x SP:%x\n",
		 SCP_CORE0_MON_PC_LATCH,
		 SCP_CORE0_MON_LR_LATCH,
		 SCP_CORE0_MON_SP_LATCH);
}
DECLARE_HOOK(HOOK_INIT, report_previous_panic, HOOK_PRIO_DEFAULT);
#endif
Java
/* Copyright (C) 2012 Motorola Mobility Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * 3. Neither the name of Motorola Mobility Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#ifndef CSSSupportsRule_h
#define CSSSupportsRule_h

#include "core/css/CSSGroupingRule.h"

namespace blink {

class CSSRule;
class StyleRuleSupports;

// CSSOM wrapper for an @supports rule.  Wraps a StyleRuleSupports and
// exposes its condition text plus the nested rules inherited from
// CSSGroupingRule.
class CSSSupportsRule FINAL : public CSSGroupingRule {
public:
    static PassRefPtrWillBeRawPtr<CSSSupportsRule> create(StyleRuleSupports* rule, CSSStyleSheet* sheet)
    {
        return adoptRefWillBeNoop(new CSSSupportsRule(rule, sheet));
    }

    virtual ~CSSSupportsRule() { }

    virtual CSSRule::Type type() const OVERRIDE { return SUPPORTS_RULE; }
    // Serializes the full rule, including the nested rule block.
    virtual String cssText() const OVERRIDE;

    // The condition between "@supports" and the opening brace.
    String conditionText() const;

    virtual void trace(Visitor* visitor) OVERRIDE { CSSGroupingRule::trace(visitor); }

private:
    CSSSupportsRule(StyleRuleSupports*, CSSStyleSheet*);
};

DEFINE_CSS_RULE_TYPE_CASTS(CSSSupportsRule, SUPPORTS_RULE);

} // namespace blink

#endif // CSSSupportsRule_h
Java
/* ***** BEGIN LICENSE BLOCK ***** * Distributed under the BSD license: * * Copyright (c) 2010, Ajax.org B.V. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Ajax.org B.V. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL AJAX.ORG B.V. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * ***** END LICENSE BLOCK ***** */ define(function(require, exports, module) { "use strict"; var keys = require("./keys"); var useragent = require("./useragent"); exports.addListener = function(elem, type, callback) { if (elem.addEventListener) { return elem.addEventListener(type, callback, false); } if (elem.attachEvent) { var wrapper = function() { callback.call(elem, window.event); }; callback._wrapper = wrapper; elem.attachEvent("on" + type, wrapper); } }; exports.removeListener = function(elem, type, callback) { if (elem.removeEventListener) { return elem.removeEventListener(type, callback, false); } if (elem.detachEvent) { elem.detachEvent("on" + type, callback._wrapper || callback); } }; /* * Prevents propagation and clobbers the default action of the passed event */ exports.stopEvent = function(e) { exports.stopPropagation(e); exports.preventDefault(e); return false; }; exports.stopPropagation = function(e) { if (e.stopPropagation) e.stopPropagation(); else e.cancelBubble = true; }; exports.preventDefault = function(e) { if (e.preventDefault) e.preventDefault(); else e.returnValue = false; }; /* * @return {Number} 0 for left button, 1 for middle button, 2 for right button */ exports.getButton = function(e) { if (e.type == "dblclick") return 0; if (e.type == "contextmenu" || (useragent.isMac && (e.ctrlKey && !e.altKey && !e.shiftKey))) return 2; // DOM Event if (e.preventDefault) { return e.button; } // old IE else { return {1:0, 2:2, 4:1}[e.button]; } }; exports.capture = function(el, eventHandler, releaseCaptureHandler) { function onMouseUp(e) { eventHandler && eventHandler(e); releaseCaptureHandler && releaseCaptureHandler(e); exports.removeListener(document, "mousemove", eventHandler, true); exports.removeListener(document, "mouseup", onMouseUp, true); exports.removeListener(document, "dragstart", onMouseUp, true); } exports.addListener(document, "mousemove", eventHandler, true); exports.addListener(document, "mouseup", onMouseUp, true); 
exports.addListener(document, "dragstart", onMouseUp, true); return onMouseUp; }; exports.addMouseWheelListener = function(el, callback) { if ("onmousewheel" in el) { exports.addListener(el, "mousewheel", function(e) { var factor = 8; if (e.wheelDeltaX !== undefined) { e.wheelX = -e.wheelDeltaX / factor; e.wheelY = -e.wheelDeltaY / factor; } else { e.wheelX = 0; e.wheelY = -e.wheelDelta / factor; } callback(e); }); } else if ("onwheel" in el) { exports.addListener(el, "wheel", function(e) { var factor = 0.35; switch (e.deltaMode) { case e.DOM_DELTA_PIXEL: e.wheelX = e.deltaX * factor || 0; e.wheelY = e.deltaY * factor || 0; break; case e.DOM_DELTA_LINE: case e.DOM_DELTA_PAGE: e.wheelX = (e.deltaX || 0) * 5; e.wheelY = (e.deltaY || 0) * 5; break; } callback(e); }); } else { exports.addListener(el, "DOMMouseScroll", function(e) { if (e.axis && e.axis == e.HORIZONTAL_AXIS) { e.wheelX = (e.detail || 0) * 5; e.wheelY = 0; } else { e.wheelX = 0; e.wheelY = (e.detail || 0) * 5; } callback(e); }); } }; exports.addMultiMouseDownListener = function(el, timeouts, eventHandler, callbackName) { var clicks = 0; var startX, startY, timer; var eventNames = { 2: "dblclick", 3: "tripleclick", 4: "quadclick" }; exports.addListener(el, "mousedown", function(e) { if (exports.getButton(e) !== 0) { clicks = 0; } else if (e.detail > 1) { clicks++; if (clicks > 4) clicks = 1; } else { clicks = 1; } if (useragent.isIE) { var isNewClick = Math.abs(e.clientX - startX) > 5 || Math.abs(e.clientY - startY) > 5; if (!timer || isNewClick) clicks = 1; if (timer) clearTimeout(timer); timer = setTimeout(function() {timer = null}, timeouts[clicks - 1] || 600); if (clicks == 1) { startX = e.clientX; startY = e.clientY; } } e._clicks = clicks; eventHandler[callbackName]("mousedown", e); if (clicks > 4) clicks = 0; else if (clicks > 1) return eventHandler[callbackName](eventNames[clicks], e); }); if (useragent.isOldIE) { exports.addListener(el, "dblclick", function(e) { clicks = 2; if (timer) 
clearTimeout(timer); timer = setTimeout(function() {timer = null}, timeouts[clicks - 1] || 600); eventHandler[callbackName]("mousedown", e); eventHandler[callbackName](eventNames[clicks], e); }); } }; var getModifierHash = useragent.isMac && useragent.isOpera && !("KeyboardEvent" in window) ? function(e) { return 0 | (e.metaKey ? 1 : 0) | (e.altKey ? 2 : 0) | (e.shiftKey ? 4 : 0) | (e.ctrlKey ? 8 : 0); } : function(e) { return 0 | (e.ctrlKey ? 1 : 0) | (e.altKey ? 2 : 0) | (e.shiftKey ? 4 : 0) | (e.metaKey ? 8 : 0); }; exports.getModifierString = function(e) { return keys.KEY_MODS[getModifierHash(e)]; }; function normalizeCommandKeys(callback, e, keyCode) { var hashId = getModifierHash(e); if (!useragent.isMac && pressedKeys) { if (pressedKeys[91] || pressedKeys[92]) hashId |= 8; if (pressedKeys.altGr) { if ((3 & hashId) != 3) pressedKeys.altGr = 0; else return; } if (keyCode === 18 || keyCode === 17) { var location = "location" in e ? e.location : e.keyLocation; if (keyCode === 17 && location === 1) { if (pressedKeys[keyCode] == 1) ts = e.timeStamp; } else if (keyCode === 18 && hashId === 3 && location === 2) { var dt = e.timeStamp - ts; if (dt < 50) pressedKeys.altGr = true; } } } if (keyCode in keys.MODIFIER_KEYS) { keyCode = -1; } if (hashId & 8 && (keyCode === 91 || keyCode === 93)) { keyCode = -1; } if (!hashId && keyCode === 13) { var location = "location" in e ? e.location : e.keyLocation; if (location === 3) { callback(e, hashId, -keyCode); if (e.defaultPrevented) return; } } if (useragent.isChromeOS && hashId & 8) { callback(e, hashId, keyCode); if (e.defaultPrevented) return; else hashId &= ~8; } // If there is no hashId and the keyCode is not a function key, then // we don't call the callback as we don't handle a command key here // (it's a normal key/character input). 
if (!hashId && !(keyCode in keys.FUNCTION_KEYS) && !(keyCode in keys.PRINTABLE_KEYS)) { return false; } return callback(e, hashId, keyCode); } var pressedKeys = null; var ts = 0; exports.addCommandKeyListener = function(el, callback) { var addListener = exports.addListener; if (useragent.isOldGecko || (useragent.isOpera && !("KeyboardEvent" in window))) { // Old versions of Gecko aka. Firefox < 4.0 didn't repeat the keydown // event if the user pressed the key for a longer time. Instead, the // keydown event was fired once and later on only the keypress event. // To emulate the 'right' keydown behavior, the keyCode of the initial // keyDown event is stored and in the following keypress events the // stores keyCode is used to emulate a keyDown event. var lastKeyDownKeyCode = null; addListener(el, "keydown", function(e) { lastKeyDownKeyCode = e.keyCode; }); addListener(el, "keypress", function(e) { return normalizeCommandKeys(callback, e, lastKeyDownKeyCode); }); } else { var lastDefaultPrevented = null; addListener(el, "keydown", function(e) { pressedKeys[e.keyCode] = (pressedKeys[e.keyCode] || 0) + 1; var result = normalizeCommandKeys(callback, e, e.keyCode); lastDefaultPrevented = e.defaultPrevented; return result; }); addListener(el, "keypress", function(e) { if (lastDefaultPrevented && (e.ctrlKey || e.altKey || e.shiftKey || e.metaKey)) { exports.stopEvent(e); lastDefaultPrevented = null; } }); addListener(el, "keyup", function(e) { pressedKeys[e.keyCode] = null; }); if (!pressedKeys) { pressedKeys = Object.create(null); addListener(window, "focus", function(e) { pressedKeys = Object.create(null); }); } } }; if (window.postMessage && !useragent.isOldIE) { var postMessageId = 1; exports.nextTick = function(callback, win) { win = win || window; var messageName = "zero-timeout-message-" + postMessageId; exports.addListener(win, "message", function listener(e) { if (e.data == messageName) { exports.stopPropagation(e); exports.removeListener(win, "message", 
listener); callback(); } }); win.postMessage(messageName, "*"); }; } exports.nextFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame || window.oRequestAnimationFrame; if (exports.nextFrame) exports.nextFrame = exports.nextFrame.bind(window); else exports.nextFrame = function(callback) { setTimeout(callback, 17); }; });
Java
#!/bin/bash DD=../../PhoneGap/ios/KNappen/assets/world/KNappen rm -rf "$DD" mkdir -p "$DD" cp -va ./KNappen.MobileSPA/* "$DD" pushd "$DD" rm -rf Brukerdokumentasjon *.ts bin obj Properties UnitTests index.html Test.html index.html *.csproj *.user config.xml Web*.config packages.config popd
Java
############################################################################### ## ## Copyright (C) 2014-2016, New York University. ## Copyright (C) 2011-2014, NYU-Poly. ## Copyright (C) 2006-2011, University of Utah. ## All rights reserved. ## Contact: [email protected] ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the New York University nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## ############################################################################### from __future__ import division """ This python module defines Connection class. 
""" import copy from vistrails.db.domain import DBConnection from vistrails.core.vistrail.port import PortEndPoint, Port import unittest from vistrails.db.domain import IdScope ################################################################################ class Connection(DBConnection): """ A Connection is a connection between two modules. Right now there's only Module connections. """ ########################################################################## # Constructors and copy @staticmethod def from_port_specs(source, dest): """from_port_specs(source: PortSpec, dest: PortSpec) -> Connection Static method that creates a Connection given source and destination ports. """ conn = Connection() conn.source = copy.copy(source) conn.destination = copy.copy(dest) return conn @staticmethod def fromID(id): """fromTypeID(id: int) -> Connection Static method that creates a Connection given an id. """ conn = Connection() conn.id = id conn.source.endPoint = PortEndPoint.Source conn.destination.endPoint = PortEndPoint.Destination return conn def __init__(self, *args, **kwargs): """__init__() -> Connection Initializes source and destination ports. """ DBConnection.__init__(self, *args, **kwargs) if self.id is None: self.db_id = -1 if not len(self.ports) > 0: self.source = Port(type='source') self.destination = Port(type='destination') def __copy__(self): """__copy__() -> Connection - Returns a clone of self. 
""" return Connection.do_copy(self) def do_copy(self, new_ids=False, id_scope=None, id_remap=None): cp = DBConnection.do_copy(self, new_ids, id_scope, id_remap) cp.__class__ = Connection for port in cp.ports: Port.convert(port) return cp ########################################################################## @staticmethod def convert(_connection): # print "ports: %s" % _Connection._get_ports(_connection) if _connection.__class__ == Connection: return _connection.__class__ = Connection for port in _connection.ports: Port.convert(port) ########################################################################## # Properties id = DBConnection.db_id ports = DBConnection.db_ports def add_port(self, port): self.db_add_port(port) def _get_sourceId(self): """ _get_sourceId() -> int Returns the module id of source port. Do not use this function, use sourceId property: c.sourceId """ return self.source.moduleId def _set_sourceId(self, id): """ _set_sourceId(id : int) -> None Sets this connection source id. It updates both self.source.moduleId and self.source.id. Do not use this function, use sourceId property: c.sourceId = id """ self.source.moduleId = id self.source.id = id sourceId = property(_get_sourceId, _set_sourceId) def _get_destinationId(self): """ _get_destinationId() -> int Returns the module id of dest port. Do not use this function, use sourceId property: c.destinationId """ return self.destination.moduleId def _set_destinationId(self, id): """ _set_destinationId(id : int) -> None Sets this connection destination id. It updates self.dest.moduleId. Do not use this function, use destinationId property: c.destinationId = id """ self.destination.moduleId = id destinationId = property(_get_destinationId, _set_destinationId) def _get_source(self): """_get_source() -> Port Returns source port. 
Do not use this function, use source property: c.source """ try: return self.db_get_port_by_type('source') except KeyError: pass return None def _set_source(self, source): """_set_source(source: Port) -> None Sets this connection source port. Do not use this function, use source property instead: c.source = source """ try: port = self.db_get_port_by_type('source') self.db_delete_port(port) except KeyError: pass if source is not None: self.db_add_port(source) source = property(_get_source, _set_source) def _get_destination(self): """_get_destination() -> Port Returns destination port. Do not use this function, use destination property: c.destination """ # return self.db_ports['destination'] try: return self.db_get_port_by_type('destination') except KeyError: pass return None def _set_destination(self, dest): """_set_destination(dest: Port) -> None Sets this connection destination port. Do not use this function, use destination property instead: c.destination = dest """ try: port = self.db_get_port_by_type('destination') self.db_delete_port(port) except KeyError: pass if dest is not None: self.db_add_port(dest) destination = property(_get_destination, _set_destination) dest = property(_get_destination, _set_destination) ########################################################################## # Operators def __str__(self): """__str__() -> str - Returns a string representation of a Connection object. 
""" rep = "<connection id='%s'>%s%s</connection>" return rep % (str(self.id), str(self.source), str(self.destination)) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): if type(other) != type(self): return False return (self.source == other.source and self.dest == other.dest) def equals_no_id(self, other): """Checks equality up to ids (connection and ports).""" if type(self) != type(other): return False return (self.source.equals_no_id(other.source) and self.dest.equals_no_id(other.dest)) ################################################################################ # Testing class TestConnection(unittest.TestCase): def create_connection(self, id_scope=IdScope()): from vistrails.core.vistrail.port import Port from vistrails.core.modules.basic_modules import identifier as basic_pkg source = Port(id=id_scope.getNewId(Port.vtType), type='source', moduleId=21L, moduleName='String', name='value', signature='(%s:String)' % basic_pkg) destination = Port(id=id_scope.getNewId(Port.vtType), type='destination', moduleId=20L, moduleName='Float', name='value', signature='(%s:Float)' % basic_pkg) connection = Connection(id=id_scope.getNewId(Connection.vtType), ports=[source, destination]) return connection def test_copy(self): id_scope = IdScope() c1 = self.create_connection(id_scope) c2 = copy.copy(c1) self.assertEquals(c1, c2) self.assertEquals(c1.id, c2.id) c3 = c1.do_copy(True, id_scope, {}) self.assertEquals(c1, c3) self.assertNotEquals(c1.id, c3.id) def test_serialization(self): import vistrails.core.db.io c1 = self.create_connection() xml_str = vistrails.core.db.io.serialize(c1) c2 = vistrails.core.db.io.unserialize(xml_str, Connection) self.assertEquals(c1, c2) self.assertEquals(c1.id, c2.id) def testEmptyConnection(self): """Tests sane initialization of empty connection""" c = Connection() self.assertEquals(c.source.endPoint, PortEndPoint.Source) self.assertEquals(c.destination.endPoint, PortEndPoint.Destination) if __name__ == 
'__main__': unittest.main()
Java
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Radu Serban // ============================================================================= // // Demo code about collisions and contacts using the penalty method (SMC) // // ============================================================================= #include "chrono/physics/ChSystemSMC.h" #include "chrono/physics/ChContactContainerSMC.h" #include "chrono/solver/ChSolverSMC.h" #include "chrono_irrlicht/ChIrrApp.h" #include <irrlicht.h> // Use the namespaces of Chrono using namespace chrono; using namespace chrono::irrlicht; // Use the main namespaces of Irrlicht using namespace irr; using namespace irr::core; using namespace irr::scene; using namespace irr::video; using namespace irr::io; using namespace irr::gui; void AddWall(std::shared_ptr<ChBody> body, const ChVector<>& dim, const ChVector<>& loc) { body->GetCollisionModel()->AddBox(dim.x(), dim.y(), dim.z(), loc); auto box = std::make_shared<ChBoxShape>(); box->GetBoxGeometry().Size = dim; box->GetBoxGeometry().Pos = loc; box->SetColor(ChColor(1, 0, 0)); box->SetFading(0.6f); body->AddAsset(box); } int main(int argc, char* argv[]) { GetLog() << "Copyright (c) 2017 projectchrono.org\nChrono version: " << CHRONO_VERSION << "\n\n"; // Simulation parameters double gravity = -9.81; double time_step = 0.00001; double out_step = 2000 * time_step; // Parameters for the falling ball int ballId = 100; double radius = 1; double mass = 1000; ChVector<> pos(0, 2, 0); ChQuaternion<> rot(1, 0, 0, 0); ChVector<> init_vel(0, 0, 0); // Parameters 
for the containing bin int binId = 200; double width = 2; double length = 2; double height = 1; double thickness = 0.1; // Create the system ChSystemSMC msystem; // The following two lines are optional, since they are the default options. They are added for future reference, // i.e. when needed to change those models. msystem.SetContactForceModel(ChSystemSMC::ContactForceModel::Hertz); msystem.SetAdhesionForceModel(ChSystemSMC::AdhesionForceModel::Constant); msystem.Set_G_acc(ChVector<>(0, gravity, 0)); // Change the default collision effective radius of curvature collision::ChCollisionInfo::SetDefaultEffectiveCurvatureRadius(1); // Create the Irrlicht visualization ChIrrApp application(&msystem, L"SMC demo", core::dimension2d<u32>(800, 600), false, true); // Easy shortcuts to add camera, lights, logo and sky in Irrlicht scene application.AddTypicalLogo(); application.AddTypicalSky(); application.AddTypicalLights(); application.AddTypicalCamera(core::vector3df(0, 3, -6)); // This means that contactforces will be shown in Irrlicht application application.SetSymbolscale(1e-4); application.SetContactsDrawMode(ChIrrTools::eCh_ContactsDrawMode::CONTACT_FORCES); // Create a material (will be used by both objects) auto material = std::make_shared<ChMaterialSurfaceSMC>(); material->SetRestitution(0.1f); material->SetFriction(0.4f); material->SetAdhesion(0); // Magnitude of the adhesion in Constant adhesion model // Create the falling ball auto ball = std::make_shared<ChBody>(ChMaterialSurface::SMC); ball->SetIdentifier(ballId); ball->SetMass(mass); ball->SetPos(pos); ball->SetRot(rot); ball->SetPos_dt(init_vel); // ball->SetWvel_par(ChVector<>(0,0,3)); ball->SetBodyFixed(false); ball->SetMaterialSurface(material); ball->SetCollide(true); ball->GetCollisionModel()->ClearModel(); ball->GetCollisionModel()->AddSphere(radius); ball->GetCollisionModel()->BuildModel(); ball->SetInertiaXX(0.4 * mass * radius * radius * ChVector<>(1, 1, 1)); auto sphere = 
std::make_shared<ChSphereShape>(); sphere->GetSphereGeometry().rad = radius; ball->AddAsset(sphere); auto mtexture = std::make_shared<ChTexture>(); mtexture->SetTextureFilename(GetChronoDataFile("bluwhite.png")); ball->AddAsset(mtexture); msystem.AddBody(ball); // Create container auto bin = std::make_shared<ChBody>(ChMaterialSurface::SMC); bin->SetIdentifier(binId); bin->SetMass(1); bin->SetPos(ChVector<>(0, 0, 0)); bin->SetRot(ChQuaternion<>(1, 0, 0, 0)); bin->SetCollide(true); bin->SetBodyFixed(true); bin->SetMaterialSurface(material); bin->GetCollisionModel()->ClearModel(); AddWall(bin, ChVector<>(width, thickness, length), ChVector<>(0, 0, 0)); // AddWall(bin, ChVector<>(thickness, height, length), ChVector<>(-width + thickness, height, 0)); // AddWall(bin, ChVector<>(thickness, height, length), ChVector<>(width - thickness, height, 0)); // AddWall(bin, ChVector<>(width, height, thickness), ChVector<>(0, height, -length + thickness)); // AddWall(bin, ChVector<>(width, height, thickness), ChVector<>(0, height, length - thickness)); bin->GetCollisionModel()->BuildModel(); msystem.AddBody(bin); // Complete asset construction application.AssetBindAll(); application.AssetUpdateAll(); // The soft-real-time cycle double time = 0.0; double out_time = 0.0; while (application.GetDevice()->run()) { application.BeginScene(); application.DrawAll(); ChIrrTools::drawGrid(application.GetVideoDriver(), 0.2, 0.2, 20, 20, ChCoordsys<>(ChVector<>(0, 0, 0), Q_from_AngX(CH_C_PI_2)), video::SColor(255, 80, 100, 100), true); while (time < out_time) { msystem.DoStepDynamics(time_step); time += time_step; } out_time += out_step; application.EndScene(); } return 0; }
Java
Spree Ordering Info =================== Introduction goes here. Installation ------------ Add spree_ordering_info to your Gemfile: ```ruby gem 'spree_ordering_info' ``` Bundle your dependencies and run the installation generator: ```shell bundle bundle exec rails g spree_ordering_info:install ``` Testing ------- First bundle your dependencies, then run `rake`. `rake` will default to building the dummy app if it does not exist, then it will run specs. The dummy app can be regenerated by using `rake test_app`. ```shell bundle bundle exec rake ``` When testing your applications integration with this extension you may use it's factories. Simply add this require statement to your spec_helper: ```ruby require 'spree_ordering_info/factories' ``` Copyright (c) 2017 Astek Wallcovering, Inc., released under the New BSD License
Java
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE134_Uncontrolled_Format_String__char_connect_socket_fprintf_82a.cpp Label Definition File: CWE134_Uncontrolled_Format_String.label.xml Template File: sources-sinks-82a.tmpl.cpp */ /* * @description * CWE: 134 Uncontrolled Format String * BadSource: connect_socket Read data using a connect socket (client side) * GoodSource: Copy a fixed string into data * Sinks: fprintf * GoodSink: fprintf with "%s" as the second argument and data as the third * BadSink : fprintf with data as the second argument * Flow Variant: 82 Data flow: data passed in a parameter to an virtual method called via a pointer * * */ #include "std_testcase.h" #include "CWE134_Uncontrolled_Format_String__char_connect_socket_fprintf_82.h" #ifdef _WIN32 #include <winsock2.h> #include <windows.h> #include <direct.h> #pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */ #define CLOSE_SOCKET closesocket #else /* NOT _WIN32 */ #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <unistd.h> #define INVALID_SOCKET -1 #define SOCKET_ERROR -1 #define CLOSE_SOCKET close #define SOCKET int #endif #define TCP_PORT 27015 #define IP_ADDRESS "127.0.0.1" namespace CWE134_Uncontrolled_Format_String__char_connect_socket_fprintf_82 { #ifndef OMITBAD void bad() { char * data; char dataBuffer[100] = ""; data = dataBuffer; { #ifdef _WIN32 WSADATA wsaData; int wsaDataInit = 0; #endif int recvResult; struct sockaddr_in service; char *replace; SOCKET connectSocket = INVALID_SOCKET; size_t dataLen = strlen(data); do { #ifdef _WIN32 if (WSAStartup(MAKEWORD(2,2), &wsaData) != NO_ERROR) { break; } wsaDataInit = 1; #endif /* POTENTIAL FLAW: Read data using a connect socket */ connectSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (connectSocket == INVALID_SOCKET) { break; } memset(&service, 0, sizeof(service)); service.sin_family = AF_INET; service.sin_addr.s_addr = inet_addr(IP_ADDRESS); service.sin_port = 
htons(TCP_PORT); if (connect(connectSocket, (struct sockaddr*)&service, sizeof(service)) == SOCKET_ERROR) { break; } /* Abort on error or the connection was closed, make sure to recv one * less char than is in the recv_buf in order to append a terminator */ /* Abort on error or the connection was closed */ recvResult = recv(connectSocket, (char *)(data + dataLen), sizeof(char) * (100 - dataLen - 1), 0); if (recvResult == SOCKET_ERROR || recvResult == 0) { break; } /* Append null terminator */ data[dataLen + recvResult / sizeof(char)] = '\0'; /* Eliminate CRLF */ replace = strchr(data, '\r'); if (replace) { *replace = '\0'; } replace = strchr(data, '\n'); if (replace) { *replace = '\0'; } } while (0); if (connectSocket != INVALID_SOCKET) { CLOSE_SOCKET(connectSocket); } #ifdef _WIN32 if (wsaDataInit) { WSACleanup(); } #endif } CWE134_Uncontrolled_Format_String__char_connect_socket_fprintf_82_base* baseObject = new CWE134_Uncontrolled_Format_String__char_connect_socket_fprintf_82_bad; baseObject->action(data); delete baseObject; } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B uses the GoodSource with the BadSink */ static void goodG2B() { char * data; char dataBuffer[100] = ""; data = dataBuffer; /* FIX: Use a fixed string that does not contain a format specifier */ strcpy(data, "fixedstringtest"); CWE134_Uncontrolled_Format_String__char_connect_socket_fprintf_82_base* baseObject = new CWE134_Uncontrolled_Format_String__char_connect_socket_fprintf_82_goodG2B; baseObject->action(data); delete baseObject; } /* goodB2G uses the BadSource with the GoodSink */ static void goodB2G() { char * data; char dataBuffer[100] = ""; data = dataBuffer; { #ifdef _WIN32 WSADATA wsaData; int wsaDataInit = 0; #endif int recvResult; struct sockaddr_in service; char *replace; SOCKET connectSocket = INVALID_SOCKET; size_t dataLen = strlen(data); do { #ifdef _WIN32 if (WSAStartup(MAKEWORD(2,2), &wsaData) != NO_ERROR) { break; } wsaDataInit = 1; #endif /* POTENTIAL FLAW: Read data using a 
connect socket */ connectSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (connectSocket == INVALID_SOCKET) { break; } memset(&service, 0, sizeof(service)); service.sin_family = AF_INET; service.sin_addr.s_addr = inet_addr(IP_ADDRESS); service.sin_port = htons(TCP_PORT); if (connect(connectSocket, (struct sockaddr*)&service, sizeof(service)) == SOCKET_ERROR) { break; } /* Abort on error or the connection was closed, make sure to recv one * less char than is in the recv_buf in order to append a terminator */ /* Abort on error or the connection was closed */ recvResult = recv(connectSocket, (char *)(data + dataLen), sizeof(char) * (100 - dataLen - 1), 0); if (recvResult == SOCKET_ERROR || recvResult == 0) { break; } /* Append null terminator */ data[dataLen + recvResult / sizeof(char)] = '\0'; /* Eliminate CRLF */ replace = strchr(data, '\r'); if (replace) { *replace = '\0'; } replace = strchr(data, '\n'); if (replace) { *replace = '\0'; } } while (0); if (connectSocket != INVALID_SOCKET) { CLOSE_SOCKET(connectSocket); } #ifdef _WIN32 if (wsaDataInit) { WSACleanup(); } #endif } CWE134_Uncontrolled_Format_String__char_connect_socket_fprintf_82_base* baseObject = new CWE134_Uncontrolled_Format_String__char_connect_socket_fprintf_82_goodB2G; baseObject->action(data); delete baseObject; } void good() { goodG2B(); goodB2G(); } #endif /* OMITGOOD */ } /* close namespace */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. 
*/ #ifdef INCLUDEMAIN using namespace CWE134_Uncontrolled_Format_String__char_connect_socket_fprintf_82; /* so that we can use good and bad easily */ int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
Java
/* * libjingle * Copyright 2010, Google Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <string> #include "talk/p2p/base/sessionmessages.h" #include "talk/base/logging.h" #include "talk/base/scoped_ptr.h" #include "talk/base/stringutils.h" #include "talk/p2p/base/constants.h" #include "talk/p2p/base/p2ptransport.h" #include "talk/p2p/base/parsing.h" #include "talk/p2p/base/sessionclient.h" #include "talk/p2p/base/sessiondescription.h" #include "talk/p2p/base/transport.h" #include "talk/xmllite/xmlconstants.h" #include "talk/xmpp/constants.h" namespace cricket { ActionType ToActionType(const std::string& type) { if (type == GINGLE_ACTION_INITIATE) return ACTION_SESSION_INITIATE; if (type == GINGLE_ACTION_INFO) return ACTION_SESSION_INFO; if (type == GINGLE_ACTION_ACCEPT) return ACTION_SESSION_ACCEPT; if (type == GINGLE_ACTION_REJECT) return ACTION_SESSION_REJECT; if (type == GINGLE_ACTION_TERMINATE) return ACTION_SESSION_TERMINATE; if (type == GINGLE_ACTION_CANDIDATES) return ACTION_TRANSPORT_INFO; if (type == JINGLE_ACTION_SESSION_INITIATE) return ACTION_SESSION_INITIATE; if (type == JINGLE_ACTION_TRANSPORT_INFO) return ACTION_TRANSPORT_INFO; if (type == JINGLE_ACTION_TRANSPORT_ACCEPT) return ACTION_TRANSPORT_ACCEPT; if (type == JINGLE_ACTION_SESSION_INFO) return ACTION_SESSION_INFO; if (type == JINGLE_ACTION_SESSION_ACCEPT) return ACTION_SESSION_ACCEPT; if (type == JINGLE_ACTION_SESSION_TERMINATE) return ACTION_SESSION_TERMINATE; if (type == JINGLE_ACTION_TRANSPORT_INFO) return ACTION_TRANSPORT_INFO; if (type == JINGLE_ACTION_TRANSPORT_ACCEPT) return ACTION_TRANSPORT_ACCEPT; if (type == JINGLE_ACTION_DESCRIPTION_INFO) return ACTION_DESCRIPTION_INFO; if (type == GINGLE_ACTION_UPDATE) return ACTION_DESCRIPTION_INFO; return ACTION_UNKNOWN; } std::string ToJingleString(ActionType type) { switch (type) { case ACTION_SESSION_INITIATE: return JINGLE_ACTION_SESSION_INITIATE; case ACTION_SESSION_INFO: return JINGLE_ACTION_SESSION_INFO; case ACTION_SESSION_ACCEPT: return JINGLE_ACTION_SESSION_ACCEPT; // Notice that reject and 
terminate both go to // "session-terminate", but there is no "session-reject". case ACTION_SESSION_REJECT: case ACTION_SESSION_TERMINATE: return JINGLE_ACTION_SESSION_TERMINATE; case ACTION_TRANSPORT_INFO: return JINGLE_ACTION_TRANSPORT_INFO; case ACTION_TRANSPORT_ACCEPT: return JINGLE_ACTION_TRANSPORT_ACCEPT; default: return ""; } } std::string ToGingleString(ActionType type) { switch (type) { case ACTION_SESSION_INITIATE: return GINGLE_ACTION_INITIATE; case ACTION_SESSION_INFO: return GINGLE_ACTION_INFO; case ACTION_SESSION_ACCEPT: return GINGLE_ACTION_ACCEPT; case ACTION_SESSION_REJECT: return GINGLE_ACTION_REJECT; case ACTION_SESSION_TERMINATE: return GINGLE_ACTION_TERMINATE; case ACTION_TRANSPORT_INFO: return GINGLE_ACTION_CANDIDATES; default: return ""; } } bool IsJingleMessage(const buzz::XmlElement* stanza) { const buzz::XmlElement* jingle = stanza->FirstNamed(QN_JINGLE); if (jingle == NULL) return false; return (jingle->HasAttr(buzz::QN_ACTION) && jingle->HasAttr(QN_SID)); } bool IsGingleMessage(const buzz::XmlElement* stanza) { const buzz::XmlElement* session = stanza->FirstNamed(QN_GINGLE_SESSION); if (session == NULL) return false; return (session->HasAttr(buzz::QN_TYPE) && session->HasAttr(buzz::QN_ID) && session->HasAttr(QN_INITIATOR)); } bool IsSessionMessage(const buzz::XmlElement* stanza) { return (stanza->Name() == buzz::QN_IQ && stanza->Attr(buzz::QN_TYPE) == buzz::STR_SET && (IsJingleMessage(stanza) || IsGingleMessage(stanza))); } bool ParseGingleSessionMessage(const buzz::XmlElement* session, SessionMessage* msg, ParseError* error) { msg->protocol = PROTOCOL_GINGLE; std::string type_string = session->Attr(buzz::QN_TYPE); msg->type = ToActionType(type_string); msg->sid = session->Attr(buzz::QN_ID); msg->initiator = session->Attr(QN_INITIATOR); msg->action_elem = session; if (msg->type == ACTION_UNKNOWN) return BadParse("unknown action: " + type_string, error); return true; } bool ParseJingleSessionMessage(const buzz::XmlElement* jingle, 
// (continued) Tail of a Jingle session-message parser whose signature begins
// above this chunk: fills in protocol, action type, sid and initiator from
// the <jingle> element.
                               SessionMessage* msg, ParseError* error) {
  msg->protocol = PROTOCOL_JINGLE;
  std::string type_string = jingle->Attr(buzz::QN_ACTION);
  msg->type = ToActionType(type_string);
  msg->sid = jingle->Attr(QN_SID);
  msg->initiator = GetXmlAttr(jingle, QN_INITIATOR, buzz::STR_EMPTY);
  msg->action_elem = jingle;

  if (msg->type == ACTION_UNKNOWN)
    return BadParse("unknown action: " + type_string, error);

  return true;
}

// Parses a Jingle stanza but tags the message as hybrid (Gingle+Jingle).
bool ParseHybridSessionMessage(const buzz::XmlElement* jingle,
                               SessionMessage* msg,
                               ParseError* error) {
  if (!ParseJingleSessionMessage(jingle, msg, error))
    return false;
  msg->protocol = PROTOCOL_HYBRID;
  return true;
}

// Entry point: copies the stanza addressing attributes into |msg| and then
// dispatches on which child element is present (<jingle>, Gingle <session>,
// or both => hybrid).  Returns false if neither child is found.
bool ParseSessionMessage(const buzz::XmlElement* stanza,
                         SessionMessage* msg,
                         ParseError* error) {
  msg->id = stanza->Attr(buzz::QN_ID);
  msg->from = stanza->Attr(buzz::QN_FROM);
  msg->to = stanza->Attr(buzz::QN_TO);
  msg->stanza = stanza;

  const buzz::XmlElement* jingle = stanza->FirstNamed(QN_JINGLE);
  const buzz::XmlElement* session = stanza->FirstNamed(QN_GINGLE_SESSION);
  if (jingle && session)
    return ParseHybridSessionMessage(jingle, msg, error);
  if (jingle != NULL)
    return ParseJingleSessionMessage(jingle, msg, error);
  if (session != NULL)
    return ParseGingleSessionMessage(session, msg, error);
  return false;
}

// Builds a Gingle <session> action element; caller owns the returned element.
buzz::XmlElement* WriteGingleAction(const SessionMessage& msg,
                                    const XmlElements& action_elems) {
  buzz::XmlElement* session = new buzz::XmlElement(QN_GINGLE_SESSION, true);
  session->AddAttr(buzz::QN_TYPE, ToGingleString(msg.type));
  session->AddAttr(buzz::QN_ID, msg.sid);
  session->AddAttr(QN_INITIATOR, msg.initiator);
  AddXmlChildren(session, action_elems);
  return session;
}

// Builds a Jingle <jingle> action element; caller owns the returned element.
// The initiator attribute is only written on session-initiate.
buzz::XmlElement* WriteJingleAction(const SessionMessage& msg,
                                    const XmlElements& action_elems) {
  buzz::XmlElement* jingle = new buzz::XmlElement(QN_JINGLE, true);
  jingle->AddAttr(buzz::QN_ACTION, ToJingleString(msg.type));
  jingle->AddAttr(QN_SID, msg.sid);
  if (msg.type == ACTION_SESSION_INITIATE) {
    jingle->AddAttr(QN_INITIATOR, msg.initiator);
  }
  AddXmlChildren(jingle, action_elems);
  return jingle;
}

// Serializes |msg| into |stanza| as an IQ-set, choosing the Gingle or Jingle
// wire format based on msg.protocol.
void WriteSessionMessage(const SessionMessage& msg,
                         const XmlElements& action_elems,
                         buzz::XmlElement* stanza) {
  stanza->SetAttr(buzz::QN_TO, msg.to);
  stanza->SetAttr(buzz::QN_TYPE, buzz::STR_SET);

  if (msg.protocol == PROTOCOL_GINGLE) {
    stanza->AddElement(WriteGingleAction(msg, action_elems));
  } else {
    stanza->AddElement(WriteJingleAction(msg, action_elems));
  }
}

// Looks up a transport parser by name; returns NULL if not registered.
TransportParser* GetTransportParser(const TransportParserMap& trans_parsers,
                                    const std::string& name) {
  TransportParserMap::const_iterator map = trans_parsers.find(name);
  if (map == trans_parsers.end()) {
    return NULL;
  } else {
    return map->second;
  }
}

// Delegates candidate parsing to the parser registered for |transport_type|.
bool ParseCandidates(SignalingProtocol protocol,
                     const buzz::XmlElement* candidates_elem,
                     const TransportParserMap& trans_parsers,
                     const std::string& transport_type,
                     Candidates* candidates,
                     ParseError* error) {
  TransportParser* trans_parser =
      GetTransportParser(trans_parsers, transport_type);
  if (trans_parser == NULL)
    return BadParse("unknown transport type: " + transport_type, error);
  return trans_parser->ParseCandidates(protocol, candidates_elem,
                                       candidates, error);
}

// Gingle carries all candidates in one flat list; this splits them into
// per-content TransportInfos (audio/video) by candidate name when the
// session actually has those contents.
bool ParseGingleTransportInfos(const buzz::XmlElement* action_elem,
                               const ContentInfos& contents,
                               const TransportParserMap& trans_parsers,
                               TransportInfos* tinfos,
                               ParseError* error) {
  TransportInfo tinfo(CN_OTHER, NS_GINGLE_P2P, Candidates());
  if (!ParseCandidates(PROTOCOL_GINGLE, action_elem,
                       trans_parsers, NS_GINGLE_P2P,
                       &tinfo.candidates, error))
    return false;

  bool has_audio = FindContentInfoByName(contents, CN_AUDIO) != NULL;
  bool has_video = FindContentInfoByName(contents, CN_VIDEO) != NULL;

  // If we don't have media, no need to separate the candidates.
  if (!has_audio && !has_video) {
    tinfos->push_back(tinfo);
    return true;
  }

  // If we have media, separate the candidates.  Create the
  // TransportInfo here to avoid copying the candidates.
  TransportInfo audio_tinfo(CN_AUDIO, NS_GINGLE_P2P, Candidates());
  TransportInfo video_tinfo(CN_VIDEO, NS_GINGLE_P2P, Candidates());
  for (Candidates::iterator cand = tinfo.candidates.begin();
       cand != tinfo.candidates.end(); cand++) {
    if (cand->name() == GINGLE_CANDIDATE_NAME_RTP ||
        cand->name() == GINGLE_CANDIDATE_NAME_RTCP) {
      audio_tinfo.candidates.push_back(*cand);
    } else if (cand->name() == GINGLE_CANDIDATE_NAME_VIDEO_RTP ||
               cand->name() == GINGLE_CANDIDATE_NAME_VIDEO_RTCP) {
      video_tinfo.candidates.push_back(*cand);
    }
  }

  if (has_audio) {
    tinfos->push_back(audio_tinfo);
  }

  if (has_video) {
    tinfos->push_back(video_tinfo);
  }

  return true;
}

// Parses one Jingle <transport> element into a TransportInfo for |content|.
// The transport type is taken from the element's XML namespace.
bool ParseJingleTransportInfo(const buzz::XmlElement* trans_elem,
                              const ContentInfo& content,
                              const TransportParserMap& trans_parsers,
                              TransportInfos* tinfos,
                              ParseError* error) {
  std::string transport_type = trans_elem->Name().Namespace();
  TransportInfo tinfo(content.name, transport_type, Candidates());
  if (!ParseCandidates(PROTOCOL_JINGLE, trans_elem,
                       trans_parsers, transport_type,
                       &tinfo.candidates, error))
    return false;

  tinfos->push_back(tinfo);
  return true;
}

// Walks every <content> child of <jingle>, matching it to a known content by
// name and parsing its <transport> child.
// NOTE(review): |trans_parsers| is passed by value here (every other helper
// takes it by const reference) — looks like an accidental map copy; confirm
// before changing.
bool ParseJingleTransportInfos(const buzz::XmlElement* jingle,
                               const ContentInfos& contents,
                               const TransportParserMap trans_parsers,
                               TransportInfos* tinfos,
                               ParseError* error) {
  for (const buzz::XmlElement* pair_elem
           = jingle->FirstNamed(QN_JINGLE_CONTENT);
       pair_elem != NULL;
       pair_elem = pair_elem->NextNamed(QN_JINGLE_CONTENT)) {
    std::string content_name;
    if (!RequireXmlAttr(pair_elem, QN_JINGLE_CONTENT_NAME,
                        &content_name, error))
      return false;

    const ContentInfo* content =
        FindContentInfoByName(contents, content_name);
    if (!content)
      return BadParse("Unknown content name: " + content_name, error);

    const buzz::XmlElement* trans_elem;
    if (!RequireXmlChild(pair_elem, LN_TRANSPORT, &trans_elem, error))
      return false;

    if (!ParseJingleTransportInfo(trans_elem, *content,
                                  trans_parsers, tinfos, error))
      return false;
  }

  return true;
}

// Creates an empty <transport> element in the given namespace; caller owns it.
buzz::XmlElement* NewTransportElement(const std::string& name) {
  return new buzz::XmlElement(buzz::QName(name, LN_TRANSPORT), true);
}

// Delegates candidate serialization to the parser registered for |trans_type|.
bool WriteCandidates(SignalingProtocol protocol,
                     const std::string& trans_type,
                     const Candidates& candidates,
                     const TransportParserMap& trans_parsers,
                     XmlElements* elems,
                     WriteError* error) {
  TransportParser* trans_parser = GetTransportParser(trans_parsers, trans_type);
  if (trans_parser == NULL)
    return BadWrite("unknown transport type: " + trans_type, error);
  return trans_parser->WriteCandidates(protocol, candidates, elems, error);
}

// Gingle: writes each transport's candidates directly into |elems| (flat list).
bool WriteGingleTransportInfos(const TransportInfos& tinfos,
                               const TransportParserMap& trans_parsers,
                               XmlElements* elems,
                               WriteError* error) {
  for (TransportInfos::const_iterator tinfo = tinfos.begin();
       tinfo != tinfos.end(); ++tinfo) {
    if (!WriteCandidates(PROTOCOL_GINGLE,
                         tinfo->transport_type, tinfo->candidates,
                         trans_parsers, elems, error))
      return false;
  }
  return true;
}

// Jingle: wraps one transport's candidates inside a <transport> element.
bool WriteJingleTransportInfo(const TransportInfo& tinfo,
                              const TransportParserMap& trans_parsers,
                              XmlElements* elems,
                              WriteError* error) {
  XmlElements candidate_elems;
  if (!WriteCandidates(PROTOCOL_JINGLE,
                       tinfo.transport_type, tinfo.candidates,
                       trans_parsers, &candidate_elems, error))
    return false;

  buzz::XmlElement* trans_elem = NewTransportElement(tinfo.transport_type);
  AddXmlChildren(trans_elem, candidate_elems);
  elems->push_back(trans_elem);
  return true;
}

// Wraps |pair_elems| in a Jingle <content name=... creator=initiator> element.
// NOTE(review): |name| is taken by const value rather than const reference —
// likely an unintended copy; confirm before changing.
void WriteJingleContentPair(const std::string name,
                            const XmlElements& pair_elems,
                            XmlElements* elems) {
  buzz::XmlElement* pair_elem = new buzz::XmlElement(QN_JINGLE_CONTENT);
  pair_elem->SetAttr(QN_JINGLE_CONTENT_NAME, name);
  pair_elem->SetAttr(QN_CREATOR, LN_INITIATOR);
  AddXmlChildren(pair_elem, pair_elems);
  elems->push_back(pair_elem);
}

// Jingle: one <content> wrapper per transport, each holding its <transport>.
bool WriteJingleTransportInfos(const TransportInfos& tinfos,
                               const TransportParserMap& trans_parsers,
                               XmlElements* elems,
                               WriteError* error) {
  for (TransportInfos::const_iterator tinfo = tinfos.begin();
       tinfo != tinfos.end(); ++tinfo) {
    XmlElements pair_elems;
    if (!WriteJingleTransportInfo(*tinfo, trans_parsers,
                                  &pair_elems, error))
      return false;
    WriteJingleContentPair(tinfo->content_name, pair_elems, elems);
  }
  return true;
}

// Looks up a content parser by content type; returns NULL if not registered.
ContentParser* GetContentParser(const ContentParserMap& content_parsers,
                                const std::string& type) {
  ContentParserMap::const_iterator map = content_parsers.find(type);
  if (map == content_parsers.end()) {
    return NULL;
  } else {
    return map->second;
  }
}

// Parses one content description element and appends a ContentInfo.
bool ParseContentInfo(SignalingProtocol protocol,
                      const std::string& name,
                      const std::string& type,
                      const buzz::XmlElement* elem,
                      const ContentParserMap& parsers,
                      ContentInfos* contents,
                      ParseError* error) {
  ContentParser* parser = GetContentParser(parsers, type);
  if (parser == NULL)
    return BadParse("unknown application content: " + type, error);

  const ContentDescription* desc;
  if (!parser->ParseContent(protocol, elem, &desc, error))
    return false;

  contents->push_back(ContentInfo(name, type, desc));
  return true;
}

// Finds the <description> child of |parent_elem| and reports its namespace
// as the content type.
bool ParseContentType(const buzz::XmlElement* parent_elem,
                      std::string* content_type,
                      const buzz::XmlElement** content_elem,
                      ParseError* error) {
  if (!RequireXmlChild(parent_elem, LN_DESCRIPTION, content_elem, error))
    return false;
  *content_type = (*content_elem)->Name().Namespace();
  return true;
}

// Gingle: a single <description> may represent audio, audio+video, or an
// arbitrary content.  Video sessions are expanded into separate audio and
// video ContentInfos (both typed NS_JINGLE_RTP internally).
bool ParseGingleContentInfos(const buzz::XmlElement* session,
                             const ContentParserMap& content_parsers,
                             ContentInfos* contents,
                             ParseError* error) {
  std::string content_type;
  const buzz::XmlElement* content_elem;
  if (!ParseContentType(session, &content_type, &content_elem, error))
    return false;

  if (content_type == NS_GINGLE_VIDEO) {
    // A parser parsing audio or video content should look at the
    // namespace and only parse the codecs relevant to that namespace.
    // We use this to control which codecs get parsed: first audio,
    // then video.
    talk_base::scoped_ptr<buzz::XmlElement> audio_elem(
        new buzz::XmlElement(QN_GINGLE_AUDIO_CONTENT));
    CopyXmlChildren(content_elem, audio_elem.get());
    if (!ParseContentInfo(PROTOCOL_GINGLE, CN_AUDIO, NS_JINGLE_RTP,
                          audio_elem.get(), content_parsers,
                          contents, error))
      return false;

    if (!ParseContentInfo(PROTOCOL_GINGLE, CN_VIDEO, NS_JINGLE_RTP,
                          content_elem, content_parsers,
                          contents, error))
      return false;
  } else if (content_type == NS_GINGLE_AUDIO) {
    if (!ParseContentInfo(PROTOCOL_GINGLE, CN_AUDIO, NS_JINGLE_RTP,
                          content_elem, content_parsers,
                          contents, error))
      return false;
  } else {
    if (!ParseContentInfo(PROTOCOL_GINGLE, CN_OTHER, content_type,
                          content_elem, content_parsers,
                          contents, error))
      return false;
  }
  return true;
}

// Jingle: one ContentInfo per <content> child, typed by its <description>
// namespace.
bool ParseJingleContentInfos(const buzz::XmlElement* jingle,
                             const ContentParserMap& content_parsers,
                             ContentInfos* contents,
                             ParseError* error) {
  for (const buzz::XmlElement* pair_elem
           = jingle->FirstNamed(QN_JINGLE_CONTENT);
       pair_elem != NULL;
       pair_elem = pair_elem->NextNamed(QN_JINGLE_CONTENT)) {
    std::string content_name;
    if (!RequireXmlAttr(pair_elem, QN_JINGLE_CONTENT_NAME,
                        &content_name, error))
      return false;

    std::string content_type;
    const buzz::XmlElement* content_elem;
    if (!ParseContentType(pair_elem, &content_type, &content_elem, error))
      return false;

    if (!ParseContentInfo(PROTOCOL_JINGLE, content_name, content_type,
                          content_elem, content_parsers,
                          contents, error))
      return false;
  }
  return true;
}

// Parses draft Jingle <group> elements (grouping framework / BUNDLE) into
// ContentGroups keyed by the group type attribute.
bool ParseJingleGroupInfos(const buzz::XmlElement* jingle,
                           ContentGroups* groups,
                           ParseError* error) {
  for (const buzz::XmlElement* pair_elem
           = jingle->FirstNamed(QN_JINGLE_DRAFT_GROUP);
       pair_elem != NULL;
       pair_elem = pair_elem->NextNamed(QN_JINGLE_DRAFT_GROUP)) {
    std::string group_name;
    if (!RequireXmlAttr(pair_elem, QN_JINGLE_DRAFT_GROUP_TYPE,
                        &group_name, error))
      return false;

    ContentGroup group(group_name);
    for (const buzz::XmlElement* child_elem
             = pair_elem->FirstNamed(QN_JINGLE_CONTENT);
         child_elem != NULL;
         child_elem = child_elem->NextNamed(QN_JINGLE_CONTENT)) {
      std::string content_name;
      if (!RequireXmlAttr(child_elem, QN_JINGLE_CONTENT_NAME,
                          &content_name, error))
        return false;
      group.AddContentName(content_name);
    }
    groups->push_back(group);
  }
  return true;
}

// Serializes one content description; returns NULL (with |error| set for the
// unknown-type case) on failure.  Caller owns the returned element.
buzz::XmlElement* WriteContentInfo(SignalingProtocol protocol,
                                   const ContentInfo& content,
                                   const ContentParserMap& parsers,
                                   WriteError* error) {
  ContentParser* parser = GetContentParser(parsers, content.type);
  if (parser == NULL) {
    BadWrite("unknown content type: " + content.type, error);
    return NULL;
  }

  buzz::XmlElement* elem = NULL;
  if (!parser->WriteContent(protocol, content.description, &elem, error))
    return NULL;

  return elem;
}

// Gingle supports a single content only; an RTP audio+video pair is merged
// into one element.  Anything else is a write error.
bool WriteGingleContentInfos(const ContentInfos& contents,
                             const ContentParserMap& parsers,
                             XmlElements* elems,
                             WriteError* error) {
  if (contents.size() == 1) {
    buzz::XmlElement* elem = WriteContentInfo(
        PROTOCOL_GINGLE, contents.front(), parsers, error);
    if (!elem)
      return false;
    elems->push_back(elem);
  } else if (contents.size() == 2 &&
             contents.at(0).type == NS_JINGLE_RTP &&
             contents.at(1).type == NS_JINGLE_RTP) {
    // Special-case audio + video contents so that they are "merged"
    // into one "video" content.
    buzz::XmlElement* audio = WriteContentInfo(
        PROTOCOL_GINGLE, contents.at(0), parsers, error);
    if (!audio)
      return false;
    buzz::XmlElement* video = WriteContentInfo(
        PROTOCOL_GINGLE, contents.at(1), parsers, error);
    if (!video) {
      delete audio;
      return false;
    }
    CopyXmlChildren(audio, video);
    elems->push_back(video);
    delete audio;
  } else {
    return BadWrite("Gingle protocol may only have one content.", error);
  }
  return true;
}

// Linear search of |tinfos| by content name; NULL if absent.
const TransportInfo* GetTransportInfoByContentName(
    const TransportInfos& tinfos, const std::string& content_name) {
  for (TransportInfos::const_iterator tinfo = tinfos.begin();
       tinfo != tinfos.end(); ++tinfo) {
    if (content_name == tinfo->content_name) {
      return &*tinfo;
    }
  }
  return NULL;
}

// Jingle: emits one <content> per ContentInfo, each containing the content
// description followed by its matching <transport>.
bool WriteJingleContentPairs(const ContentInfos& contents,
                             const ContentParserMap& content_parsers,
                             const TransportInfos& tinfos,
                             const TransportParserMap& trans_parsers,
                             XmlElements* elems,
                             WriteError* error) {
  for (ContentInfos::const_iterator content = contents.begin();
       content != contents.end(); ++content) {
    const TransportInfo* tinfo =
        GetTransportInfoByContentName(tinfos, content->name);
    if (!tinfo)
      return BadWrite("No transport for content: " + content->name, error);

    XmlElements pair_elems;
    buzz::XmlElement* elem = WriteContentInfo(
        PROTOCOL_JINGLE, *content, content_parsers, error);
    if (!elem)
      return false;
    pair_elems.push_back(elem);

    if (!WriteJingleTransportInfo(*tinfo, trans_parsers,
                                  &pair_elems, error))
      return false;

    WriteJingleContentPair(content->name, pair_elems, elems);
  }
  return true;
}

// Writes a single BUNDLE <group> element listing every content name, but
// only if |groups| is non-empty.  No-op (returning true) otherwise.
bool WriteJingleGroupInfo(const ContentInfos& contents,
                          const ContentGroups& groups,
                          XmlElements* elems,
                          WriteError* error) {
  if (!groups.empty()) {
    buzz::XmlElement* pair_elem = new buzz::XmlElement(QN_JINGLE_DRAFT_GROUP);
    pair_elem->SetAttr(QN_JINGLE_DRAFT_GROUP_TYPE, GROUP_TYPE_BUNDLE);

    XmlElements pair_elems;
    for (ContentInfos::const_iterator content = contents.begin();
         content != contents.end(); ++content) {
      buzz::XmlElement* child_elem =
          new buzz::XmlElement(QN_JINGLE_CONTENT, false);
      child_elem->SetAttr(QN_JINGLE_CONTENT_NAME, content->name);
      pair_elems.push_back(child_elem);
    }
    AddXmlChildren(pair_elem, pair_elems);
    elems->push_back(pair_elem);
  }
  return true;
}

// Protocol-aware overload: extracts the single content type of the action
// element, normalizing Gingle audio/video to NS_JINGLE_RTP and rejecting
// Jingle messages that mix content types.
bool ParseContentType(SignalingProtocol protocol,
                      const buzz::XmlElement* action_elem,
                      std::string* content_type,
                      ParseError* error) {
  const buzz::XmlElement* content_elem;
  if (protocol == PROTOCOL_GINGLE) {
    if (!ParseContentType(action_elem, content_type, &content_elem, error))
      return false;

    // Internally, we only use NS_JINGLE_RTP.
    if (*content_type == NS_GINGLE_AUDIO ||
        *content_type == NS_GINGLE_VIDEO)
      *content_type = NS_JINGLE_RTP;
  } else {
    const buzz::XmlElement* pair_elem =
        action_elem->FirstNamed(QN_JINGLE_CONTENT);
    if (pair_elem == NULL)
      return BadParse("No contents found", error);

    if (!ParseContentType(pair_elem, content_type, &content_elem, error))
      return false;

    // If there is more than one content type, return an error.
    for (; pair_elem != NULL;
         pair_elem = pair_elem->NextNamed(QN_JINGLE_CONTENT)) {
      std::string content_type2;
      if (!ParseContentType(pair_elem, &content_type2,
                            &content_elem, error))
        return false;
      if (content_type2 != *content_type)
        return BadParse("More than one content type found", error);
    }
  }
  return true;
}

// Shared body for session-initiate / session-accept / description-info:
// parses contents, (Jingle) groups, and optionally transports into |init|.
static bool ParseContentMessage(
    SignalingProtocol protocol,
    const buzz::XmlElement* action_elem,
    bool expect_transports,
    const ContentParserMap& content_parsers,
    const TransportParserMap& trans_parsers,
    SessionInitiate* init,
    ParseError* error) {
  init->owns_contents = true;
  if (protocol == PROTOCOL_GINGLE) {
    if (!ParseGingleContentInfos(action_elem, content_parsers,
                                 &init->contents, error))
      return false;

    if (expect_transports &&
        !ParseGingleTransportInfos(action_elem, init->contents,
                                   trans_parsers, &init->transports,
                                   error))
      return false;
  } else {
    if (!ParseJingleContentInfos(action_elem, content_parsers,
                                 &init->contents, error))
      return false;
    if (!ParseJingleGroupInfos(action_elem, &init->groups, error))
      return false;

    if (expect_transports &&
        !ParseJingleTransportInfos(action_elem, init->contents,
                                   trans_parsers, &init->transports,
                                   error))
      return false;
  }

  return true;
}

// Shared body for writing session-initiate / session-accept payloads.
static bool WriteContentMessage(
    SignalingProtocol protocol,
    const ContentInfos& contents,
    const TransportInfos& tinfos,
    const ContentParserMap& content_parsers,
    const TransportParserMap& transport_parsers,
    const ContentGroups& groups,
    XmlElements* elems,
    WriteError* error) {
  if (protocol == PROTOCOL_GINGLE) {
    if (!WriteGingleContentInfos(contents, content_parsers, elems, error))
      return false;

    if (!WriteGingleTransportInfos(tinfos, transport_parsers,
                                   elems, error))
      return false;
  } else {
    if (!WriteJingleContentPairs(contents, content_parsers,
                                 tinfos, transport_parsers,
                                 elems, error))
      return false;
    if (!WriteJingleGroupInfo(contents, groups, elems, error))
      return false;
  }

  return true;
}

// session-initiate: contents plus transports are expected.
bool ParseSessionInitiate(SignalingProtocol protocol,
                          const buzz::XmlElement* action_elem,
                          const ContentParserMap& content_parsers,
                          const TransportParserMap& trans_parsers,
                          SessionInitiate* init,
                          ParseError* error) {
  bool expect_transports = true;
  return ParseContentMessage(protocol, action_elem, expect_transports,
                             content_parsers, trans_parsers, init, error);
}

// Serializes a session-initiate payload into |elems|.
bool WriteSessionInitiate(SignalingProtocol protocol,
                          const ContentInfos& contents,
                          const TransportInfos& tinfos,
                          const ContentParserMap& content_parsers,
                          const TransportParserMap& transport_parsers,
                          const ContentGroups& groups,
                          XmlElements* elems,
                          WriteError* error) {
  return WriteContentMessage(protocol, contents, tinfos,
                             content_parsers, transport_parsers, groups,
                             elems, error);
}

// session-accept: same shape as session-initiate.
bool ParseSessionAccept(SignalingProtocol protocol,
                        const buzz::XmlElement* action_elem,
                        const ContentParserMap& content_parsers,
                        const TransportParserMap& transport_parsers,
                        SessionAccept* accept,
                        ParseError* error) {
  bool expect_transports = true;
  return ParseContentMessage(protocol, action_elem, expect_transports,
                             content_parsers, transport_parsers, accept,
                             error);
}

// Serializes a session-accept payload into |elems|.
bool WriteSessionAccept(SignalingProtocol protocol,
                        const ContentInfos& contents,
                        const TransportInfos& tinfos,
                        const ContentParserMap& content_parsers,
                        const TransportParserMap& transport_parsers,
                        const ContentGroups& groups,
                        XmlElements* elems,
                        WriteError* error) {
  return WriteContentMessage(protocol, contents, tinfos,
                             content_parsers, transport_parsers, groups,
                             elems, error);
}

// Extracts the terminate reason (and, for Gingle, a nested debug reason)
// from the element names of the action's children.  Missing reasons are
// not an error.
bool ParseSessionTerminate(SignalingProtocol protocol,
                           const buzz::XmlElement* action_elem,
                           SessionTerminate* term,
                           ParseError* error) {
  if (protocol == PROTOCOL_GINGLE) {
    const buzz::XmlElement* reason_elem = action_elem->FirstElement();
    if (reason_elem != NULL) {
      term->reason = reason_elem->Name().LocalPart();
      const buzz::XmlElement *debug_elem = reason_elem->FirstElement();
      if (debug_elem != NULL) {
        term->debug_reason = debug_elem->Name().LocalPart();
      }
    }
    return true;
  } else {
    const buzz::XmlElement* reason_elem =
        action_elem->FirstNamed(QN_JINGLE_REASON);
    if (reason_elem) {
      reason_elem = reason_elem->FirstElement();
      if (reason_elem) {
        term->reason = reason_elem->Name().LocalPart();
      }
    }
    return true;
  }
}

// Writes the terminate reason.  Gingle always emits an element; Jingle emits
// a <reason> wrapper only when a reason string is present.
void WriteSessionTerminate(SignalingProtocol protocol,
                           const SessionTerminate& term,
                           XmlElements* elems) {
  if (protocol == PROTOCOL_GINGLE) {
    elems->push_back(new buzz::XmlElement(buzz::QName(NS_GINGLE, term.reason)));
  } else {
    if (!term.reason.empty()) {
      buzz::XmlElement* reason_elem = new buzz::XmlElement(QN_JINGLE_REASON);
      reason_elem->AddElement(new buzz::XmlElement(
          buzz::QName(NS_JINGLE, term.reason)));
      elems->push_back(reason_elem);
    }
  }
}

// description-info: contents only, no transports expected.
bool ParseDescriptionInfo(SignalingProtocol protocol,
                          const buzz::XmlElement* action_elem,
                          const ContentParserMap& content_parsers,
                          const TransportParserMap& transport_parsers,
                          DescriptionInfo* description_info,
                          ParseError* error) {
  bool expect_transports = false;
  return ParseContentMessage(protocol, action_elem, expect_transports,
                             content_parsers, transport_parsers,
                             description_info, error);
}

// Protocol dispatch for transport-info parsing.
bool ParseTransportInfos(SignalingProtocol protocol,
                         const buzz::XmlElement* action_elem,
                         const ContentInfos& contents,
                         const TransportParserMap& trans_parsers,
                         TransportInfos* tinfos,
                         ParseError* error) {
  if (protocol == PROTOCOL_GINGLE) {
    return ParseGingleTransportInfos(
        action_elem, contents, trans_parsers, tinfos, error);
  } else {
    return ParseJingleTransportInfos(
        action_elem, contents, trans_parsers, tinfos, error);
  }
}

// Protocol dispatch for transport-info writing.
bool WriteTransportInfos(SignalingProtocol protocol,
                         const TransportInfos& tinfos,
                         const TransportParserMap& trans_parsers,
                         XmlElements* elems,
                         WriteError* error) {
  if (protocol == PROTOCOL_GINGLE) {
    return WriteGingleTransportInfos(tinfos, trans_parsers, elems, error);
  } else {
    return WriteJingleTransportInfos(tinfos, trans_parsers, elems, error);
  }
}

// If |str| contains |prefix|, stores everything after it in |after|.
bool GetUriTarget(const std::string& prefix, const std::string& str,
                  std::string* after) {
  size_t pos = str.find(prefix);
  if (pos == std::string::npos)
    return false;

  *after = str.substr(pos + prefix.size(), std::string::npos);
  return true;
}

// Looks for a redirect inside an error stanza (Gingle-specific element first,
// then the standard XMPP <redirect>), extracting the redirect target from
// the element body.
bool FindSessionRedirect(const buzz::XmlElement* stanza,
                         SessionRedirect* redirect) {
  const buzz::XmlElement* error_elem = GetXmlChild(stanza, LN_ERROR);
  if (error_elem == NULL)
    return false;

  const buzz::XmlElement* redirect_elem =
      error_elem->FirstNamed(QN_GINGLE_REDIRECT);
  if (redirect_elem == NULL)
    redirect_elem = error_elem->FirstNamed(buzz::QN_STANZA_REDIRECT);
  if (redirect_elem == NULL)
    return false;

  if (!GetUriTarget(STR_REDIRECT_PREFIX, redirect_elem->BodyText(),
                    &redirect->target))
    return false;

  return true;
}

}  // namespace cricket
Java
-- Exercises view WITH CHECK OPTION semantics; run inside an explicit
-- transaction so the schema changes can be rolled back at the end.
autocommit off;
-- Base table the views are layered on.
create table t (a int);
-- v1 only accepts rows satisfying a > 10 (its own check option).
create view v1 as select * from t where a > 10 with check option;
-- v adds a > 5 on top of v1; CASCADED means inserts through v must also
-- satisfy v1's predicate (a > 10).
create view v as select * from v1 where a > 5 with cascaded check option;
-- 7 satisfies v's a > 5 but violates the cascaded a > 10 check from v1,
-- so this insert is expected to be rejected.
insert into v values (7);
rollback;
Java
from __future__ import division, absolute_import, print_function import collections import tempfile import sys import shutil import warnings import operator import io import itertools if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins from decimal import Decimal import numpy as np from nose import SkipTest from numpy.compat import asbytes, getexception, strchar, unicode, sixu from test_print import in_foreign_locale from numpy.core.multiarray_tests import ( test_neighborhood_iterator, test_neighborhood_iterator_oob, test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end, test_inplace_increment, get_buffer_info, test_as_c_array ) from numpy.testing import ( TestCase, run_module_suite, assert_, assert_raises, assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_allclose, assert_array_less, runstring, dec ) # Need to test an object that does not fully implement math interface from datetime import timedelta if sys.version_info[:2] > (3, 2): # In Python 3.3 the representation of empty shape, strides and suboffsets # is an empty tuple instead of None. 
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes EMPTY = () else: EMPTY = None class TestFlags(TestCase): def setUp(self): self.a = np.arange(10) def test_writeable(self): mydict = locals() self.a.flags.writeable = False self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict) self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) self.a.flags.writeable = True self.a[0] = 5 self.a[0] = 0 def test_otherflags(self): assert_equal(self.a.flags.carray, True) assert_equal(self.a.flags.farray, False) assert_equal(self.a.flags.behaved, True) assert_equal(self.a.flags.fnc, False) assert_equal(self.a.flags.forc, True) assert_equal(self.a.flags.owndata, True) assert_equal(self.a.flags.writeable, True) assert_equal(self.a.flags.aligned, True) assert_equal(self.a.flags.updateifcopy, False) def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) assert_(a.flags.aligned) # not power of two are accessed bytewise and thus considered aligned a = np.zeros(5, dtype=np.dtype('|S4')) assert_(a.flags.aligned) def test_void_align(self): a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) assert_(a.flags.aligned) class TestHash(TestCase): # see #3793 def test_int(self): for st, ut, s in [(np.int8, np.uint8, 8), (np.int16, np.uint16, 16), (np.int32, np.uint32, 32), (np.int64, np.uint64, 64)]: for i in range(1, s): assert_equal(hash(st(-2**i)), hash(-2**i), err_msg="%r: -2**%d" % (st, i)) assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), err_msg="%r: 2**%d" % (st, i - 1)) assert_equal(hash(st(2**i - 1)), hash(2**i - 1), err_msg="%r: 2**%d - 1" % (st, i)) i = max(i - 1, 1) assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), err_msg="%r: 2**%d" % (ut, i - 1)) assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), err_msg="%r: 2**%d - 1" % (ut, i)) class TestAttributes(TestCase): def setUp(self): self.one = np.arange(10) self.two = np.arange(20).reshape(4, 5) self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) def 
test_attributes(self): assert_equal(self.one.shape, (10,)) assert_equal(self.two.shape, (4, 5)) assert_equal(self.three.shape, (2, 5, 6)) self.three.shape = (10, 3, 2) assert_equal(self.three.shape, (10, 3, 2)) self.three.shape = (2, 5, 6) assert_equal(self.one.strides, (self.one.itemsize,)) num = self.two.itemsize assert_equal(self.two.strides, (5*num, num)) num = self.three.itemsize assert_equal(self.three.strides, (30*num, 6*num, num)) assert_equal(self.one.ndim, 1) assert_equal(self.two.ndim, 2) assert_equal(self.three.ndim, 3) num = self.two.itemsize assert_equal(self.two.size, 20) assert_equal(self.two.nbytes, 20*num) assert_equal(self.two.itemsize, self.two.dtype.itemsize) assert_equal(self.two.base, np.arange(20)) def test_dtypeattr(self): assert_equal(self.one.dtype, np.dtype(np.int_)) assert_equal(self.three.dtype, np.dtype(np.float_)) assert_equal(self.one.dtype.char, 'l') assert_equal(self.three.dtype.char, 'd') self.assertTrue(self.three.dtype.str[0] in '<>') assert_equal(self.one.dtype.str[1], 'i') assert_equal(self.three.dtype.str[1], 'f') def test_int_subclassing(self): # Regression test for https://github.com/numpy/numpy/pull/3526 numpy_int = np.int_(0) if sys.version_info[0] >= 3: # On Py3k int_ should not inherit from int, because it's not fixed-width anymore assert_equal(isinstance(numpy_int, int), False) else: # Otherwise, it should inherit from int... assert_equal(isinstance(numpy_int, int), True) # ... 
and fast-path checks on C-API level should also work from numpy.core.multiarray_tests import test_int_subclass assert_equal(test_int_subclass(numpy_int), True) def test_stridesattr(self): x = self.one def make_array(size, offset, strides): return np.ndarray(size, buffer=x, dtype=int, offset=offset*x.itemsize, strides=strides*x.itemsize) assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) self.assertRaises(ValueError, make_array, 4, 4, -2) self.assertRaises(ValueError, make_array, 4, 2, -1) self.assertRaises(ValueError, make_array, 8, 3, 1) assert_equal(make_array(8, 3, 0), np.array([3]*8)) # Check behavior reported in gh-2503: self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) make_array(0, 0, 10) def test_set_stridesattr(self): x = self.one def make_array(size, offset, strides): try: r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize) except: raise RuntimeError(getexception()) r.strides = strides = strides*x.itemsize return r assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) self.assertRaises(ValueError, make_array, 4, 4, -2) self.assertRaises(ValueError, make_array, 4, 2, -1) self.assertRaises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): arr.strides = strides self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) # Test for offset calculations: x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) self.assertRaises(ValueError, set_strides, x[::-1], -1) a = x[::-1] a.strides = 1 a[::2].strides = 2 def test_fill(self): for t in "?bhilqpBHILQPfdgFDGO": x = np.empty((3, 2, 1), t) y = np.empty((3, 2, 1), t) x.fill(1) y[...] 
= 1 assert_equal(x, y) def test_fill_max_uint64(self): x = np.empty((3, 2, 1), dtype=np.uint64) y = np.empty((3, 2, 1), dtype=np.uint64) value = 2**64 - 1 y[...] = value x.fill(value) assert_array_equal(x, y) def test_fill_struct_array(self): # Filling from a scalar x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8') x.fill(x[0]) assert_equal(x['f1'][1], x['f1'][0]) # Filling from a tuple that can be converted # to a scalar x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) x.fill((3.5, -2)) assert_array_equal(x['a'], [3.5, 3.5]) assert_array_equal(x['b'], [-2, -2]) class TestArrayConstruction(TestCase): def test_array(self): d = np.ones(6) r = np.array([d, d]) assert_equal(r, np.ones((2, 6))) d = np.ones(6) tgt = np.ones((2, 6)) r = np.array([d, d]) assert_equal(r, tgt) tgt[1] = 2 r = np.array([d, d + 1]) assert_equal(r, tgt) d = np.ones(6) r = np.array([[d, d]]) assert_equal(r, np.ones((1, 2, 6))) d = np.ones(6) r = np.array([[d, d], [d, d]]) assert_equal(r, np.ones((2, 2, 6))) d = np.ones((6, 6)) r = np.array([d, d]) assert_equal(r, np.ones((2, 6, 6))) d = np.ones((6, )) r = np.array([[d, d + 1], d + 2]) assert_equal(len(r), 2) assert_equal(r[0], [d, d + 1]) assert_equal(r[1], d + 2) tgt = np.ones((2, 3), dtype=np.bool) tgt[0, 2] = False tgt[1, 0:2] = False r = np.array([[True, True, False], [False, False, True]]) assert_equal(r, tgt) r = np.array([[True, False], [True, False], [False, True]]) assert_equal(r, tgt.T) def test_array_empty(self): assert_raises(TypeError, np.array) def test_array_copy_false(self): d = np.array([1, 2, 3]) e = np.array(d, copy=False) d[1] = 3 assert_array_equal(e, [1, 3, 3]) e = np.array(d, copy=False, order='F') d[1] = 4 assert_array_equal(e, [1, 4, 3]) e[2] = 7 assert_array_equal(d, [1, 4, 7]) def test_array_copy_true(self): d = np.array([[1,2,3], [1, 2, 3]]) e = np.array(d, copy=True) d[0, 1] = 3 e[0, 2] = -7 assert_array_equal(e, [[1, 2, -7], [1, 2, 3]]) assert_array_equal(d, [[1, 3, 3], [1, 2, 3]]) e = np.array(d, copy=True, 
order='F') d[0, 1] = 5 e[0, 2] = 7 assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) assert_array_equal(d, [[1, 5, 3], [1,2,3]]) def test_array_cont(self): d = np.ones(10)[::2] assert_(np.ascontiguousarray(d).flags.c_contiguous) assert_(np.ascontiguousarray(d).flags.f_contiguous) assert_(np.asfortranarray(d).flags.c_contiguous) assert_(np.asfortranarray(d).flags.f_contiguous) d = np.ones((10, 10))[::2,::2] assert_(np.ascontiguousarray(d).flags.c_contiguous) assert_(np.asfortranarray(d).flags.f_contiguous) class TestAssignment(TestCase): def test_assignment_broadcasting(self): a = np.arange(6).reshape(2, 3) # Broadcasting the input to the output a[...] = np.arange(3) assert_equal(a, [[0, 1, 2], [0, 1, 2]]) a[...] = np.arange(2).reshape(2, 1) assert_equal(a, [[0, 0, 0], [1, 1, 1]]) # For compatibility with <= 1.5, a limited version of broadcasting # the output to the input. # # This behavior is inconsistent with NumPy broadcasting # in general, because it only uses one of the two broadcasting # rules (adding a new "1" dimension to the left of the shape), # applied to the output instead of an input. In NumPy 2.0, this kind # of broadcasting assignment will likely be disallowed. a[...] = np.arange(6)[::-1].reshape(1, 2, 3) assert_equal(a, [[5, 4, 3], [2, 1, 0]]) # The other type of broadcasting would require a reduction operation. def assign(a, b): a[...] 
= b assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3)) def test_assignment_errors(self): # Address issue #2276 class C: pass a = np.zeros(1) def assign(v): a[0] = v assert_raises((AttributeError, TypeError), assign, C()) assert_raises(ValueError, assign, [1]) class TestDtypedescr(TestCase): def test_construction(self): d1 = np.dtype('i4') assert_equal(d1, np.dtype(np.int32)) d2 = np.dtype('f8') assert_equal(d2, np.dtype(np.float64)) def test_byteorders(self): self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4')) self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')])) class TestZeroRank(TestCase): def setUp(self): self.d = np.array(0), np.array('x', object) def test_ellipsis_subscript(self): a, b = self.d self.assertEqual(a[...], 0) self.assertEqual(b[...], 'x') self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9. self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9. def test_empty_subscript(self): a, b = self.d self.assertEqual(a[()], 0) self.assertEqual(b[()], 'x') self.assertTrue(type(a[()]) is a.dtype.type) self.assertTrue(type(b[()]) is str) def test_invalid_subscript(self): a, b = self.d self.assertRaises(IndexError, lambda x: x[0], a) self.assertRaises(IndexError, lambda x: x[0], b) self.assertRaises(IndexError, lambda x: x[np.array([], int)], a) self.assertRaises(IndexError, lambda x: x[np.array([], int)], b) def test_ellipsis_subscript_assignment(self): a, b = self.d a[...] = 42 self.assertEqual(a, 42) b[...] 
= '' self.assertEqual(b.item(), '') def test_empty_subscript_assignment(self): a, b = self.d a[()] = 42 self.assertEqual(a, 42) b[()] = '' self.assertEqual(b.item(), '') def test_invalid_subscript_assignment(self): a, b = self.d def assign(x, i, v): x[i] = v self.assertRaises(IndexError, assign, a, 0, 42) self.assertRaises(IndexError, assign, b, 0, '') self.assertRaises(ValueError, assign, a, (), '') def test_newaxis(self): a, b = self.d self.assertEqual(a[np.newaxis].shape, (1,)) self.assertEqual(a[..., np.newaxis].shape, (1,)) self.assertEqual(a[np.newaxis, ...].shape, (1,)) self.assertEqual(a[..., np.newaxis].shape, (1,)) self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1)) self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1)) self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1)) self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10) def test_invalid_newaxis(self): a, b = self.d def subscript(x, i): x[i] self.assertRaises(IndexError, subscript, a, (np.newaxis, 0)) self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50) def test_constructor(self): x = np.ndarray(()) x[()] = 5 self.assertEqual(x[()], 5) y = np.ndarray((), buffer=x) y[()] = 6 self.assertEqual(x[()], 6) def test_output(self): x = np.array(2) self.assertRaises(ValueError, np.add, x, [1], x) class TestScalarIndexing(TestCase): def setUp(self): self.d = np.array([0, 1])[0] def test_ellipsis_subscript(self): a = self.d self.assertEqual(a[...], 0) self.assertEqual(a[...].shape, ()) def test_empty_subscript(self): a = self.d self.assertEqual(a[()], 0) self.assertEqual(a[()].shape, ()) def test_invalid_subscript(self): a = self.d self.assertRaises(IndexError, lambda x: x[0], a) self.assertRaises(IndexError, lambda x: x[np.array([], int)], a) def test_invalid_subscript_assignment(self): a = self.d def assign(x, i, v): x[i] = v self.assertRaises(TypeError, assign, a, 0, 42) def test_newaxis(self): a = self.d self.assertEqual(a[np.newaxis].shape, (1,)) self.assertEqual(a[..., 
np.newaxis].shape, (1,))
        self.assertEqual(a[np.newaxis, ...].shape, (1,))
        self.assertEqual(a[..., np.newaxis].shape, (1,))
        self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
        self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
        self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
        self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)

    def test_invalid_newaxis(self):
        a = self.d

        def subscript(x, i):
            x[i]

        self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
        self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)

    def test_overlapping_assignment(self):
        # self-assignment through overlapping views must behave as if the
        # right-hand side were fully evaluated first
        # With positive strides
        a = np.arange(4)
        a[:-1] = a[1:]
        assert_equal(a, [1, 2, 3, 3])

        a = np.arange(4)
        a[1:] = a[:-1]
        assert_equal(a, [0, 0, 1, 2])

        # With positive and negative strides
        a = np.arange(4)
        a[:] = a[::-1]
        assert_equal(a, [3, 2, 1, 0])

        a = np.arange(6).reshape(2, 3)
        a[::-1,:] = a[:, ::-1]
        assert_equal(a, [[5, 4, 3], [2, 1, 0]])

        a = np.arange(6).reshape(2, 3)
        a[::-1, ::-1] = a[:, ::-1]
        assert_equal(a, [[3, 4, 5], [0, 1, 2]])

        # With just one element overlapping
        a = np.arange(5)
        a[:3] = a[2:]
        assert_equal(a, [2, 3, 4, 3, 4])

        a = np.arange(5)
        a[2:] = a[:3]
        assert_equal(a, [0, 1, 0, 1, 2])

        a = np.arange(5)
        a[2::-1] = a[2:]
        assert_equal(a, [4, 3, 2, 3, 4])

        a = np.arange(5)
        a[2:] = a[2::-1]
        assert_equal(a, [0, 1, 2, 1, 0])

        a = np.arange(5)
        a[2::-1] = a[:1:-1]
        assert_equal(a, [2, 3, 4, 3, 4])

        a = np.arange(5)
        a[:1:-1] = a[2::-1]
        assert_equal(a, [0, 1, 0, 1, 2])


class TestCreation(TestCase):
    # Array construction from various Python objects and dtype strings.
    def test_from_attribute(self):
        # an __array__ method that returns None must raise, not crash
        class x(object):
            def __array__(self, dtype=None):
                pass

        self.assertRaises(ValueError, np.array, x())

    def test_from_string(self):
        types = np.typecodes['AllInteger'] + np.typecodes['Float']
        nstr = ['123', '123']
        result = np.array([123, 123], dtype=int)
        for type in types:
            msg = 'String conversion for %s' % type
            assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)

    def test_void(self):
        arr = np.array([], dtype='V')
        assert_equal(arr.dtype.kind, 'V')

    def
test_zeros(self):
        # np.zeros must produce all-zero storage for every numeric dtype,
        # including sub-array and structured dtypes
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((13,), dtype=dt)
            assert_equal(np.count_nonzero(d), 0)
            # true for ieee floats
            assert_equal(d.sum(), 0)
            assert_(not d.any())

            d = np.zeros(2, dtype='(2,4)i4')
            assert_equal(np.count_nonzero(d), 0)
            assert_equal(d.sum(), 0)
            assert_(not d.any())

            d = np.zeros(2, dtype='4i4')
            assert_equal(np.count_nonzero(d), 0)
            assert_equal(d.sum(), 0)
            assert_(not d.any())

            d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
            assert_equal(np.count_nonzero(d), 0)

    @dec.slow
    def test_zeros_big(self):
        # test big arrays as they might be allocated differently by the system
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((30 * 1024**2,), dtype=dt)
            assert_(not d.any())

    def test_zeros_obj(self):
        # test initialization from PyLong(0)
        d = np.zeros((13,), dtype=object)
        assert_array_equal(d, [0] * 13)
        assert_equal(np.count_nonzero(d), 0)

    def test_zeros_obj_obj(self):
        d = np.zeros(10, dtype=[('k', object, 2)])
        assert_array_equal(d['k'], 0)

    def test_zeros_like_like_zeros(self):
        # test zeros_like returns the same as zeros
        for c in np.typecodes['All']:
            if c == 'V':
                continue
            d = np.zeros((3,3), dtype=c)
            assert_array_equal(np.zeros_like(d), d)
            assert_equal(np.zeros_like(d).dtype, d.dtype)
        # explicitly check some special cases
        d = np.zeros((3,3), dtype='S5')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='U5')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)

        d = np.zeros((3,3), dtype='<i4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='>i4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)

        d = np.zeros((3,3), dtype='<M8[s]')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='>M8[s]')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)

        d = np.zeros((3,3), dtype='f4,f4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)

    def test_empty_unicode(self):
        # don't throw decode errors on garbage memory
        for i in range(5, 100, 5):
            d = np.empty(i, dtype='U')
            str(d)

    def test_sequence_non_homogenous(self):
        # mixed int/huge-int promotes to object; mixed real/complex to complex
        assert_equal(np.array([4, 2**80]).dtype, np.object)
        assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
        assert_equal(np.array([2**80, 4]).dtype, np.object)
        assert_equal(np.array([2**80] * 3).dtype, np.object)
        assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
        assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
        assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)

    @dec.skipif(sys.version_info[0] >= 3)
    def test_sequence_long(self):
        # Python 2 only: `long` inputs
        assert_equal(np.array([long(4), long(4)]).dtype, np.long)
        assert_equal(np.array([long(4), 2**80]).dtype, np.object)
        assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
        assert_equal(np.array([2**80, long(4)]).dtype, np.object)

    def test_non_sequence_sequence(self):
        """Should not segfault.

        Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type indicated by
        raising a ValueError. At some point we may raise a warning instead
        of an error in the Fail case.

""" class Fail(object): def __len__(self): return 1 def __getitem__(self, index): raise ValueError() class Map(object): def __len__(self): return 1 def __getitem__(self, index): raise KeyError() a = np.array([Map()]) assert_(a.shape == (1,)) assert_(a.dtype == np.dtype(object)) assert_raises(ValueError, np.array, [Fail()]) def test_no_len_object_type(self): # gh-5100, want object array from iterable object without len() class Point2: def __init__(self): pass def __getitem__(self, ind): if ind in [0, 1]: return ind else: raise IndexError() d = np.array([Point2(), Point2(), Point2()]) assert_equal(d.dtype, np.dtype(object)) class TestStructured(TestCase): def test_subarray_field_access(self): a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) a['a'] = np.arange(60).reshape(3, 5, 2, 2) # Since the subarray is always in C-order, a transpose # does not swap the subarray: assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3)) # In Fortran order, the subarray gets appended # like in all other cases, not prepended as a special case b = a.copy(order='F') assert_equal(a['a'].shape, b['a'].shape) assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) def test_subarray_comparison(self): # Check that comparisons between record arrays with # multi-dimensional field types work properly a = np.rec.fromrecords( [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))]) b = a.copy() assert_equal(a == b, [True, True]) assert_equal(a != b, [False, False]) b[1].b = 'c' assert_equal(a == b, [True, False]) assert_equal(a != b, [False, True]) for i in range(3): b[0].a = a[0].a b[0].a[i] = 5 assert_equal(a == b, [False, False]) assert_equal(a != b, [True, True]) for i in range(2): for j in range(2): b = a.copy() b[0].c[i, j] = 10 assert_equal(a == b, [False, True]) assert_equal(a != b, [True, False]) # Check that broadcasting with a subarray works a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) b = 
np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
        assert_equal(a == b, [[True, True, False], [False, False, True]])
        assert_equal(b == a, [[True, True, False], [False, False, True]])
        a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
        b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
        assert_equal(a == b, [[True, True, False], [False, False, True]])
        assert_equal(b == a, [[True, True, False], [False, False, True]])
        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
        assert_equal(a == b, [[True, False, False], [False, False, True]])
        assert_equal(b == a, [[True, False, False], [False, False, True]])

        # Check that broadcasting Fortran-style arrays with a subarray work
        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
        assert_equal(a == b, [[True, False, False], [False, False, True]])
        assert_equal(b == a, [[True, False, False], [False, False, True]])

        # Check that incompatible sub-array shapes don't result to broadcasting
        x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        # This comparison invokes deprecated behaviour, and will probably
        # start raising an error eventually. What we really care about in this
        # test is just that it doesn't return True.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            assert_equal(x == y, False)

        x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        # This comparison invokes deprecated behaviour, and will probably
        # start raising an error eventually. What we really care about in this
        # test is just that it doesn't return True.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            assert_equal(x == y, False)

        # Check that structured arrays that are different only in
        # byte-order work
        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
        assert_equal(a == b, [False, True])

    def test_casting(self):
        # Check that casting a structured array to change its byte order
        # works
        a = np.array([(1,)], dtype=[('a', '<i4')])
        assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
        b = a.astype([('a', '>i4')])
        assert_equal(b, a.byteswap().newbyteorder())
        assert_equal(a['a'][0], b['a'][0])

        # Check that equality comparison works on structured arrays if
        # they are 'equiv'-castable
        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
        b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
        assert_equal(a == b, [True, True])

        # Check that 'equiv' casting can reorder fields and change byte
        # order
        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
        c = a.astype(b.dtype, casting='equiv')
        assert_equal(a == c, [True, True])

        # Check that 'safe' casting can change byte order and up-cast
        # fields
        t = [('a', '<i8'), ('b', '>f8')]
        assert_(np.can_cast(a.dtype, t, casting='safe'))
        c = a.astype(t, casting='safe')
        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
                     [True, True])

        # Check that 'same_kind' casting can change byte order and
        # change field widths within a "kind"
        t = [('a', '<i4'), ('b', '>f4')]
        assert_(np.can_cast(a.dtype, t, casting='same_kind'))
        c = a.astype(t, casting='same_kind')
        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
                     [True, True])

        # Check that casting fails if the casting rule should fail on
        # any of the fields
        t = [('a', '>i8'), ('b', '<f4')]
        assert_(not np.can_cast(a.dtype, t, casting='safe'))
        assert_raises(TypeError, a.astype, t, casting='safe')
        t = [('a',
'>i2'), ('b', '<f8')]
        assert_(not np.can_cast(a.dtype, t, casting='equiv'))
        assert_raises(TypeError, a.astype, t, casting='equiv')
        t = [('a', '>i8'), ('b', '<i2')]
        assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
        assert_raises(TypeError, a.astype, t, casting='same_kind')
        assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
        assert_raises(TypeError, a.astype, b.dtype, casting='no')

        # Check that non-'unsafe' casting can't change the set of field names
        for casting in ['no', 'safe', 'equiv', 'same_kind']:
            t = [('a', '>i4')]
            assert_(not np.can_cast(a.dtype, t, casting=casting))
            t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
            assert_(not np.can_cast(a.dtype, t, casting=casting))

    def test_objview(self):
        # https://github.com/numpy/numpy/issues/3286
        a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
        a[['a', 'b']]  # TypeError?

        # https://github.com/numpy/numpy/issues/3253
        dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
        dat2[['B', 'A']]  # TypeError?

    def test_setfield(self):
        # https://github.com/numpy/numpy/issues/3126
        struct_dt = np.dtype([('elem', 'i4', 5),])
        dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
        x = np.zeros(1, dt)
        x[0]['field'] = np.ones(10, dtype='i4')
        x[0]['struct'] = np.ones(1, dtype=struct_dt)
        assert_equal(x[0]['field'], np.ones(10, dtype='i4'))

    def test_setfield_object(self):
        # make sure object field assignment with ndarray value
        # on void scalar mimics setitem behavior
        b = np.zeros(1, dtype=[('x', 'O')])
        # next line should work identically to b['x'][0] = np.arange(3)
        b[0]['x'] = np.arange(3)
        assert_equal(b[0]['x'], np.arange(3))

        # check that broadcasting check still works
        c = np.zeros(1, dtype=[('x', 'O', 5)])

        def testassign():
            c[0]['x'] = np.arange(3)

        assert_raises(ValueError, testassign)


class TestBool(TestCase):
    # Behavior of np.bool_ scalars and boolean arrays.
    def test_test_interning(self):
        # np.bool_ True/False are interned singletons
        a0 = np.bool_(0)
        b0 = np.bool_(False)
        self.assertTrue(a0 is b0)
        a1 = np.bool_(1)
        b1 = np.bool_(True)
        self.assertTrue(a1 is b1)
        self.assertTrue(np.array([True])[0] is a1)
        self.assertTrue(np.array(True)[()] is a1)

    def test_sum(self):
        d = np.ones(101, dtype=np.bool)
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)

        # any nonzero byte must count as True
        d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)

    def check_count_nonzero(self, power, length):
        # exercise count_nonzero on every `power`-bit pattern spread over a
        # `length`-element bool array, also with non-0/1 byte values
        powers = [2 ** i for i in range(length)]
        for i in range(2**power):
            l = [(i & x) != 0 for x in powers]
            a = np.array(l, dtype=np.bool)
            c = builtins.sum(l)
            self.assertEqual(np.count_nonzero(a), c)
            av = a.view(np.uint8)
            av *= 3
            self.assertEqual(np.count_nonzero(a), c)
            av *= 4
            self.assertEqual(np.count_nonzero(a), c)
            av[av != 0] = 0xFF
            self.assertEqual(np.count_nonzero(a), c)

    def test_count_nonzero(self):
        # check all 12 bit combinations in a length 17 array
        # covers most cases of the 16 byte unrolled code
        self.check_count_nonzero(12, 17)

    @dec.slow
    def test_count_nonzero_all(self):
        # check all combinations in a length 17 array
        # covers all cases of the 16 byte unrolled code
        self.check_count_nonzero(17, 17)

    def test_count_nonzero_unaligned(self):
        # prevent mistakes as e.g.
        # gh-4060
        for o in range(7):
            a = np.zeros((18,), dtype=np.bool)[o+1:]
            a[:o] = True
            self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
            a = np.ones((18,), dtype=np.bool)[o+1:]
            a[:o] = False
            self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))


class TestMethods(TestCase):
    # ndarray method behavior: round, transpose, sort, argsort, searchsorted,
    # partition, ...
    def test_round(self):
        def check_round(arr, expected, *round_args):
            assert_equal(arr.round(*round_args), expected)
            # With output array
            out = np.zeros_like(arr)
            res = arr.round(*round_args, out=out)
            assert_equal(out, expected)
            assert_equal(out, res)

        check_round(np.array([1.2, 1.5]), [1, 2])
        check_round(np.array(1.5), 2)
        check_round(np.array([12.2, 15.5]), [10, 20], -1)
        check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
        # Complex rounding
        check_round(np.array([4.5 + 1.5j]), [4 + 2j])
        check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)

    def test_transpose(self):
        a = np.array([[1, 2], [3, 4]])
        assert_equal(a.transpose(), [[1, 3], [2, 4]])
        # axes argument must match the array's ndim
        self.assertRaises(ValueError, lambda: a.transpose(0))
        self.assertRaises(ValueError, lambda: a.transpose(0, 0))
        self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))

    def test_sort(self):
        # test ordering for floats and complex containing nans. It is only
        # necessary to check the lessthan comparison, so sorts that
        # only follow the insertion sort path are sufficient. We only
        # test doubles and complex doubles as the logic is the same.

        # check doubles
        msg = "Test real sort order with nans"
        a = np.array([np.nan, 1, 0])
        b = np.sort(a)
        assert_equal(b, a[::-1], msg)
        # check complex
        msg = "Test complex sort order with nans"
        a = np.zeros(9, dtype=np.complex128)
        a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
        a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
        b = np.sort(a)
        assert_equal(b, a[::-1], msg)

        # all c scalar sorts use the same code with different types
        # so it suffices to run a quick check with one type.
        # The number
        # of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall over to insertion
        # sort for small arrays.
        a = np.arange(101)
        b = a[::-1].copy()
        for kind in ['q', 'm', 'h']:
            msg = "scalar sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test complex sorts. These use the same code as the scalars
        # but the compare function differs.
        ai = a*1j + 1
        bi = b*1j + 1
        for kind in ['q', 'm', 'h']:
            msg = "complex sort, real part == 1, kind=%s" % kind
            c = ai.copy()
            c.sort(kind=kind)
            assert_equal(c, ai, msg)
            c = bi.copy()
            c.sort(kind=kind)
            assert_equal(c, ai, msg)
        ai = a + 1j
        bi = b + 1j
        for kind in ['q', 'm', 'h']:
            msg = "complex sort, imag part == 1, kind=%s" % kind
            c = ai.copy()
            c.sort(kind=kind)
            assert_equal(c, ai, msg)
            c = bi.copy()
            c.sort(kind=kind)
            assert_equal(c, ai, msg)

        # test sorting of complex arrays requiring byte-swapping, gh-5441
        for endianess in '<>':
            for dt in np.typecodes['Complex']:
                arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
                c = arr.copy()
                c.sort()
                msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
                assert_equal(c, arr, msg)

        # test string sorts.
        s = 'aaaaaaaa'
        a = np.array([s + chr(i) for i in range(101)])
        b = a[::-1].copy()
        for kind in ['q', 'm', 'h']:
            msg = "string sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test unicode sorts.
        s = 'aaaaaaaa'
        a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
        b = a[::-1].copy()
        for kind in ['q', 'm', 'h']:
            msg = "unicode sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test object array sorts.
        a = np.empty((101,), dtype=np.object)
        a[:] = list(range(101))
        b = a[::-1]
        for kind in ['q', 'h', 'm']:
            msg = "object sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test record array sorts.
        dt = np.dtype([('f', float), ('i', int)])
        a = np.array([(i, i) for i in range(101)], dtype=dt)
        b = a[::-1]
        for kind in ['q', 'h', 'm']:
            # NOTE(review): message says "object sort" but this is the record
            # array case — looks like a copy/paste slip in the label only.
            msg = "object sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test datetime64 sorts.
        a = np.arange(0, 101, dtype='datetime64[D]')
        b = a[::-1]
        for kind in ['q', 'h', 'm']:
            msg = "datetime64 sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test timedelta64 sorts.
        a = np.arange(0, 101, dtype='timedelta64[D]')
        b = a[::-1]
        for kind in ['q', 'h', 'm']:
            msg = "timedelta64 sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy()
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # check axis handling. This should be the same for all type
        # specific sorts, so we only check it for one type and one kind
        a = np.array([[3, 2], [1, 0]])
        b = np.array([[1, 0], [3, 2]])
        c = np.array([[2, 3], [0, 1]])
        d = a.copy()
        d.sort(axis=0)
        assert_equal(d, b, "test sort with axis=0")
        d = a.copy()
        d.sort(axis=1)
        assert_equal(d, c, "test sort with axis=1")
        d = a.copy()
        d.sort()
        assert_equal(d, c, "test sort with default axis")

        # check axis handling for multidimensional empty arrays
        a = np.array([])
        a.shape = (3, 2, 1, 0)
        for axis in range(-a.ndim, a.ndim):
            msg = 'test empty array sort with axis={0}'.format(axis)
            assert_equal(np.sort(a, axis=axis), a, msg)
        msg = 'test empty array sort with axis=None'
        assert_equal(np.sort(a, axis=None), a.ravel(), msg)

    def test_copy(self):
        def assert_fortran(arr):
            assert_(arr.flags.fortran)
            assert_(arr.flags.f_contiguous)
            assert_(not arr.flags.c_contiguous)

        def assert_c(arr):
            assert_(not arr.flags.fortran)
            assert_(not arr.flags.f_contiguous)
            assert_(arr.flags.c_contiguous)

        a = np.empty((2, 2), order='F')
        # Test copying a Fortran array
        assert_c(a.copy())
        assert_c(a.copy('C'))
        assert_fortran(a.copy('F'))
        assert_fortran(a.copy('A'))

        # Now test starting with a C array.
        a = np.empty((2, 2), order='C')
        assert_c(a.copy())
        assert_c(a.copy('C'))
        assert_fortran(a.copy('F'))
        # 'A' preserves the source order, which is C here
        assert_c(a.copy('A'))

    def test_sort_order(self):
        # Test sorting an array with fields
        x1 = np.array([21, 32, 14])
        x2 = np.array(['my', 'first', 'name'])
        x3 = np.array([3.1, 4.5, 6.2])
        r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')

        r.sort(order=['id'])
        assert_equal(r.id, np.array([14, 21, 32]))
        assert_equal(r.word, np.array(['name', 'my', 'first']))
        assert_equal(r.number, np.array([6.2, 3.1, 4.5]))

        r.sort(order=['word'])
        assert_equal(r.id, np.array([32, 21, 14]))
        assert_equal(r.word, np.array(['first', 'my', 'name']))
        assert_equal(r.number, np.array([4.5, 3.1, 6.2]))

        r.sort(order=['number'])
        assert_equal(r.id, np.array([21, 32, 14]))
        assert_equal(r.word, np.array(['my', 'first', 'name']))
        assert_equal(r.number, np.array([3.1, 4.5, 6.2]))

        # deliberately pick the non-native byte order for the sort key
        if sys.byteorder == 'little':
            strtype = '>i2'
        else:
            strtype = '<i2'
        mydtype = [('name', strchar + '5'), ('col2', strtype)]
        r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
                     dtype=mydtype)
        r.sort(order='col2')
        assert_equal(r['col2'], [1, 3, 255, 258])
        assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
                                 dtype=mydtype))

    def test_argsort(self):
        # all c scalar argsorts use the same code with different types
        # so it suffices to run a quick check with one type. The number
        # of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall over to insertion
        # sort for small arrays.
        a = np.arange(101)
        b = a[::-1].copy()
        for kind in ['q', 'm', 'h']:
            msg = "scalar argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), a, msg)
            assert_equal(b.copy().argsort(kind=kind), b, msg)

        # test complex argsorts. These use the same code as the scalars
        # but the compare function differs.
        ai = a*1j + 1
        bi = b*1j + 1
        for kind in ['q', 'm', 'h']:
            msg = "complex argsort, kind=%s" % kind
            assert_equal(ai.copy().argsort(kind=kind), a, msg)
            assert_equal(bi.copy().argsort(kind=kind), b, msg)
        ai = a + 1j
        bi = b + 1j
        for kind in ['q', 'm', 'h']:
            msg = "complex argsort, kind=%s" % kind
            assert_equal(ai.copy().argsort(kind=kind), a, msg)
            assert_equal(bi.copy().argsort(kind=kind), b, msg)

        # test argsort of complex arrays requiring byte-swapping, gh-5441
        for endianess in '<>':
            for dt in np.typecodes['Complex']:
                arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
                msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
                assert_equal(arr.argsort(),
                             np.arange(len(arr), dtype=np.intp), msg)

        # test string argsorts.
        s = 'aaaaaaaa'
        a = np.array([s + chr(i) for i in range(101)])
        b = a[::-1].copy()
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h']:
            msg = "string argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # test unicode argsorts.
        s = 'aaaaaaaa'
        a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h']:
            msg = "unicode argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # test object array argsorts.
        a = np.empty((101,), dtype=np.object)
        a[:] = list(range(101))
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h']:
            msg = "object argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # test structured array argsorts.
        dt = np.dtype([('f', float), ('i', int)])
        a = np.array([(i, i) for i in range(101)], dtype=dt)
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h']:
            msg = "structured array argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # test datetime64 argsorts.
        a = np.arange(0, 101, dtype='datetime64[D]')
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'h', 'm']:
            msg = "datetime64 argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # test timedelta64 argsorts.
        a = np.arange(0, 101, dtype='timedelta64[D]')
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'h', 'm']:
            msg = "timedelta64 argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # check axis handling. This should be the same for all type
        # specific argsorts, so we only check it for one type and one kind
        a = np.array([[3, 2], [1, 0]])
        b = np.array([[1, 1], [0, 0]])
        c = np.array([[1, 0], [1, 0]])
        assert_equal(a.copy().argsort(axis=0), b)
        assert_equal(a.copy().argsort(axis=1), c)
        assert_equal(a.copy().argsort(), c)
        # using None is known fail at this point
        #assert_equal(a.copy().argsort(axis=None, c)

        # check axis handling for multidimensional empty arrays
        a = np.array([])
        a.shape = (3, 2, 1, 0)
        for axis in range(-a.ndim, a.ndim):
            msg = 'test empty array argsort with axis={0}'.format(axis)
            assert_equal(np.argsort(a, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
        msg = 'test empty array argsort with axis=None'
        assert_equal(np.argsort(a, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)

        # check that stable argsorts are stable
        r = np.arange(100)
        # scalars
        a = np.zeros(100)
        assert_equal(a.argsort(kind='m'), r)
        # complex
        a = np.zeros(100, dtype=np.complex)
        assert_equal(a.argsort(kind='m'), r)
        # string
        a = np.array(['aaaaaaaaa' for i in range(100)])
        assert_equal(a.argsort(kind='m'), r)
        # unicode
        a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
        assert_equal(a.argsort(kind='m'), r)

    def test_sort_unicode_kind(self):
        # a non-ASCII `kind` string must be rejected, not crash
        d = np.arange(10)
        k = b'\xc3\xa4'.decode("UTF8")
        assert_raises(ValueError, d.sort, kind=k)
        assert_raises(ValueError, d.argsort, kind=k)

    def test_searchsorted(self):
        # test for floats and complex containing nans. The logic is the
        # same for all float types so only test double types for now.
        # The search sorted routines use the compare functions for the
        # array type, so this checks if that is consistent with the sort
        # order.

        # check double
        a = np.array([0, 1, np.nan])
        msg = "Test real searchsorted with nans, side='l'"
        b = a.searchsorted(a, side='l')
        assert_equal(b, np.arange(3), msg)
        msg = "Test real searchsorted with nans, side='r'"
        b = a.searchsorted(a, side='r')
        assert_equal(b, np.arange(1, 4), msg)
        # check double complex
        a = np.zeros(9, dtype=np.complex128)
        a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
        a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
        msg = "Test complex searchsorted with nans, side='l'"
        b = a.searchsorted(a, side='l')
        assert_equal(b, np.arange(9), msg)
        msg = "Test complex searchsorted with nans, side='r'"
        b = a.searchsorted(a, side='r')
        assert_equal(b, np.arange(1, 10), msg)
        msg = "Test searchsorted with little endian, side='l'"
        a = np.array([0, 128], dtype='<i4')
        b = a.searchsorted(np.array(128, dtype='<i4'))
        assert_equal(b, 1, msg)
        msg = "Test searchsorted with big endian, side='l'"
        a = np.array([0, 128], dtype='>i4')
        b = a.searchsorted(np.array(128, dtype='>i4'))
        assert_equal(b, 1, msg)

        # Check 0 elements
        a = np.ones(0)
        b = a.searchsorted([0, 1, 2], 'l')
        assert_equal(b, [0, 0, 0])
        b = a.searchsorted([0, 1, 2], 'r')
        assert_equal(b, [0, 0, 0])
        a = np.ones(1)
        # Check 1 element
        b = a.searchsorted([0, 1, 2], 'l')
        assert_equal(b, [0, 0, 1])
        b = a.searchsorted([0, 1, 2], 'r')
        assert_equal(b, [0, 1, 1])
        # Check all elements equal
        a = np.ones(2)
        b = a.searchsorted([0, 1, 2], 'l')
        assert_equal(b, [0, 0, 2])
        b = a.searchsorted([0, 1, 2], 'r')
        assert_equal(b, [0, 2, 2])

        # Test searching unaligned array
        a = np.arange(10)
        # offset by one byte to force misalignment
        aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
        unaligned = aligned[1:].view(a.dtype)
        unaligned[:] = a
        # Test searching unaligned array
        b = unaligned.searchsorted(a, 'l')
        assert_equal(b, a)
        b = unaligned.searchsorted(a, 'r')
        assert_equal(b, a + 1)
        # Test searching for unaligned keys
        b = a.searchsorted(unaligned, 'l')
        assert_equal(b, a)
        b = a.searchsorted(unaligned, 'r')
        assert_equal(b, a + 1)

        # Test smart resetting of binsearch indices
        a = np.arange(5)
        b = a.searchsorted([6, 5, 4], 'l')
        assert_equal(b, [5, 5, 4])
        b = a.searchsorted([6, 5, 4], 'r')
        assert_equal(b, [5, 5, 5])

        # Test all type specific binary search functions
        types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
                         np.typecodes['Datetime'], '?O'))
        for dt in types:
            if dt == 'M':
                dt = 'M8[D]'
            if dt == '?':
                a = np.arange(2, dtype=dt)
                out = np.arange(2)
            else:
                a = np.arange(0, 5, dtype=dt)
                out = np.arange(5)
            b = a.searchsorted(a, 'l')
            assert_equal(b, out)
            b = a.searchsorted(a, 'r')
            assert_equal(b, out + 1)

    def test_searchsorted_unicode(self):
        # Test searchsorted on unicode strings.

        # 1.6.1 contained a string length miscalculation in
        # arraytypes.c.src:UNICODE_compare() which manifested as
        # incorrect/inconsistent results from searchsorted.
        a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
                     dtype=np.unicode)
        ind = np.arange(len(a))
        assert_equal([a.searchsorted(v, 'left') for v in a], ind)
        assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
        assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
        assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)

    def test_searchsorted_with_sorter(self):
        # invalid sorter arguments must raise
        a = np.array([5, 2, 1, 3, 4])
        s = np.argsort(a)
        assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
        assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
        assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
        assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])

        # bounds check
        assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
        assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
        assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])

        a = np.random.rand(300)
        s = a.argsort()
        b = np.sort(a)
        k = np.linspace(0, 1, 20)
        assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))

        a = np.array([0, 1, 2, 3, 5]*20)
        s = a.argsort()
        k = [0, 1, 2, 3, 5]
        expected = [0, 20, 40, 60, 80]
        assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
        expected = [20, 40, 60, 80, 100]
        assert_equal(a.searchsorted(k, side='r', sorter=s), expected)

        # Test searching unaligned array
        keys = np.arange(10)
        a = keys.copy()
        # NOTE(review): this shuffles the *previous* sorter, which is then
        # immediately overwritten on the next line — presumably a leftover;
        # harmless but pointless. TODO confirm intent against upstream.
        np.random.shuffle(s)
        s = a.argsort()
        aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
        unaligned = aligned[1:].view(a.dtype)
        # Test searching unaligned array
        unaligned[:] = a
        b = unaligned.searchsorted(keys, 'l', s)
        assert_equal(b, keys)
        b = unaligned.searchsorted(keys, 'r', s)
        assert_equal(b, keys + 1)
        # Test searching for unaligned keys
        unaligned[:] = keys
        b = a.searchsorted(unaligned, 'l', s)
        assert_equal(b, keys)
        b = a.searchsorted(unaligned, 'r', s)
        assert_equal(b, keys + 1)

        # Test all type specific indirect binary search functions
        types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
                         np.typecodes['Datetime'], '?O'))
        for dt in types:
            if dt == 'M':
                dt = 'M8[D]'
            if dt == '?':
                a = np.array([1, 0], dtype=dt)
                # We want the sorter array to be of a type that is different
                # from np.intp in all platforms, to check for #4698
                s = np.array([1, 0], dtype=np.int16)
                out = np.array([1, 0])
            else:
                a = np.array([3, 4, 1, 2, 0], dtype=dt)
                # We want the sorter array to be of a type that is different
                # from np.intp in all platforms, to check for #4698
                s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
                out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
            b = a.searchsorted(a, 'l', s)
            assert_equal(b, out)
            b = a.searchsorted(a, 'r', s)
            assert_equal(b, out + 1)

        # Test non-contiguous sorter array
        a = np.array([3, 4, 1, 2, 0])
        srt = np.empty((10,), dtype=np.intp)
        srt[1::2] = -1
        srt[::2] = [4, 2, 3, 0, 1]
        s = srt[::2]
        out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
        b = a.searchsorted(a, 'l', s)
        assert_equal(b, out)
        b = a.searchsorted(a, 'r', s)
        assert_equal(b, out + 1)

    def test_argpartition_out_of_range(self):
        # Test out of range values in kth raise an error, gh-5469
        d = np.arange(10)
        assert_raises(ValueError, d.argpartition, 10)
        assert_raises(ValueError, d.argpartition, -11)
        # Test also for generic type argpartition, which uses sorting
        # and used to not bound check kth
        d_obj = np.arange(10, dtype=object)
        assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11) def test_partition_out_of_range(self): # Test out of range values in kth raise an error, gh-5469 d = np.arange(10) assert_raises(ValueError, d.partition, 10) assert_raises(ValueError, d.partition, -11) # Test also for generic type partition, which uses sorting # and used to not bound check kth d_obj = np.arange(10, dtype=object) assert_raises(ValueError, d_obj.partition, 10) assert_raises(ValueError, d_obj.partition, -11) def test_partition_empty_array(self): # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array partition with axis={0}'.format(axis) assert_equal(np.partition(a, 0, axis=axis), a, msg) msg = 'test empty array partition with axis=None' assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg) def test_argpartition_empty_array(self): # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array argpartition with axis={0}'.format(axis) assert_equal(np.partition(a, 0, axis=axis), np.zeros_like(a, dtype=np.intp), msg) msg = 'test empty array argpartition with axis=None' assert_equal(np.partition(a, 0, axis=None), np.zeros_like(a.ravel(), dtype=np.intp), msg) def test_partition(self): d = np.arange(10) assert_raises(TypeError, np.partition, d, 2, kind=1) assert_raises(ValueError, np.partition, d, 2, kind="nonsense") assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense") assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense") assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense") for k in ("introselect",): d = np.array([]) assert_array_equal(np.partition(d, 0, kind=k), d) assert_array_equal(np.argpartition(d, 0, kind=k), d) d = np.ones((1)) assert_array_equal(np.partition(d, 0, kind=k)[0], d) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) # 
kth not modified kth = np.array([30, 15, 5]) okth = kth.copy() np.partition(np.arange(40), kth) assert_array_equal(kth, okth) for r in ([2, 1], [1, 2], [1, 1]): d = np.array(r) tgt = np.sort(d) assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0]) assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1]) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) assert_array_equal(d[np.argpartition(d, 1, kind=k)], np.partition(d, 1, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1], [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]): d = np.array(r) tgt = np.sort(d) assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0]) assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1]) assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2]) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) assert_array_equal(d[np.argpartition(d, 1, kind=k)], np.partition(d, 1, kind=k)) assert_array_equal(d[np.argpartition(d, 2, kind=k)], np.partition(d, 2, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) d = np.ones((50)) assert_array_equal(np.partition(d, 0, kind=k), d) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) # sorted d = np.arange((49)) self.assertEqual(np.partition(d, 5, kind=k)[5], 5) self.assertEqual(np.partition(d, 15, kind=k)[15], 15) assert_array_equal(d[np.argpartition(d, 5, kind=k)], np.partition(d, 5, kind=k)) assert_array_equal(d[np.argpartition(d, 15, kind=k)], np.partition(d, 15, kind=k)) # rsorted d = np.arange((47))[::-1] self.assertEqual(np.partition(d, 6, kind=k)[6], 6) self.assertEqual(np.partition(d, 16, kind=k)[16], 16) assert_array_equal(d[np.argpartition(d, 6, kind=k)], np.partition(d, 6, kind=k)) assert_array_equal(d[np.argpartition(d, 16, kind=k)], np.partition(d, 16, kind=k)) assert_array_equal(np.partition(d, -6, kind=k), np.partition(d, 41, 
kind=k)) assert_array_equal(np.partition(d, -16, kind=k), np.partition(d, 31, kind=k)) assert_array_equal(d[np.argpartition(d, -6, kind=k)], np.partition(d, 41, kind=k)) # median of 3 killer, O(n^2) on pure median 3 pivot quickselect # exercises the median of median of 5 code used to keep O(n) d = np.arange(1000000) x = np.roll(d, d.size // 2) mid = x.size // 2 + 1 assert_equal(np.partition(x, mid)[mid], mid) d = np.arange(1000001) x = np.roll(d, d.size // 2 + 1) mid = x.size // 2 + 1 assert_equal(np.partition(x, mid)[mid], mid) # max d = np.ones(10) d[1] = 4 assert_equal(np.partition(d, (2, -1))[-1], 4) assert_equal(np.partition(d, (2, -1))[2], 1) assert_equal(d[np.argpartition(d, (2, -1))][-1], 4) assert_equal(d[np.argpartition(d, (2, -1))][2], 1) d[1] = np.nan assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1])) assert_(np.isnan(np.partition(d, (2, -1))[-1])) # equal elements d = np.arange((47)) % 7 tgt = np.sort(np.arange((47)) % 7) np.random.shuffle(d) for i in range(d.size): self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i]) assert_array_equal(d[np.argpartition(d, 6, kind=k)], np.partition(d, 6, kind=k)) assert_array_equal(d[np.argpartition(d, 16, kind=k)], np.partition(d, 16, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 9]) kth = [0, 3, 19, 20] assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7)) assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7)) d = np.array([2, 1]) d.partition(0, kind=k) assert_raises(ValueError, d.partition, 2) assert_raises(ValueError, d.partition, 3, axis=1) assert_raises(ValueError, np.partition, d, 2) assert_raises(ValueError, np.partition, d, 2, axis=1) assert_raises(ValueError, d.argpartition, 2) assert_raises(ValueError, d.argpartition, 3, axis=1) assert_raises(ValueError, np.argpartition, d, 2) assert_raises(ValueError, np.argpartition, d, 2, axis=1) d = np.arange(10).reshape((2, 
5)) d.partition(1, axis=0, kind=k) d.partition(4, axis=1, kind=k) np.partition(d, 1, axis=0, kind=k) np.partition(d, 4, axis=1, kind=k) np.partition(d, 1, axis=None, kind=k) np.partition(d, 9, axis=None, kind=k) d.argpartition(1, axis=0, kind=k) d.argpartition(4, axis=1, kind=k) np.argpartition(d, 1, axis=0, kind=k) np.argpartition(d, 4, axis=1, kind=k) np.argpartition(d, 1, axis=None, kind=k) np.argpartition(d, 9, axis=None, kind=k) assert_raises(ValueError, d.partition, 2, axis=0) assert_raises(ValueError, d.partition, 11, axis=1) assert_raises(TypeError, d.partition, 2, axis=None) assert_raises(ValueError, np.partition, d, 9, axis=1) assert_raises(ValueError, np.partition, d, 11, axis=None) assert_raises(ValueError, d.argpartition, 2, axis=0) assert_raises(ValueError, d.argpartition, 11, axis=1) assert_raises(ValueError, np.argpartition, d, 9, axis=1) assert_raises(ValueError, np.argpartition, d, 11, axis=None) td = [(dt, s) for dt in [np.int32, np.float32, np.complex64] for s in (9, 16)] for dt, s in td: aae = assert_array_equal at = self.assertTrue d = np.arange(s, dtype=dt) np.random.shuffle(d) d1 = np.tile(np.arange(s, dtype=dt), (4, 1)) map(np.random.shuffle, d1) d0 = np.transpose(d1) for i in range(d.size): p = np.partition(d, i, kind=k) self.assertEqual(p[i], i) # all before are smaller assert_array_less(p[:i], p[i]) # all after are larger assert_array_less(p[i], p[i + 1:]) aae(p, d[np.argpartition(d, i, kind=k)]) p = np.partition(d1, i, axis=1, kind=k) aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:, :i].T <= p[:, i]).all(), msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T)) at((p[:, i + 1:].T > p[:, i]).all(), msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) aae(p, d1[np.arange(d1.shape[0])[:, None], np.argpartition(d1, i, axis=1, kind=k)]) p = np.partition(d0, i, axis=0, kind=k) aae(p[i,:], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:i,:] <= p[i,:]).all(), 
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:])) at((p[i + 1:,:] > p[i,:]).all(), msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:])) aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), np.arange(d0.shape[1])[None,:]]) # check inplace dc = d.copy() dc.partition(i, kind=k) assert_equal(dc, np.partition(d, i, kind=k)) dc = d0.copy() dc.partition(i, axis=0, kind=k) assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) dc = d1.copy() dc.partition(i, axis=1, kind=k) assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) def assert_partitioned(self, d, kth): prev = 0 for k in np.sort(kth): assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k) assert_((d[k:] >= d[k]).all(), msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k])) prev = k + 1 def test_partition_iterative(self): d = np.arange(17) kth = (0, 1, 2, 429, 231) assert_raises(ValueError, d.partition, kth) assert_raises(ValueError, d.argpartition, kth) d = np.arange(10).reshape((2, 5)) assert_raises(ValueError, d.partition, kth, axis=0) assert_raises(ValueError, d.partition, kth, axis=1) assert_raises(ValueError, np.partition, d, kth, axis=1) assert_raises(ValueError, np.partition, d, kth, axis=None) d = np.array([3, 4, 2, 1]) p = np.partition(d, (0, 3)) self.assert_partitioned(p, (0, 3)) self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) assert_array_equal(p, np.partition(d, (-3, -1))) assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) d = np.arange(17) np.random.shuffle(d) d.partition(range(d.size)) assert_array_equal(np.arange(17), d) np.random.shuffle(d) assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) # test unsorted kth d = np.arange(17) np.random.shuffle(d) keys = np.array([1, 3, 8, -2]) np.random.shuffle(d) p = np.partition(d, keys) self.assert_partitioned(p, keys) p = d[np.argpartition(d, keys)] self.assert_partitioned(p, keys) np.random.shuffle(keys) assert_array_equal(np.partition(d, keys), p) assert_array_equal(d[np.argpartition(d, keys)], p) # equal kth d = np.arange(20)[::-1] 
self.assert_partitioned(np.partition(d, [5]*4), [5]) self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), [5]*4 + [6, 13]) self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], [5]*4 + [6, 13]) d = np.arange(12) np.random.shuffle(d) d1 = np.tile(np.arange(12), (4, 1)) map(np.random.shuffle, d1) d0 = np.transpose(d1) kth = (1, 6, 7, -1) p = np.partition(d1, kth, axis=1) pa = d1[np.arange(d1.shape[0])[:, None], d1.argpartition(kth, axis=1)] assert_array_equal(p, pa) for i in range(d1.shape[0]): self.assert_partitioned(p[i,:], kth) p = np.partition(d0, kth, axis=0) pa = d0[np.argpartition(d0, kth, axis=0), np.arange(d0.shape[1])[None,:]] assert_array_equal(p, pa) for i in range(d0.shape[1]): self.assert_partitioned(p[:, i], kth) def test_partition_cdtype(self): d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), ('Lancelot', 1.9, 38)], dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) tgt = np.sort(d, order=['age', 'height']) assert_array_equal(np.partition(d, range(d.size), order=['age', 'height']), tgt) assert_array_equal(d[np.argpartition(d, range(d.size), order=['age', 'height'])], tgt) for k in range(d.size): assert_equal(np.partition(d, k, order=['age', 'height'])[k], tgt[k]) assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k], tgt[k]) d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot']) tgt = np.sort(d) assert_array_equal(np.partition(d, range(d.size)), tgt) for k in range(d.size): assert_equal(np.partition(d, k)[k], tgt[k]) assert_equal(d[np.argpartition(d, k)][k], tgt[k]) def test_partition_unicode_kind(self): d = np.arange(10) k = b'\xc3\xa4'.decode("UTF8") assert_raises(ValueError, d.partition, 2, kind=k) assert_raises(ValueError, d.argpartition, 2, kind=k) def test_partition_fuzz(self): # a few rounds of random data testing for j in range(10, 30): for i in range(1, j - 2): d = np.arange(j) np.random.shuffle(d) d = d % np.random.randint(2, 30) idx = 
np.random.randint(d.size) kth = [0, idx, i, i + 1] tgt = np.sort(d)[kth] assert_array_equal(np.partition(d, kth)[kth], tgt, err_msg="data: %r\n kth: %r" % (d, kth)) def test_argpartition_gh5524(self): # A test for functionality of argpartition on lists. d = [6,7,3,2,9,0] p = np.argpartition(d,1) self.assert_partitioned(np.array(d)[p],[1]) def test_flatten(self): x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32) x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32) y0 = np.array([1, 2, 3, 4, 5, 6], np.int32) y0f = np.array([1, 4, 2, 5, 3, 6], np.int32) y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32) y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32) assert_equal(x0.flatten(), y0) assert_equal(x0.flatten('F'), y0f) assert_equal(x0.flatten('F'), x0.T.flatten()) assert_equal(x1.flatten(), y1) assert_equal(x1.flatten('F'), y1f) assert_equal(x1.flatten('F'), x1.T.flatten()) def test_dot(self): a = np.array([[1, 0], [0, 1]]) b = np.array([[0, 1], [1, 0]]) c = np.array([[9, 1], [1, -9]]) assert_equal(np.dot(a, b), a.dot(b)) assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c)) # test passing in an output array c = np.zeros_like(a) a.dot(b, c) assert_equal(c, np.dot(a, b)) # test keyword args c = np.zeros_like(a) a.dot(b=b, out=c) assert_equal(c, np.dot(a, b)) def test_dot_override(self): class A(object): def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): return "A" class B(object): def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): return NotImplemented a = A() b = B() c = np.array([[1]]) assert_equal(np.dot(a, b), "A") assert_equal(c.dot(a), "A") assert_raises(TypeError, np.dot, b, c) assert_raises(TypeError, c.dot, b) def test_diagonal(self): a = np.arange(12).reshape((3, 4)) assert_equal(a.diagonal(), [0, 5, 10]) assert_equal(a.diagonal(0), [0, 5, 10]) assert_equal(a.diagonal(1), [1, 6, 11]) assert_equal(a.diagonal(-1), [4, 9]) b = np.arange(8).reshape((2, 2, 2)) assert_equal(b.diagonal(), [[0, 6], [1, 7]]) 
assert_equal(b.diagonal(0), [[0, 6], [1, 7]]) assert_equal(b.diagonal(1), [[2], [3]]) assert_equal(b.diagonal(-1), [[4], [5]]) assert_raises(ValueError, b.diagonal, axis1=0, axis2=0) assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]]) assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]]) assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]]) # Order of axis argument doesn't matter: assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]]) def test_diagonal_view_notwriteable(self): # this test is only for 1.9, the diagonal view will be # writeable in 1.10. a = np.eye(3).diagonal() assert_(not a.flags.writeable) assert_(not a.flags.owndata) a = np.diagonal(np.eye(3)) assert_(not a.flags.writeable) assert_(not a.flags.owndata) a = np.diag(np.eye(3)) assert_(not a.flags.writeable) assert_(not a.flags.owndata) def test_diagonal_memleak(self): # Regression test for a bug that crept in at one point a = np.zeros((100, 100)) assert_(sys.getrefcount(a) < 50) for i in range(100): a.diagonal() assert_(sys.getrefcount(a) < 50) def test_put(self): icodes = np.typecodes['AllInteger'] fcodes = np.typecodes['AllFloat'] for dt in icodes + fcodes + 'O': tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt) # test 1-d a = np.zeros(6, dtype=dt) a.put([1, 3, 5], [1, 3, 5]) assert_equal(a, tgt) # test 2-d a = np.zeros((2, 3), dtype=dt) a.put([1, 3, 5], [1, 3, 5]) assert_equal(a, tgt.reshape(2, 3)) for dt in '?': tgt = np.array([False, True, False, True, False, True], dtype=dt) # test 1-d a = np.zeros(6, dtype=dt) a.put([1, 3, 5], [True]*3) assert_equal(a, tgt) # test 2-d a = np.zeros((2, 3), dtype=dt) a.put([1, 3, 5], [True]*3) assert_equal(a, tgt.reshape(2, 3)) # check must be writeable a = np.zeros(6) a.flags.writeable = False assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5]) def test_ravel(self): a = np.array([[0, 1], [2, 3]]) assert_equal(a.ravel(), [0, 1, 2, 3]) assert_(not a.ravel().flags.owndata) assert_equal(a.ravel('F'), [0, 2, 1, 3]) assert_equal(a.ravel(order='C'), [0, 1, 2, 3]) 
assert_equal(a.ravel(order='F'), [0, 2, 1, 3]) assert_equal(a.ravel(order='A'), [0, 1, 2, 3]) assert_(not a.ravel(order='A').flags.owndata) assert_equal(a.ravel(order='K'), [0, 1, 2, 3]) assert_(not a.ravel(order='K').flags.owndata) assert_equal(a.ravel(), a.reshape(-1)) a = np.array([[0, 1], [2, 3]], order='F') assert_equal(a.ravel(), [0, 1, 2, 3]) assert_equal(a.ravel(order='A'), [0, 2, 1, 3]) assert_equal(a.ravel(order='K'), [0, 2, 1, 3]) assert_(not a.ravel(order='A').flags.owndata) assert_(not a.ravel(order='K').flags.owndata) assert_equal(a.ravel(), a.reshape(-1)) assert_equal(a.ravel(order='A'), a.reshape(-1, order='A')) a = np.array([[0, 1], [2, 3]])[::-1, :] assert_equal(a.ravel(), [2, 3, 0, 1]) assert_equal(a.ravel(order='C'), [2, 3, 0, 1]) assert_equal(a.ravel(order='F'), [2, 0, 3, 1]) assert_equal(a.ravel(order='A'), [2, 3, 0, 1]) # 'K' doesn't reverse the axes of negative strides assert_equal(a.ravel(order='K'), [2, 3, 0, 1]) assert_(a.ravel(order='K').flags.owndata) # Not contiguous and 1-sized axis with non matching stride a = np.arange(2**3 * 2)[::2] a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 a.strides = strides assert_(np.may_share_memory(a.ravel(order='K'), a)) assert_equal(a.ravel('K'), np.arange(0, 15, 2)) # General case of possible ravel that is not contiguous but # works and includes a 1-sized axis with non matching stride a = a.swapaxes(-1, -2) # swap back to C-order assert_(np.may_share_memory(a.ravel(order='C'), a)) assert_(np.may_share_memory(a.ravel(order='K'), a)) a = a.T # swap all to Fortran order assert_(np.may_share_memory(a.ravel(order='F'), a)) assert_(np.may_share_memory(a.ravel(order='K'), a)) # Test negative strides: a = np.arange(4)[::-1].reshape(2, 2) assert_(np.may_share_memory(a.ravel(order='C'), a)) assert_(np.may_share_memory(a.ravel(order='K'), a)) assert_equal(a.ravel('C'), [3, 2, 1, 0]) assert_equal(a.ravel('K'), [3, 2, 1, 0]) # Test keeporder with weirdly strided 1-sized dims 
(1-d first stride) a = np.arange(8)[::2].reshape(1, 2, 2, 1) # neither C, nor F order strides = list(a.strides) strides[0] = -12 strides[-1] = 0 a.strides = strides assert_(np.may_share_memory(a.ravel(order='K'), a)) assert_equal(a.ravel('K'), a.ravel('C')) # 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING): a = np.array([[1]]) a.strides = (123, 432) # If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing # them up on purpose: if np.ones(1).strides == (8,): assert_(np.may_share_memory(a.ravel('K'), a)) assert_equal(a.ravel('K').strides, (a.dtype.itemsize,)) for order in ('C', 'F', 'A', 'K'): # 0-d corner case: a = np.array(0) assert_equal(a.ravel(order), [0]) assert_(np.may_share_memory(a.ravel(order), a)) #Test that certain non-inplace ravels work right (mostly) for 'K': b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2) a = b[..., ::2] assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28]) assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28]) assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28]) assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28]) a = b[::2, ...] 
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14]) assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14]) assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14]) assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14]) def test_swapaxes(self): a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() idx = np.indices(a.shape) assert_(a.flags['OWNDATA']) b = a.copy() # check exceptions assert_raises(ValueError, a.swapaxes, -5, 0) assert_raises(ValueError, a.swapaxes, 4, 0) assert_raises(ValueError, a.swapaxes, 0, -5) assert_raises(ValueError, a.swapaxes, 0, 4) for i in range(-4, 4): for j in range(-4, 4): for k, src in enumerate((a, b)): c = src.swapaxes(i, j) # check shape shape = list(src.shape) shape[i] = src.shape[j] shape[j] = src.shape[i] assert_equal(c.shape, shape, str((i, j, k))) # check array contents i0, i1, i2, i3 = [dim-1 for dim in c.shape] j0, j1, j2, j3 = [dim-1 for dim in src.shape] assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]], c[idx[i0], idx[i1], idx[i2], idx[i3]], str((i, j, k))) # check a view is always returned, gh-5260 assert_(not c.flags['OWNDATA'], str((i, j, k))) # check on non-contiguous input array if k == 1: b = c def test_conjugate(self): a = np.array([1-1j, 1+1j, 23+23.0j]) ac = a.conj() assert_equal(a.real, ac.real) assert_equal(a.imag, -ac.imag) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1-1j, 1+1j, 23+23.0j], 'F') ac = a.conj() assert_equal(a.real, ac.real) assert_equal(a.imag, -ac.imag) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1, 2, 3]) ac = a.conj() assert_equal(a, ac) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1.0, 2.0, 3.0]) ac = a.conj() assert_equal(a, ac) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1-1j, 1+1j, 1, 2.0], object) ac = a.conj() assert_equal(ac, [k.conjugate() for k in a]) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1-1j, 1, 
2.0, 'f'], object) assert_raises(AttributeError, lambda: a.conj()) assert_raises(AttributeError, lambda: a.conjugate()) class TestBinop(object): def test_inplace(self): # test refcount 1 inplace conversion assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]), [0.5, 1.0]) d = np.array([0.5, 0.5])[::2] assert_array_almost_equal(d * (d * np.array([1.0, 2.0])), [0.25, 0.5]) a = np.array([0.5]) b = np.array([0.5]) c = a + b c = a - b c = a * b c = a / b assert_equal(a, b) assert_almost_equal(c, 1.) c = a + b * 2. / b * a - a / b assert_equal(a, b) assert_equal(c, 0.5) # true divide a = np.array([5]) b = np.array([3]) c = (a * a) / b assert_almost_equal(c, 25 / 3) assert_equal(a, 5) assert_equal(b, 3) def test_extension_incref_elide(self): # test extension (e.g. cython) calling PyNumber_* slots without # increasing the reference counts # # def incref_elide(a): # d = input.copy() # refcount 1 # return d, d + d # PyNumber_Add without increasing refcount from numpy.core.multiarray_tests import incref_elide d = np.ones(5) orig, res = incref_elide(d) # the return original should not be changed to an inplace operation assert_array_equal(orig, d) assert_array_equal(res, d + d) def test_extension_incref_elide_stack(self): # scanning if the refcount == 1 object is on the python stack to check # that we are called directly from python is flawed as object may still # be above the stack pointer and we have no access to the top of it # # def incref_elide_l(d): # return l[4] + l[4] # PyNumber_Add without increasing refcount from numpy.core.multiarray_tests import incref_elide_l # padding with 1 makes sure the object on the stack is not overwriten l = [1, 1, 1, 1, np.ones(5)] res = incref_elide_l(l) # the return original should not be changed to an inplace operation assert_array_equal(l[4], np.ones(5)) assert_array_equal(res, l[4] + l[4]) def test_ufunc_override_rop_precedence(self): # Check that __rmul__ and other right-hand operations have # precedence over 
__numpy_ufunc__ ops = { '__add__': ('__radd__', np.add, True), '__sub__': ('__rsub__', np.subtract, True), '__mul__': ('__rmul__', np.multiply, True), '__truediv__': ('__rtruediv__', np.true_divide, True), '__floordiv__': ('__rfloordiv__', np.floor_divide, True), '__mod__': ('__rmod__', np.remainder, True), '__divmod__': ('__rdivmod__', None, False), '__pow__': ('__rpow__', np.power, True), '__lshift__': ('__rlshift__', np.left_shift, True), '__rshift__': ('__rrshift__', np.right_shift, True), '__and__': ('__rand__', np.bitwise_and, True), '__xor__': ('__rxor__', np.bitwise_xor, True), '__or__': ('__ror__', np.bitwise_or, True), '__ge__': ('__le__', np.less_equal, False), '__gt__': ('__lt__', np.less, False), '__le__': ('__ge__', np.greater_equal, False), '__lt__': ('__gt__', np.greater, False), '__eq__': ('__eq__', np.equal, False), '__ne__': ('__ne__', np.not_equal, False), } class OtherNdarraySubclass(np.ndarray): pass class OtherNdarraySubclassWithOverride(np.ndarray): def __numpy_ufunc__(self, *a, **kw): raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have " "been called!") % (a, kw)) def check(op_name, ndsubclass): rop_name, np_op, has_iop = ops[op_name] if has_iop: iop_name = '__i' + op_name[2:] iop = getattr(operator, iop_name) if op_name == "__divmod__": op = divmod else: op = getattr(operator, op_name) # Dummy class def __init__(self, *a, **kw): pass def __numpy_ufunc__(self, *a, **kw): raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have " "been called!") % (a, kw)) def __op__(self, *other): return "op" def __rop__(self, *other): return "rop" if ndsubclass: bases = (np.ndarray,) else: bases = (object,) dct = {'__init__': __init__, '__numpy_ufunc__': __numpy_ufunc__, op_name: __op__} if op_name != rop_name: dct[rop_name] = __rop__ cls = type("Rop" + rop_name, bases, dct) # Check behavior against both bare ndarray objects and a # ndarray subclasses with and without their own override obj = cls((1,), buffer=np.ones(1,)) arr_objs = 
[np.array([1]), np.array([2]).view(OtherNdarraySubclass), np.array([3]).view(OtherNdarraySubclassWithOverride), ] for arr in arr_objs: err_msg = "%r %r" % (op_name, arr,) # Check that ndarray op gives up if it sees a non-subclass if not isinstance(obj, arr.__class__): assert_equal(getattr(arr, op_name)(obj), NotImplemented, err_msg=err_msg) # Check that the Python binops have priority assert_equal(op(obj, arr), "op", err_msg=err_msg) if op_name == rop_name: assert_equal(op(arr, obj), "op", err_msg=err_msg) else: assert_equal(op(arr, obj), "rop", err_msg=err_msg) # Check that Python binops have priority also for in-place ops if has_iop: assert_equal(getattr(arr, iop_name)(obj), NotImplemented, err_msg=err_msg) if op_name != "__pow__": # inplace pow requires the other object to be # integer-like? assert_equal(iop(arr, obj), "rop", err_msg=err_msg) # Check that ufunc call __numpy_ufunc__ normally if np_op is not None: assert_raises(AssertionError, np_op, arr, obj, err_msg=err_msg) assert_raises(AssertionError, np_op, obj, arr, err_msg=err_msg) # Check all binary operations for op_name in sorted(ops.keys()): yield check, op_name, True yield check, op_name, False def test_ufunc_override_rop_simple(self): # Check parts of the binary op overriding behavior in an # explicit test case that is easier to understand. class SomeClass(object): def __numpy_ufunc__(self, *a, **kw): return "ufunc" def __mul__(self, other): return 123 def __rmul__(self, other): return 321 def __rsub__(self, other): return "no subs for me" def __gt__(self, other): return "yep" def __lt__(self, other): return "nope" class SomeClass2(SomeClass, np.ndarray): def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): if ufunc is np.multiply or ufunc is np.bitwise_and: return "ufunc" else: inputs = list(inputs) inputs[i] = np.asarray(self) func = getattr(ufunc, method) r = func(*inputs, **kw) if 'out' in kw: return r else: x = self.__class__(r.shape, dtype=r.dtype) x[...] 
= r return x class SomeClass3(SomeClass2): def __rsub__(self, other): return "sub for me" arr = np.array([0]) obj = SomeClass() obj2 = SomeClass2((1,), dtype=np.int_) obj2[0] = 9 obj3 = SomeClass3((1,), dtype=np.int_) obj3[0] = 4 # obj is first, so should get to define outcome. assert_equal(obj * arr, 123) # obj is second, but has __numpy_ufunc__ and defines __rmul__. assert_equal(arr * obj, 321) # obj is second, but has __numpy_ufunc__ and defines __rsub__. assert_equal(arr - obj, "no subs for me") # obj is second, but has __numpy_ufunc__ and defines __lt__. assert_equal(arr > obj, "nope") # obj is second, but has __numpy_ufunc__ and defines __gt__. assert_equal(arr < obj, "yep") # Called as a ufunc, obj.__numpy_ufunc__ is used. assert_equal(np.multiply(arr, obj), "ufunc") # obj is second, but has __numpy_ufunc__ and defines __rmul__. arr *= obj assert_equal(arr, 321) # obj2 is an ndarray subclass, so CPython takes care of the same rules. assert_equal(obj2 * arr, 123) assert_equal(arr * obj2, 321) assert_equal(arr - obj2, "no subs for me") assert_equal(arr > obj2, "nope") assert_equal(arr < obj2, "yep") # Called as a ufunc, obj2.__numpy_ufunc__ is called. assert_equal(np.multiply(arr, obj2), "ufunc") # Also when the method is not overridden. assert_equal(arr & obj2, "ufunc") arr *= obj2 assert_equal(arr, 321) obj2 += 33 assert_equal(obj2[0], 42) assert_equal(obj2.sum(), 42) assert_(isinstance(obj2, SomeClass2)) # Obj3 is subclass that defines __rsub__. CPython calls it. assert_equal(arr - obj3, "sub for me") assert_equal(obj2 - obj3, "sub for me") # obj3 is a subclass that defines __rmul__. CPython calls it. assert_equal(arr * obj3, 321) # But not here, since obj3.__rmul__ is obj2.__rmul__. assert_equal(obj2 * obj3, 123) # And of course, here obj3.__mul__ should be called. assert_equal(obj3 * obj2, 123) # obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__. # (and both are just ndarray.__radd__); see #4815. 
res = obj2 + obj3 assert_equal(res, 46) assert_(isinstance(res, SomeClass2)) # Since obj3 is a subclass, it should have precedence, like CPython # would give, even though obj2 has __numpy_ufunc__ and __radd__. # See gh-4815 and gh-5747. res = obj3 + obj2 assert_equal(res, 46) assert_(isinstance(res, SomeClass3)) def test_ufunc_override_normalize_signature(self): # gh-5674 class SomeClass(object): def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): return kw a = SomeClass() kw = np.add(a, [1]) assert_('sig' not in kw and 'signature' not in kw) kw = np.add(a, [1], sig='ii->i') assert_('sig' not in kw and 'signature' in kw) assert_equal(kw['signature'], 'ii->i') kw = np.add(a, [1], signature='ii->i') assert_('sig' not in kw and 'signature' in kw) assert_equal(kw['signature'], 'ii->i') class TestCAPI(TestCase): def test_IsPythonScalar(self): from numpy.core.multiarray_tests import IsPythonScalar assert_(IsPythonScalar(b'foobar')) assert_(IsPythonScalar(1)) assert_(IsPythonScalar(2**80)) assert_(IsPythonScalar(2.)) assert_(IsPythonScalar("a")) class TestSubscripting(TestCase): def test_test_zero_rank(self): x = np.array([1, 2, 3]) self.assertTrue(isinstance(x[0], np.int_)) if sys.version_info[0] < 3: self.assertTrue(isinstance(x[0], int)) self.assertTrue(type(x[0, ...]) is np.ndarray) class TestPickling(TestCase): def test_roundtrip(self): import pickle carray = np.array([[2, 9], [7, 0], [3, 8]]) DATA = [ carray, np.transpose(carray), np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), ('c', float)]) ] for a in DATA: assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a) def _loads(self, obj): if sys.version_info[0] >= 3: return np.loads(obj, encoding='latin1') else: return np.loads(obj) # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field def test_version0_int8(self): s = 
'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(asbytes(s)) assert_equal(a, p) def test_version0_float32(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.' a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32) p = self._loads(asbytes(s)) assert_equal(a, p) def test_version0_object(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.' a = np.array([{'a':1}, {'b':2}]) p = self._loads(asbytes(s)) assert_equal(a, p) # version 1 pickles, using protocol=2 to pickle def test_version1_int8(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(asbytes(s)) assert_equal(a, p) def test_version1_float32(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.' 
        a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version1_object(self):
        # Version-1 pickle (made with protocol=2, per the note above the
        # version-1 group) of an object-dtype array: must still round-trip
        # through the current _loads machinery.
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = np.array([{'a':1}, {'b':2}])
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_subarray_int_shape(self):
        # Legacy protocol-0 payload for a structured dtype with subarray
        # fields; the shape tokens here are bare ints (e.g. I2) rather than
        # tuples, as the method name suggests — presumably produced by an
        # older numpy.  It must still load (verify against the matching
        # array built below).
        s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
        # Reference array matching the pickled payload: field 'a' is a
        # (2, 2) int8 subarray, field 'b' a length-2 int8 subarray.
        a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
        p = self._loads(asbytes(s))
        assert_equal(a, p)


class TestFancyIndexing(TestCase):
    def test_list(self):
        # Assigning through a one-element list index behaves like a slice.
        x = np.ones((1, 1))
        x[:, [0]] = 2.0
        assert_array_equal(x, np.array([[2.0]]))

        x = np.ones((1, 1, 1))
        x[:,:, [0]] = 2.0
        assert_array_equal(x, np.array([[[2.0]]]))

    def test_tuple(self):
        # Same as test_list but with a tuple index in the last position.
        x = np.ones((1, 1))
        x[:, (0,)] = 2.0
        assert_array_equal(x, np.array([[2.0]]))

        x = np.ones((1, 1, 1))
        x[:,:, (0,)] = 2.0
        assert_array_equal(x, np.array([[[2.0]]]))

    def test_mask(self):
        # Boolean mask selects exactly the True positions.
        x = np.array([1, 2, 3, 4])
        m = np.array([0, 1, 0, 0], bool)
        assert_array_equal(x[m], np.array([2]))

    def test_mask2(self):
        # Masks on 2-D input: a 1-D mask selects whole rows, a full-shape
        # mask selects individual elements (flattened result).
        x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        m = np.array([0, 1], bool)
        m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
        m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
        assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
        assert_array_equal(x[m2], np.array([2, 5]))
        assert_array_equal(x[m3], np.array([2]))

    def test_assign_mask(self):
        # Scalar assignment through a boolean mask touches only True slots.
        x = np.array([1, 2, 3, 4])
        m = np.array([0, 1, 0, 0], bool)
        x[m] = 5
        assert_array_equal(x, np.array([1, 5, 3, 4]))

    def test_assign_mask2(self):
        # Same masks as test_mask2, now used as assignment targets; each
        # case starts from a fresh copy so the checks are independent.
        xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        m = np.array([0, 1], bool)
        m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
        m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
        x = xorig.copy()
        x[m] = 10
        assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
        x = xorig.copy()
        x[m2] = 10
        assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
        x = xorig.copy()
        x[m3] = 10
        assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))


class TestStringCompare(TestCase):
    def test_string(self):
        # Elementwise comparison of byte-string arrays must agree with
        # Python's per-element string comparison.
        g1 = np.array(["This", "is", "example"])
        g2 = np.array(["This", "was", "example"])
        assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])

    def test_mixed(self):
        # Array-vs-scalar string comparison broadcasts the scalar.
        g1 = np.array(["spam", "spa", "spammer", "and eggs"])
        g2 = "spam"
        assert_array_equal(g1 == g2, [x == g2 for x in g1])
        assert_array_equal(g1 != g2, [x != g2 for x in g1])
        assert_array_equal(g1 < g2, [x < g2 for x in g1])
        assert_array_equal(g1 > g2, [x > g2 for x in g1])
        assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
        assert_array_equal(g1 >= g2, [x >= g2 for x in g1])

    def test_unicode(self):
        # Same checks as test_string, but on unicode arrays (sixu wraps
        # the literals for py2/py3 compatibility).
        g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
        g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
        assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])


class TestArgmax(TestCase):
    # Each entry is (sequence, expected argmax index); consumed by
    # test_combinations.  Covers NaN/NaT propagation (first NaN wins),
    # complex ordering, datetime64/timedelta64, datetime.timedelta and bools.
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 2),

        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 5),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2015-11-20T12:20:59'),
          np.datetime64('1932-09-23T10:10:13'),
          np.datetime64('2014-10-10T03:50:30')], 3),

        # Assorted tests with NaTs
        ([np.datetime64('NaT'),
          np.datetime64('NaT'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('NaT'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 4),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('NaT'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 0),
        ([np.timedelta64(2, 's'),
          np.timedelta64(1, 's'),
          np.timedelta64('NaT', 's'),
          np.timedelta64(3, 's')], 3),
        ([np.timedelta64('NaT', 's')] * 3, 0),

        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 0),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 1),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 2),

        ([False, False, False, False, True], 4),
        ([False, False, False, True, False], 3),
        ([True, False, False, False, False], 0),
        ([True, False, True, False, False], 0),

        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 3),
        #(['zz', 'a', 'aa', 'a'], 0),
        #(['aa', 'z', 'zz', 'a'], 2),
    ]

    def test_all(self):
        # For every axis, argmax indices fed back through choose() must
        # reproduce max() along that axis.
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in range(a.ndim):
            amax = a.max(i)
            aargmax = a.argmax(i)
            axes = list(range(a.ndim))
            axes.remove(i)
            assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))

    def test_combinations(self):
        # Drive argmax over the nan_arr table and cross-check with np.max.
        for arr, pos in self.nan_arr:
            assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
            assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)

    def test_output_shape(self):
        # see also gh-616
        a = np.ones((10, 5))
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, out)

        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, out)

        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))

        # A correctly shaped out= is filled in place.
        out = np.ones(10, dtype=np.int_)
        a.argmax(-1, out=out)
        assert_equal(out, a.argmax(-1))

    def test_argmax_unicode(self):
        # Single differing element in a large unicode array is found.
        d = np.zeros(6031, dtype='<U9')
        d[5942] = "as"
        assert_equal(d.argmax(), 5942)

    def test_np_vs_ndarray(self):
        # make sure both ndarray.argmax and numpy.argmax support out/axis args
        a = np.random.normal(size=(2,3))

        #check positional args
        out1 = np.zeros(2, dtype=int)
        out2 = np.zeros(2, dtype=int)
        assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
        assert_equal(out1, out2)

        #check keyword args
        out1 = np.zeros(3, dtype=int)
        out2 = np.zeros(3, dtype=int)
        assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
        assert_equal(out1, out2)


class TestArgmin(TestCase):
    # Mirror of TestArgmax.nan_arr: (sequence, expected argmin index).
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 1),

        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 0),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2014-11-20T12:20:59'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 5),

        # Assorted tests with NaTs
        ([np.datetime64('NaT'),
          np.datetime64('NaT'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('NaT'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 5),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('NaT'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 4),
        ([np.timedelta64(2, 's'),
          np.timedelta64(1, 's'),
          np.timedelta64('NaT', 's'),
          np.timedelta64(3, 's')], 1),
        ([np.timedelta64('NaT', 's')] * 3, 0),

        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 2),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 0),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 1),

        ([True, True, True, True, False], 4),
        ([True, True, True, False, True], 3),
        ([False, True, True, True, True], 0),
        ([False, True, False, True, True], 0),

        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 0),
        #(['zz', 'a', 'aa', 'a'], 1),
        #(['aa', 'z', 'zz', 'a'], 3),
    ]

    def test_all(self):
        # For every axis, argmin indices fed back through choose() must
        # reproduce min() along that axis.
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in range(a.ndim):
            amin = a.min(i)
            aargmin = a.argmin(i)
            axes = list(range(a.ndim))
            axes.remove(i)
            assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))

    def test_combinations(self):
        # Drive argmin over the nan_arr table and cross-check with np.min.
        for arr, pos in self.nan_arr:
            assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
            assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)

    def test_minimum_signed_integers(self):
        # The most-negative representable value of each signed width must
        # win over a value one above it (guards against overflow in the
        # comparison).
        a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
        assert_equal(np.argmin(a), 1)

    def test_output_shape(self):
        # see also gh-616
        a = np.ones((10, 5))
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)

        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)

        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))

        # A correctly shaped out= is filled in place.
        out = np.ones(10, dtype=np.int_)
        a.argmin(-1, out=out)
        assert_equal(out, a.argmin(-1))

    def test_argmin_unicode(self):
        # Single differing element in a large unicode array is found.
        d = np.ones(6031, dtype='<U9')
        d[6001] = "0"
        assert_equal(d.argmin(), 6001)

    def test_np_vs_ndarray(self):
        # make sure both ndarray.argmin and numpy.argmin support out/axis args
        a = np.random.normal(size=(2,3))

        #check positional args
        out1 = np.zeros(2, dtype=int)
        out2 = np.ones(2, dtype=int)
        assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
        assert_equal(out1, out2)

        #check keyword args
        out1 = np.zeros(3, dtype=int)
        out2 = np.ones(3, dtype=int)
        assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
        assert_equal(out1, out2)


class TestMinMax(TestCase):
    def test_scalar(self):
        # amax/amin on a 0-d input: any axis other than 0/None is an error.
        assert_raises(ValueError, np.amax, 1, 1)
        assert_raises(ValueError, np.amin, 1, 1)

        assert_equal(np.amax(1, axis=0), 1)
        assert_equal(np.amin(1, axis=0), 1)
        assert_equal(np.amax(1, axis=None), 1)
        assert_equal(np.amin(1, axis=None), 1)

    def test_axis(self):
        # Out-of-range axis raises; a valid axis reduces as expected.
        assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
        assert_equal(np.amax([[1, 2, 3]], axis=1), 3)

    def test_datetime(self):
        # NaTs are ignored
        for dtype in ('m8[s]', 'm8[Y]'):
            a = np.arange(10).astype(dtype)
            a[3] = 'NaT'
            assert_equal(np.amin(a), a[0])
            assert_equal(np.amax(a), a[9])
            a[0] = 'NaT'
            assert_equal(np.amin(a), a[1])
            assert_equal(np.amax(a), a[9])
            # All-NaT: the reduction falls back to the first element.
            a.fill('NaT')
            assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0]) class TestNewaxis(TestCase): def test_basic(self): sk = np.array([0, -0.1, 0.1]) res = 250*sk[:, np.newaxis] assert_almost_equal(res.ravel(), 250*sk) class TestClip(TestCase): def _check_range(self, x, cmin, cmax): assert_(np.all(x >= cmin)) assert_(np.all(x <= cmax)) def _clip_type(self, type_group, array_max, clip_min, clip_max, inplace=False, expected_min=None, expected_max=None): if expected_min is None: expected_min = clip_min if expected_max is None: expected_max = clip_max for T in np.sctypes[type_group]: if sys.byteorder == 'little': byte_orders = ['=', '>'] else: byte_orders = ['<', '='] for byteorder in byte_orders: dtype = np.dtype(T).newbyteorder(byteorder) x = (np.random.random(1000) * array_max).astype(dtype) if inplace: x.clip(clip_min, clip_max, x) else: x = x.clip(clip_min, clip_max) byteorder = '=' if x.dtype.byteorder == '|': byteorder = '|' assert_equal(x.dtype.byteorder, byteorder) self._check_range(x, expected_min, expected_max) return x def test_basic(self): for inplace in [False, True]: self._clip_type( 'float', 1024, -12.8, 100.2, inplace=inplace) self._clip_type( 'float', 1024, 0, 0, inplace=inplace) self._clip_type( 'int', 1024, -120, 100.5, inplace=inplace) self._clip_type( 'int', 1024, 0, 0, inplace=inplace) self._clip_type( 'uint', 1024, 0, 0, inplace=inplace) self._clip_type( 'uint', 1024, -120, 100, inplace=inplace, expected_min=0) def test_record_array(self): rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')]) y = rec['x'].clip(-0.3, 0.5) self._check_range(y, -0.3, 0.5) def test_max_or_min(self): val = np.array([0, 1, 2, 3, 4, 5, 6, 7]) x = val.clip(3) assert_(np.all(x >= 3)) x = val.clip(min=3) assert_(np.all(x >= 3)) x = val.clip(max=4) assert_(np.all(x <= 4)) class TestPutmask(object): def tst_basic(self, x, T, mask, val): np.putmask(x, mask, val) assert_(np.all(x[mask] == T(val))) assert_(x.dtype == T) def test_ip_types(self): unchecked_types = [str, 
unicode, np.void, object] x = np.random.random(1000)*100 mask = x < 40 for val in [-100, 0, 15]: for types in np.sctypes.values(): for T in types: if T not in unchecked_types: yield self.tst_basic, x.copy().astype(T), T, mask, val def test_mask_size(self): assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) def tst_byteorder(self, dtype): x = np.array([1, 2, 3], dtype) np.putmask(x, [True, False, True], -1) assert_array_equal(x, [-1, 2, -1]) def test_ip_byteorder(self): for dtype in ('>i4', '<i4'): yield self.tst_byteorder, dtype def test_record_array(self): # Note mixed byteorder. rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')]) np.putmask(rec['x'], [True, False], 10) assert_array_equal(rec['x'], [10, 5]) assert_array_equal(rec['y'], [2, 4]) assert_array_equal(rec['z'], [3, 3]) np.putmask(rec['y'], [True, False], 11) assert_array_equal(rec['x'], [10, 5]) assert_array_equal(rec['y'], [11, 4]) assert_array_equal(rec['z'], [3, 3]) def test_masked_array(self): ## x = np.array([1,2,3]) ## z = np.ma.array(x,mask=[True,False,False]) ## np.putmask(z,[True,True,True],3) pass class TestTake(object): def tst_basic(self, x): ind = list(range(x.shape[0])) assert_array_equal(x.take(ind, axis=0), x) def test_ip_types(self): unchecked_types = [str, unicode, np.void, object] x = np.random.random(24)*100 x.shape = 2, 3, 4 for types in np.sctypes.values(): for T in types: if T not in unchecked_types: yield self.tst_basic, x.copy().astype(T) def test_raise(self): x = np.random.random(24)*100 x.shape = 2, 3, 4 assert_raises(IndexError, x.take, [0, 1, 2], axis=0) assert_raises(IndexError, x.take, [-3], axis=0) assert_array_equal(x.take([-1], axis=0)[0], x[1]) def test_clip(self): x = np.random.random(24)*100 x.shape = 2, 3, 4 assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) def test_wrap(self): x = np.random.random(24)*100 x.shape = 2, 3, 4 
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) def tst_byteorder(self, dtype): x = np.array([1, 2, 3], dtype) assert_array_equal(x.take([0, 2, 1]), [1, 3, 2]) def test_ip_byteorder(self): for dtype in ('>i4', '<i4'): yield self.tst_byteorder, dtype def test_record_array(self): # Note mixed byteorder. rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')]) rec1 = rec.take([1]) assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0) class TestLexsort(TestCase): def test_basic(self): a = [1, 2, 1, 3, 1, 5] b = [0, 4, 5, 6, 2, 3] idx = np.lexsort((b, a)) expected_idx = np.array([0, 4, 2, 1, 3, 5]) assert_array_equal(idx, expected_idx) x = np.vstack((b, a)) idx = np.lexsort(x) assert_array_equal(idx, expected_idx) assert_array_equal(x[1][idx], np.sort(x[1])) def test_datetime(self): a = np.array([0,0,0], dtype='datetime64[D]') b = np.array([2,1,0], dtype='datetime64[D]') idx = np.lexsort((b, a)) expected_idx = np.array([2, 1, 0]) assert_array_equal(idx, expected_idx) a = np.array([0,0,0], dtype='timedelta64[D]') b = np.array([2,1,0], dtype='timedelta64[D]') idx = np.lexsort((b, a)) expected_idx = np.array([2, 1, 0]) assert_array_equal(idx, expected_idx) class TestIO(object): """Test tofile, fromfile, tobytes, and fromstring""" def setUp(self): shape = (2, 4, 3) rand = np.random.random self.x = rand(shape) + rand(shape).astype(np.complex)*1j self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan] self.dtype = self.x.dtype self.tempdir = tempfile.mkdtemp() self.filename = tempfile.mktemp(dir=self.tempdir) def tearDown(self): shutil.rmtree(self.tempdir) def test_bool_fromstring(self): v = np.array([True, False, True, False], dtype=np.bool_) y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_) assert_array_equal(v, y) def test_uint64_fromstring(self): d = np.fromstring("9923372036854775807 
104783749223640", dtype=np.uint64, sep=' ') e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64) assert_array_equal(d, e) def test_int64_fromstring(self): d = np.fromstring("-25041670086757 104783749223640", dtype=np.int64, sep=' ') e = np.array([-25041670086757, 104783749223640], dtype=np.int64) assert_array_equal(d, e) def test_empty_files_binary(self): f = open(self.filename, 'w') f.close() y = np.fromfile(self.filename) assert_(y.size == 0, "Array not empty") def test_empty_files_text(self): f = open(self.filename, 'w') f.close() y = np.fromfile(self.filename, sep=" ") assert_(y.size == 0, "Array not empty") def test_roundtrip_file(self): f = open(self.filename, 'wb') self.x.tofile(f) f.close() # NB. doesn't work with flush+seek, due to use of C stdio f = open(self.filename, 'rb') y = np.fromfile(f, dtype=self.dtype) f.close() assert_array_equal(y, self.x.flat) def test_roundtrip_filename(self): self.x.tofile(self.filename) y = np.fromfile(self.filename, dtype=self.dtype) assert_array_equal(y, self.x.flat) def test_roundtrip_binary_str(self): s = self.x.tobytes() y = np.fromstring(s, dtype=self.dtype) assert_array_equal(y, self.x.flat) s = self.x.tobytes('F') y = np.fromstring(s, dtype=self.dtype) assert_array_equal(y, self.x.flatten('F')) def test_roundtrip_str(self): x = self.x.real.ravel() s = "@".join(map(str, x)) y = np.fromstring(s, sep="@") # NB. 
str imbues less precision nan_mask = ~np.isfinite(x) assert_array_equal(x[nan_mask], y[nan_mask]) assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5) def test_roundtrip_repr(self): x = self.x.real.ravel() s = "@".join(map(repr, x)) y = np.fromstring(s, sep="@") assert_array_equal(x, y) def test_file_position_after_fromfile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE*8] for size in sizes: f = open(self.filename, 'wb') f.seek(size-1) f.write(b'\0') f.close() for mode in ['rb', 'r+b']: err_msg = "%d %s" % (size, mode) f = open(self.filename, mode) f.read(2) np.fromfile(f, dtype=np.float64, count=1) pos = f.tell() f.close() assert_equal(pos, 10, err_msg=err_msg) def test_file_position_after_tofile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE*8] for size in sizes: err_msg = "%d" % (size,) f = open(self.filename, 'wb') f.seek(size-1) f.write(b'\0') f.seek(10) f.write(b'12') np.array([0], dtype=np.float64).tofile(f) pos = f.tell() f.close() assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) f = open(self.filename, 'r+b') f.read(2) f.seek(0, 1) # seek between read&write required by ANSI C np.array([0], dtype=np.float64).tofile(f) pos = f.tell() f.close() assert_equal(pos, 10, err_msg=err_msg) def _check_from(self, s, value, **kw): y = np.fromstring(asbytes(s), **kw) assert_array_equal(y, value) f = open(self.filename, 'wb') f.write(asbytes(s)) f.close() y = np.fromfile(self.filename, **kw) assert_array_equal(y, value) def test_nan(self): self._check_from( "nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], sep=' ') def test_inf(self): self._check_from( "inf +inf -inf infinity -Infinity iNfInItY -inF", [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], sep=' ') def test_numbers(self): self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, 
-123133.1231e+133], sep=' ') def test_binary(self): self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', np.array([1, 2, 3, 4]), dtype='<f4') @dec.slow # takes > 1 minute on mechanical hard drive def test_big_binary(self): """Test workarounds for 32-bit limited fwrite, fseek, and ftell calls in windows. These normally would hang doing something like this. See http://projects.scipy.org/numpy/ticket/1660""" if sys.platform != 'win32': return try: # before workarounds, only up to 2**32-1 worked fourgbplus = 2**32 + 2**16 testbytes = np.arange(8, dtype=np.int8) n = len(testbytes) flike = tempfile.NamedTemporaryFile() f = flike.file np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) flike.seek(0) a = np.fromfile(f, dtype=np.int8) flike.close() assert_(len(a) == fourgbplus) # check only start and end for speed: assert_((a[:n] == testbytes).all()) assert_((a[-n:] == testbytes).all()) except (MemoryError, ValueError): pass def test_string(self): self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',') def test_counted_string(self): self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',') self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') def test_string_with_ws(self): self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') def test_counted_string_with_ws(self): self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int, sep=' ') def test_ascii(self): self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') def test_malformed(self): self._check_from('1.234 1,234', [1.234, 1.], sep=' ') def test_long_sep(self): self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') def test_dtype(self): v = np.array([1, 2, 3, 4], dtype=np.int_) self._check_from('1,2,3,4', v, sep=',', dtype=np.int_) def test_dtype_bool(self): # can't use _check_from because fromstring can't handle True/False v = 
np.array([True, False, True, False], dtype=np.bool_) s = '1,0,-2.3,0' f = open(self.filename, 'wb') f.write(asbytes(s)) f.close() y = np.fromfile(self.filename, sep=',', dtype=np.bool_) assert_(y.dtype == '?') assert_array_equal(y, v) def test_tofile_sep(self): x = np.array([1.51, 2, 3.51, 4], dtype=float) f = open(self.filename, 'w') x.tofile(f, sep=',') f.close() f = open(self.filename, 'r') s = f.read() f.close() assert_equal(s, '1.51,2.0,3.51,4.0') def test_tofile_format(self): x = np.array([1.51, 2, 3.51, 4], dtype=float) f = open(self.filename, 'w') x.tofile(f, sep=',', format='%.2f') f.close() f = open(self.filename, 'r') s = f.read() f.close() assert_equal(s, '1.51,2.00,3.51,4.00') def test_locale(self): in_foreign_locale(self.test_numbers)() in_foreign_locale(self.test_nan)() in_foreign_locale(self.test_inf)() in_foreign_locale(self.test_counted_string)() in_foreign_locale(self.test_ascii)() in_foreign_locale(self.test_malformed)() in_foreign_locale(self.test_tofile_sep)() in_foreign_locale(self.test_tofile_format)() class TestFromBuffer(object): def tst_basic(self, buffer, expected, kwargs): assert_array_equal(np.frombuffer(buffer,**kwargs), expected) def test_ip_basic(self): for byteorder in ['<', '>']: for dtype in [float, int, np.complex]: dt = np.dtype(dtype).newbyteorder(byteorder) x = (np.random.random((4, 7))*5).astype(dt) buf = x.tobytes() yield self.tst_basic, buf, x.flat, {'dtype':dt} def test_empty(self): yield self.tst_basic, asbytes(''), np.array([]), {} class TestFlat(TestCase): def setUp(self): a0 = np.arange(20.0) a = a0.reshape(4, 5) a0.shape = (4, 5) a.flags.writeable = False self.a = a self.b = a[::2, ::2] self.a0 = a0 self.b0 = a0[::2, ::2] def test_contiguous(self): testpassed = False try: self.a.flat[12] = 100.0 except ValueError: testpassed = True assert testpassed assert self.a.flat[12] == 12.0 def test_discontiguous(self): testpassed = False try: self.b.flat[4] = 100.0 except ValueError: testpassed = True assert testpassed assert 
self.b.flat[4] == 12.0 def test___array__(self): c = self.a.flat.__array__() d = self.b.flat.__array__() e = self.a0.flat.__array__() f = self.b0.flat.__array__() assert c.flags.writeable is False assert d.flags.writeable is False assert e.flags.writeable is True assert f.flags.writeable is True assert c.flags.updateifcopy is False assert d.flags.updateifcopy is False assert e.flags.updateifcopy is False assert f.flags.updateifcopy is True assert f.base is self.b0 class TestResize(TestCase): def test_basic(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) x.resize((5, 5)) assert_array_equal(x.flat[:9], np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) assert_array_equal(x[9:].flat, 0) def test_check_reference(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y = x self.assertRaises(ValueError, x.resize, (5, 1)) del y # avoid pyflakes unused variable warning. def test_int_shape(self): x = np.eye(3) x.resize(3) assert_array_equal(x, np.eye(3)[0,:]) def test_none_shape(self): x = np.eye(3) x.resize(None) assert_array_equal(x, np.eye(3)) x.resize() assert_array_equal(x, np.eye(3)) def test_invalid_arguements(self): self.assertRaises(TypeError, np.eye(3).resize, 'hi') self.assertRaises(ValueError, np.eye(3).resize, -1) self.assertRaises(TypeError, np.eye(3).resize, order=1) self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi') def test_freeform_shape(self): x = np.eye(3) x.resize(3, 2, 1) assert_(x.shape == (3, 2, 1)) def test_zeros_appended(self): x = np.eye(3) x.resize(2, 3, 3) assert_array_equal(x[0], np.eye(3)) assert_array_equal(x[1], np.zeros((3, 3))) def test_obj_obj(self): # check memory is initialized on resize, gh-4857 a = np.ones(10, dtype=[('k', object, 2)]) a.resize(15,) assert_equal(a.shape, (15,)) assert_array_equal(a['k'][-5:], 0) assert_array_equal(a['k'][:-5], 1) class TestRecord(TestCase): def test_field_rename(self): dt = np.dtype([('f', float), ('i', int)]) dt.names = ['p', 'q'] assert_equal(dt.names, ['p', 'q']) if 
sys.version_info[0] >= 3: def test_bytes_fields(self): # Bytes are not allowed in field names and not recognized in titles # on Py3 assert_raises(TypeError, np.dtype, [(asbytes('a'), int)]) assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)]) dt = np.dtype([((asbytes('a'), 'b'), int)]) assert_raises(ValueError, dt.__getitem__, asbytes('a')) x = np.array([(1,), (2,), (3,)], dtype=dt) assert_raises(IndexError, x.__getitem__, asbytes('a')) y = x[0] assert_raises(IndexError, y.__getitem__, asbytes('a')) else: def test_unicode_field_titles(self): # Unicode field titles are added to field dict on Py2 title = unicode('b') dt = np.dtype([((title, 'a'), int)]) dt[title] dt['a'] x = np.array([(1,), (2,), (3,)], dtype=dt) x[title] x['a'] y = x[0] y[title] y['a'] def test_unicode_field_names(self): # Unicode field names are not allowed on Py2 title = unicode('b') assert_raises(TypeError, np.dtype, [(title, int)]) assert_raises(TypeError, np.dtype, [(('a', title), int)]) def test_field_names(self): # Test unicode and 8-bit / byte strings can be used a = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) is_py3 = sys.version_info[0] >= 3 if is_py3: funcs = (str,) # byte string indexing fails gracefully assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1) assert_raises(IndexError, a.__getitem__, asbytes('f1')) assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1) assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1')) else: funcs = (str, unicode) for func in funcs: b = a.copy() fn1 = func('f1') b[fn1] = 1 assert_equal(b[fn1], 1) fnn = func('not at all') assert_raises(ValueError, b.__setitem__, fnn, 1) assert_raises(ValueError, b.__getitem__, fnn) b[0][fn1] = 2 assert_equal(b[fn1], 2) # Subfield assert_raises(IndexError, b[0].__setitem__, fnn, 1) assert_raises(IndexError, b[0].__getitem__, fnn) # Subfield fn3 = func('f3') sfn1 = func('sf1') b[fn3][sfn1] = 1 assert_equal(b[fn3][sfn1], 1) assert_raises(ValueError, 
                      b[fn3].__setitem__, fnn, 1)
        assert_raises(ValueError, b[fn3].__getitem__, fnn)
        # multiple Subfields
        fn2 = func('f2')
        b[fn2] = 3
        assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
        assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
        assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
        # view of subfield view/copy
        assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
        assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
        view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
        assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
        # non-ascii unicode field indexing is well behaved
        if not is_py3:
            raise SkipTest('non ascii unicode field indexing skipped; '
                           'raises segfault on python 2.x')
        else:
            assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
            assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))

    def test_field_names_deprecation(self):
        # Multi-field indexing assignment is deprecated: each such access
        # must emit exactly one FutureWarning and leave the source intact.

        def collect_warnings(f, *args, **kwargs):
            # Run f and return the list of warning categories it raised.
            with warnings.catch_warnings(record=True) as log:
                warnings.simplefilter("always")
                f(*args, **kwargs)
            return [w.category for w in log]

        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        a['f1'][0] = 1
        a['f2'][0] = 2
        a['f3'][0] = (3,)
        b = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        b['f1'][0] = 1
        b['f2'][0] = 2
        b['f3'][0] = (3,)

        # All the different functions raise a warning, but not an error, and
        # 'a' is not modified:
        assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
                     [FutureWarning])
        assert_equal(a, b)
        # Views also warn
        subset = a[['f1', 'f2']]
        subset_view = subset.view()
        assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
                     [FutureWarning])
        # But the write goes through:
        assert_equal(subset['f1'][0], 10)
        # Only one warning per multiple field indexing, though (even if there
        # are multiple views involved):
        assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])

    def test_record_hash(self):
        # Read-only record scalars are hashable; equal records (even across
        # differently-named but layout-compatible dtypes) hash equally.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        a.flags.writeable = False
        b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
        b.flags.writeable = False
        c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
        c.flags.writeable = False
        self.assertTrue(hash(a[0]) == hash(a[1]))
        self.assertTrue(hash(a[0]) == hash(b[0]))
        self.assertTrue(hash(a[0]) != hash(b[1]))
        self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])

    def test_record_no_hash(self):
        # Records from a writable array must not be hashable.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        self.assertRaises(TypeError, hash, a[0])

    def test_empty_structure_creation(self):
        # make sure these do not raise errors (gh-5631)
        np.array([()], dtype={'names': [], 'formats': [],
                              'offsets': [], 'itemsize': 12})
        np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
                                              'offsets': [], 'itemsize': 12})


class TestView(TestCase):
    # Reinterpreting a 4-byte record as a little-endian int32 must produce
    # the same value whether the dtype is passed by keyword or positionally.
    def test_basic(self):
        x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
                     dtype=[('r', np.int8), ('g', np.int8),
                            ('b', np.int8), ('a', np.int8)])
        # We must be specific about the endianness here:
        y = x.view(dtype='<i4')
        # ... and again without the keyword.
        z = x.view('<i4')
        assert_array_equal(y, z)
        assert_array_equal(y, [67305985, 134678021])


# Thin wrappers so mean/var/std can be driven through one parametrized list.
def _mean(a, **args):
    return a.mean(**args)


def _var(a, **args):
    return a.var(**args)


def _std(a, **args):
    return a.std(**args)


class TestStats(TestCase):
    # Statistics (mean/var/std) over real, complex and object (Decimal)
    # matrices: dtype propagation, out=, ddof, keepdims and subclasses.

    funcs = [_mean, _var, _std]

    def setUp(self):
        np.random.seed(range(3))
        self.rmat = np.random.random((4, 5))
        self.cmat = self.rmat + 1j * self.rmat
        self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
        self.omat = self.omat.reshape(4, 5)

    def test_keepdims(self):
        # keepdims=True keeps the reduced axis with length 1.
        mat = np.eye(3)
        for f in self.funcs:
            for axis in [0, 1]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.ndim == mat.ndim)
                assert_(res.shape[axis] == 1)
            for axis in [None]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.shape == (1, 1))

    def test_out(self):
        # out= receives the result; a mis-shaped out raises ValueError.
        mat = np.eye(3)
        for f in self.funcs:
            out = np.zeros(3)
            tgt = f(mat, axis=1)
            res = f(mat, axis=1, out=out)
            assert_almost_equal(res, out)
            assert_almost_equal(res, tgt)
            out = np.empty(2)
            assert_raises(ValueError, f, mat, axis=1, out=out)
            out = np.empty((2, 2))
            assert_raises(ValueError, f, mat, axis=1, out=out)

    def test_dtype_from_input(self):
        # Result dtype inferred from the input dtype.
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']

        # object type
        for f in self.funcs:
            mat = np.array([[Decimal(1)]*3]*3)
            tgt = mat.dtype.type
            res = f(mat, axis=1).dtype.type
            assert_(res is tgt)
            # scalar case
            res = type(f(mat, axis=None))
            assert_(res is Decimal)

        # integer types
        for f in self.funcs:
            for c in icodes:
                mat = np.eye(3, dtype=c)
                tgt = np.float64
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

        # mean for float types
        for f in [_mean]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                tgt = mat.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

        # var, std for float types
        for f in [_var, _std]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                # deal with complex types
                tgt = mat.real.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

    def test_dtype_from_dtype(self):
        # Result dtype follows an explicit dtype= argument.
        mat = np.eye(3)

        # stats for integer types
        # FIXME:
        # this needs definition as there are lots places along the line
        # where type casting may take place.
        #for f in self.funcs:
        #    for c in np.typecodes['AllInteger']:
        #        tgt = np.dtype(c).type
        #        res = f(mat, axis=1, dtype=c).dtype.type
        #        assert_(res is tgt)
        #        # scalar case
        #        res = f(mat, axis=None, dtype=c).dtype.type
        #        assert_(res is tgt)

        # stats for float types
        for f in self.funcs:
            for c in np.typecodes['AllFloat']:
                tgt = np.dtype(c).type
                res = f(mat, axis=1, dtype=c).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None, dtype=c).dtype.type
                assert_(res is tgt)

    def test_ddof(self):
        # var/std with ddof k rescale by (n - k) relative to ddof=0.
        # NOTE(review): the _var loop below computes tgt/res but never
        # asserts, and the _std loop asserts the same thing twice — both
        # look like artifacts; confirm against upstream before "fixing".
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
                assert_almost_equal(res, tgt)

    def test_ddof_too_big(self):
        # ddof >= n warns (RuntimeWarning) and must not yield negative values.
        dim = self.rmat.shape[1]
        for f in [_var, _std]:
            for ddof in range(dim, dim + 2):
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    res = f(self.rmat, axis=1, ddof=ddof)
                    assert_(not (res < 0).any())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))

    def test_empty(self):
        # Reductions over an empty axis warn and produce NaN; reducing the
        # non-empty axis of a (0, 3) array yields an empty result.
        A = np.zeros((0, 3))
        for f in self.funcs:
            for axis in [0, None]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_(np.isnan(f(A, axis=axis)).all())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))
            for axis in [1]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_equal(f(A, axis=axis), np.zeros([]))

    def test_mean_values(self):
        # mean * count == sum, for real, complex and object matrices.
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * mat.shape[axis]
                assert_almost_equal(res, tgt)
            for axis in [None]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * np.prod(mat.shape)
                assert_almost_equal(res, tgt)

    def test_var_values(self):
        # var == E[x conj(x)] - mean * conj(mean).
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                msqr = _mean(mat * mat.conj(), axis=axis)
                mean = _mean(mat, axis=axis)
                tgt = msqr - mean * mean.conjugate()
                res = _var(mat, axis=axis)
                assert_almost_equal(res, tgt)

    def test_std_values(self):
        # std == sqrt(var).
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                tgt = np.sqrt(_var(mat, axis=axis))
                res = _std(mat, axis=axis)
                assert_almost_equal(res, tgt)

    def test_subclass(self):
        # mean/var/std must preserve ndarray subclasses and their attributes
        # (via __array_finalize__).
        class TestArray(np.ndarray):
            def __new__(cls, data, info):
                result = np.array(data)
                result = result.view(cls)
                result.info = info
                return result

            def __array_finalize__(self, obj):
                self.info = getattr(obj, "info", '')

        dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
        res = dat.mean(1)
        assert_(res.info == dat.info)
        res = dat.std(1)
        assert_(res.info == dat.info)
        res = dat.var(1)
        assert_(res.info == dat.info)


class TestVdot(TestCase):
    # np.vdot: scalar result for all numeric/object/bool dtypes and
    # independence from the operands' memory order.
    def test_basic(self):
        dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
        dt_complex = np.typecodes['Complex']

        # test real
        a = np.eye(3)
        for dt in dt_numeric + 'O':
            b = a.astype(dt)
            res = np.vdot(b, b)
            assert_(np.isscalar(res))
            assert_equal(np.vdot(b, b), 3)

        # test complex
        a = np.eye(3) * 1j
        for dt in dt_complex + 'O':
            b = a.astype(dt)
            res = np.vdot(b, b)
            assert_(np.isscalar(res))
            assert_equal(np.vdot(b, b), 3)

        # test boolean
        b = np.eye(3, dtype=np.bool)
        res = np.vdot(b, b)
        assert_(np.isscalar(res))
        assert_equal(np.vdot(b, b), True)

    def test_vdot_array_order(self):
        a = np.array([[1, 2], [3, 4]], order='C')
        b = np.array([[1, 2], [3, 4]], order='F')
        res = np.vdot(a, a)

        # integer arrays are exact
        assert_equal(np.vdot(a, b), res)
        assert_equal(np.vdot(b, a), res)
        assert_equal(np.vdot(b, b), res)


class \
TestDot(TestCase):
    # np.dot across matrix/vector/scalar operand shapes, the 3-argument
    # (out=) form, __numpy_ufunc__ overrides and BLAS alignment fixes.
    # Expected values below are regression values for seed 128 / seed 100.
    def setUp(self):
        np.random.seed(128)
        self.A = np.random.rand(4, 2)
        self.b1 = np.random.rand(2, 1)
        self.b2 = np.random.rand(2)
        self.b3 = np.random.rand(1, 2)
        self.b4 = np.random.rand(4)
        self.N = 7  # decimal places for assert_almost_equal

    def test_dotmatmat(self):
        A = self.A
        res = np.dot(A.transpose(), A)
        tgt = np.array([[1.45046013, 0.86323640],
                        [0.86323640, 0.84934569]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotmatvec(self):
        A, b1 = self.A, self.b1
        res = np.dot(A, b1)
        tgt = np.array([[0.32114320], [0.04889721],
                        [0.15696029], [0.33612621]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotmatvec2(self):
        A, b2 = self.A, self.b2
        res = np.dot(A, b2)
        tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecmat(self):
        A, b4 = self.A, self.b4
        res = np.dot(b4, A)
        tgt = np.array([1.23495091, 1.12222648])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecmat2(self):
        b3, A = self.b3, self.A
        res = np.dot(b3, A.transpose())
        tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecmat3(self):
        A, b4 = self.A, self.b4
        res = np.dot(A.transpose(), b4)
        tgt = np.array([1.23495091, 1.12222648])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecvecouter(self):
        b1, b3 = self.b1, self.b3
        res = np.dot(b1, b3)
        tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecvecinner(self):
        b1, b3 = self.b1, self.b3
        res = np.dot(b3, b1)
        tgt = np.array([[ 0.23129668]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotcolumnvect1(self):
        b1 = np.ones((3, 1))
        b2 = [5.3]
        res = np.dot(b1, b2)
        tgt = np.array([5.3, 5.3, 5.3])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotcolumnvect2(self):
        b1 = np.ones((3, 1)).transpose()
        b2 = [6.2]
        res = np.dot(b2, b1)
        tgt = np.array([6.2, 6.2, 6.2])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecscalar(self):
        np.random.seed(100)
        b1 = np.random.rand(1, 1)
        b2 = np.random.rand(1, 4)
        res = np.dot(b1, b2)
        tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecscalar2(self):
        np.random.seed(100)
        b1 = np.random.rand(4, 1)
        b2 = np.random.rand(1, 1)
        res = np.dot(b1, b2)
        tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_all(self):
        # All combinations of (), (1,), (1, 1) operand shapes; dout lists
        # the expected result shape for each product pair, in order.
        dims = [(), (1,), (1, 1)]
        dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
        for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
            b1 = np.zeros(dim1)
            b2 = np.zeros(dim2)
            res = np.dot(b1, b2)
            tgt = np.zeros(dim)
            assert_(res.shape == tgt.shape)
            assert_almost_equal(res, tgt, decimal=self.N)

    def test_vecobject(self):
        # dot over object arrays must give the same result for contiguous
        # and non-contiguous left operands.
        class Vec(object):
            def __init__(self, sequence=None):
                if sequence is None:
                    sequence = []
                self.array = np.array(sequence)

            def __add__(self, other):
                out = Vec()
                out.array = self.array + other.array
                return out

            def __sub__(self, other):
                out = Vec()
                out.array = self.array - other.array
                return out

            def __mul__(self, other):  # with scalar
                out = Vec(self.array.copy())
                out.array *= other
                return out

            def __rmul__(self, other):
                return self*other

        U_non_cont = np.transpose([[1., 1.], [1., 2.]])
        U_cont = np.ascontiguousarray(U_non_cont)
        x = np.array([Vec([1., 0.]), Vec([0., 1.])])
        zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
        zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
        assert_equal(zeros[0].array, zeros_test[0].array)
        assert_equal(zeros[1].array, zeros_test[1].array)

    def test_dot_2args(self):
        from numpy.core.multiarray import dot

        a = np.array([[1, 2], [3, 4]], dtype=float)
        b = np.array([[1, 0], [1, 1]], dtype=float)
        c = np.array([[3, 2], [7, 4]], dtype=float)

        d = dot(a, b)
        assert_allclose(c, d)

    def test_dot_3args(self):
        # The out= form must reuse the buffer (no refcount growth, identity
        # preserved) and match the two-argument result.
        from numpy.core.multiarray import dot

        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))

        r = np.empty((1024, 32))
        for i in range(12):
            dot(f, v, r)
        assert_equal(sys.getrefcount(r), 2)
        r2 = dot(f, v, out=None)
        assert_array_equal(r2, r)
        assert_(r is dot(f, v, out=r))

        v = v[:, 0].copy()  # v.shape == (16,)
        r = r[:, 0].copy()  # r.shape == (1024,)
        r2 = dot(f, v)
        assert_(r is dot(f, v, r))
        assert_array_equal(r2, r)

    def test_dot_3args_errors(self):
        # out= with wrong shape, non-contiguous layout or wrong dtype raises.
        from numpy.core.multiarray import dot

        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))

        r = np.empty((1024, 31))
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((1024,))
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((32,))
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((32, 1024))
        assert_raises(ValueError, dot, f, v, r)
        assert_raises(ValueError, dot, f, v, r.T)

        r = np.empty((1024, 64))
        assert_raises(ValueError, dot, f, v, r[:, ::2])
        assert_raises(ValueError, dot, f, v, r[:, :32])

        r = np.empty((1024, 32), dtype=np.float32)
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((1024, 32), dtype=int)
        assert_raises(ValueError, dot, f, v, r)

    def test_dot_array_order(self):
        a = np.array([[1, 2], [3, 4]], order='C')
        b = np.array([[1, 2], [3, 4]], order='F')
        res = np.dot(a, a)

        # integer arrays are exact
        assert_equal(np.dot(a, b), res)
        assert_equal(np.dot(b, a), res)
        assert_equal(np.dot(b, b), res)

    def test_dot_scalar_and_matrix_of_objects(self):
        # Ticket #2469
        arr = np.matrix([1, 2], dtype=object)
        desired = np.matrix([[3, 6]], dtype=object)
        assert_equal(np.dot(arr, 3), desired)
        assert_equal(np.dot(3, arr), desired)

    def test_dot_override(self):
        # __numpy_ufunc__ on either operand controls dispatch; a
        # NotImplemented return propagates as TypeError.
        class A(object):
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"

        class B(object):
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented

        a = A()
        b = B()
        c = np.array([[1]])

        assert_equal(np.dot(a, b), "A")
        assert_equal(c.dot(a), "A")
        assert_raises(TypeError, np.dot, b, c)
        assert_raises(TypeError, c.dot, b)

    def test_accelerate_framework_sgemv_fix(self):
        # Regression test for the OSX Accelerate sgemv segfault on
        # unaligned/strided single-precision operands.

        def aligned_array(shape, align, dtype, order='C'):
            # Allocate an array whose data pointer is offset to the given
            # (deliberately odd) byte alignment.
            d = dtype(0)
            N = np.prod(shape)
            tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
            address = tmp.__array_interface__["data"][0]
            for offset in range(align):
                if (address + offset) % align == 0:
                    break
            tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
            return tmp.reshape(shape, order=order)

        def as_aligned(arr, align, dtype, order='C'):
            aligned = aligned_array(arr.shape, align, dtype, order)
            aligned[:] = arr[:]
            return aligned

        def assert_dot_close(A, X, desired):
            assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)

        m = aligned_array(100, 15, np.float32)
        s = aligned_array((100, 100), 15, np.float32)
        # this will always segfault if the bug is present
        np.dot(s, m)

        testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
        for align, m, n, a_order in testdata:
            # Calculation in double precision
            A_d = np.random.rand(m, n)
            X_d = np.random.rand(n)
            desired = np.dot(A_d, X_d)
            # Calculation with aligned single precision
            A_f = as_aligned(A_d, align, np.float32, order=a_order)
            X_f = as_aligned(X_d, align, np.float32)
            assert_dot_close(A_f, X_f, desired)
            # Strided A rows
            A_d_2 = A_d[::2]
            desired = np.dot(A_d_2, X_d)
            A_f_2 = A_f[::2]
            assert_dot_close(A_f_2, X_f, desired)
            # Strided A columns, strided X vector
            A_d_22 = A_d_2[:, ::2]
            X_d_2 = X_d[::2]
            desired = np.dot(A_d_22, X_d_2)
            A_f_22 = A_f_2[:, ::2]
            X_f_2 = X_f[::2]
            assert_dot_close(A_f_22, X_f_2, desired)
            # Check the strides are as expected
            if a_order == 'F':
                assert_equal(A_f_22.strides, (8, 8 * m))
            else:
                assert_equal(A_f_22.strides, (8 * n, 8))
            assert_equal(X_f_2.strides, (8,))
            # Strides in A rows + cols only
            X_f_2c = as_aligned(X_f_2, align, np.float32)
            assert_dot_close(A_f_22, X_f_2c, desired)
            # Strides just in A cols
            A_d_12 = A_d[:, ::2]
            desired = np.dot(A_d_12, X_d_2)
            A_f_12 = A_f[:, ::2]
            assert_dot_close(A_f_12, X_f_2c, desired)
            # Strides in A cols and X
            assert_dot_close(A_f_12, X_f_2, desired)


class MatmulCommon():
    """Common tests for '@' operator and numpy.matmul.

    Do not derive from TestCase to avoid nose running it.
    """
    # Should work with these types. Will want to add
    # "O" at some point
    types = "?bhilqBHILQefdgFDG"

    def test_exceptions(self):
        # Incompatible core dimensions, scalar operands and non-broadcastable
        # stacks all raise ValueError.
        # NOTE(review): '(1)' in the "scalar vector" row is an int, not a
        # 1-tuple — verify this is intentional upstream.
        dims = [
            ((1,), (2,)),            # mismatched vector vector
            ((2, 1,), (2,)),         # mismatched matrix vector
            ((2,), (1, 2)),          # mismatched vector matrix
            ((1, 2), (3, 1)),        # mismatched matrix matrix
            ((1,), ()),              # vector scalar
            ((), (1)),               # scalar vector
            ((1, 1), ()),            # matrix scalar
            ((), (1, 1)),            # scalar matrix
            ((2, 2, 1), (3, 1, 2)),  # cannot broadcast
            ]
        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            assert_raises(ValueError, self.matmul, a, b)

    def test_shapes(self):
        # Stack dimensions broadcast on either side.
        dims = [
            ((1, 1), (2, 1, 1)),     # broadcast first argument
            ((2, 1, 1), (1, 1)),     # broadcast second argument
            ((2, 1, 1), (2, 1, 1)),  # matrix stack sizes match
            ]
        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            res = self.matmul(a, b)
            assert_(res.shape == (2, 1, 1))

        # vector vector returns scalars.
        for dt in self.types:
            a = np.ones((2,), dtype=dt)
            b = np.ones((2,), dtype=dt)
            c = self.matmul(a, b)
            assert_(np.array(c).shape == ())

    def test_result_types(self):
        # The result dtype matches the operand dtype; vector.vector gives a
        # true numpy scalar of that dtype.
        mat = np.ones((1,1))
        vec = np.ones((1,))
        for dt in self.types:
            m = mat.astype(dt)
            v = vec.astype(dt)
            for arg in [(m, v), (v, m), (m, m)]:
                res = self.matmul(*arg)
                assert_(res.dtype == dt)

            # vector vector returns scalars
            res = self.matmul(v, v)
            assert_(type(res) is np.dtype(dt).type)

    def test_vector_vector_values(self):
        vec = np.array([1, 2])
        tgt = 5
        for dt in self.types[1:]:
            v1 = vec.astype(dt)
            res = self.matmul(v1, v1)
            assert_equal(res, tgt)

        # boolean type
        vec = np.array([True, True], dtype='?')
        res = self.matmul(vec, vec)
        assert_equal(res, True)

    def test_vector_matrix_values(self):
        # vec @ mat and vec @ stacked-mat, for every dtype and bool.
        vec = np.array([1, 2])
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([7, 10])
        tgt2 = np.stack([tgt1]*2, axis=0)
        for dt in self.types[1:]:
            v = vec.astype(dt)
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            res = self.matmul(v, m1)
            assert_equal(res, tgt1)
            res = self.matmul(v, m2)
            assert_equal(res, tgt2)

        # boolean type
        vec = np.array([True, False])
        mat1 = np.array([[True, False], [False, True]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([True, False])
        tgt2 = np.stack([tgt1]*2, axis=0)

        res = self.matmul(vec, mat1)
        assert_equal(res, tgt1)
        res = self.matmul(vec, mat2)
        assert_equal(res, tgt2)

    def test_matrix_vector_values(self):
        # mat @ vec and stacked-mat @ vec.
        # NOTE(review): the boolean section below calls matmul(vec, mat*)
        # rather than matmul(mat*, vec) — looks copy-pasted from the
        # vector-matrix test; works only because the matrices here are
        # symmetric. Confirm against upstream.
        vec = np.array([1, 2])
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([5, 11])
        tgt2 = np.stack([tgt1]*2, axis=0)
        for dt in self.types[1:]:
            v = vec.astype(dt)
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            res = self.matmul(m1, v)
            assert_equal(res, tgt1)
            res = self.matmul(m2, v)
            assert_equal(res, tgt2)

        # boolean type
        vec = np.array([True, False])
        mat1 = np.array([[True, False], [False, True]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([True, False])
        tgt2 = np.stack([tgt1]*2, axis=0)

        res = self.matmul(vec, mat1)
        assert_equal(res, tgt1)
        res = self.matmul(vec, mat2)
        assert_equal(res, tgt2)

    def test_matrix_matrix_values(self):
        # mat @ mat and all stacked combinations, plus boolean semantics
        # (multiply -> AND, accumulate -> OR).
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.array([[1, 0], [1, 1]])
        mat12 = np.stack([mat1, mat2], axis=0)
        mat21 = np.stack([mat2, mat1], axis=0)
        tgt11 = np.array([[7, 10], [15, 22]])
        tgt12 = np.array([[3, 2], [7, 4]])
        tgt21 = np.array([[1, 2], [4, 6]])
        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
        tgt11_21 = np.stack((tgt11, tgt21), axis=0)
        for dt in self.types[1:]:
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            m12 = mat12.astype(dt)
            m21 = mat21.astype(dt)

            # matrix @ matrix
            res = self.matmul(m1, m2)
            assert_equal(res, tgt12)
            res = self.matmul(m2, m1)
            assert_equal(res, tgt21)

            # stacked @ matrix
            res = self.matmul(m12, m1)
            assert_equal(res, tgt11_21)

            # matrix @ stacked
            res = self.matmul(m1, m12)
            assert_equal(res, tgt11_12)

            # stacked @ stacked
            res = self.matmul(m12, m21)
            assert_equal(res, tgt12_21)

        # boolean type
        m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
        m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
        m12 = np.stack([m1, m2], axis=0)
        m21 = np.stack([m2, m1], axis=0)
        tgt11 = m1
        tgt12 = m1
        tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
        tgt11_21 = np.stack((tgt11, tgt21), axis=0)

        # matrix @ matrix
        res = self.matmul(m1, m2)
        assert_equal(res, tgt12)
        res = self.matmul(m2, m1)
        assert_equal(res, tgt21)

        # stacked @ matrix
        res = self.matmul(m12, m1)
        assert_equal(res, tgt11_21)

        # matrix @ stacked
        res = self.matmul(m1, m12)
        assert_equal(res, tgt11_12)

        # stacked @ stacked
        res = self.matmul(m12, m21)
        assert_equal(res, tgt12_21)

    def test_numpy_ufunc_override(self):
        # __numpy_ufunc__ on ndarray subclasses controls matmul dispatch;
        # NotImplemented from the only handler becomes TypeError.
        class A(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"

        class B(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented

        a = A([1, 2])
        b = B([1, 2])
        c = np.ones(2)
        assert_equal(self.matmul(a, b), "A")
        assert_equal(self.matmul(b, a), "A")
        assert_raises(TypeError, self.matmul, b, c)


class TestMatmul(MatmulCommon, TestCase):
    # Runs the MatmulCommon suite against the np.matmul function itself.
    matmul = np.matmul

    def test_out_arg(self):
        a = np.ones((2, 2), dtype=np.float)
        b = np.ones((2, 2), dtype=np.float)
        tgt = np.full((2,2), 2, dtype=np.float)

        # test as positional argument
        msg = "out positional argument"
        out = np.zeros((2, 2), dtype=np.float)
        self.matmul(a, b, out)
        assert_array_equal(out, tgt, err_msg=msg)

        # test as keyword argument
        msg = "out keyword argument"
        out = np.zeros((2, 2), dtype=np.float)
        self.matmul(a, b, out=out)
        assert_array_equal(out, tgt, err_msg=msg)

        # test out with not allowed type cast (safe casting)
        # einsum and cblas raise different error types, so
        # use Exception.
        msg = "out argument with illegal cast"
        out = np.zeros((2, 2), dtype=np.int32)
        assert_raises(Exception, self.matmul, a, b, out=out)

        # skip following tests for now, cblas does not allow non-contiguous
        # outputs and consistency with dot would require same type,
        # dimensions, subtype, and c_contiguous.
# test out with allowed type cast # msg = "out argument with allowed cast" # out = np.zeros((2, 2), dtype=np.complex128) # self.matmul(a, b, out=out) # assert_array_equal(out, tgt, err_msg=msg) # test out non-contiguous # msg = "out argument with non-contiguous layout" # c = np.zeros((2, 2, 2), dtype=np.float) # self.matmul(a, b, out=c[..., 0]) # assert_array_equal(c, tgt, err_msg=msg) if sys.version_info[:2] >= (3, 5): class TestMatmulOperator(MatmulCommon, TestCase): import operator matmul = operator.matmul def test_array_priority_override(self): class A(object): __array_priority__ = 1000 def __matmul__(self, other): return "A" def __rmatmul__(self, other): return "A" a = A() b = np.ones(2) assert_equal(self.matmul(a, b), "A") assert_equal(self.matmul(b, a), "A") def test_matmul_inplace(): # It would be nice to support in-place matmul eventually, but for now # we don't have a working implementation, so better just to error out # and nudge people to writing "a = a @ b". a = np.eye(3) b = np.eye(3) assert_raises(TypeError, a.__imatmul__, b) import operator assert_raises(TypeError, operator.imatmul, a, b) # we avoid writing the token `exec` so as not to crash python 2's # parser exec_ = getattr(builtins, "exec") assert_raises(TypeError, exec_, "a @= b", globals(), locals()) class TestInner(TestCase): def test_inner_scalar_and_matrix_of_objects(self): # Ticket #4482 arr = np.matrix([1, 2], dtype=object) desired = np.matrix([[3, 6]], dtype=object) assert_equal(np.inner(arr, 3), desired) assert_equal(np.inner(3, arr), desired) def test_vecself(self): # Ticket 844. 
# Inner product of a vector with itself segfaults or give # meaningless result a = np.zeros(shape=(1, 80), dtype=np.float64) p = np.inner(a, a) assert_almost_equal(p, 0, decimal=14) class TestSummarization(TestCase): def test_1d(self): A = np.arange(1001) strA = '[ 0 1 2 ..., 998 999 1000]' assert_(str(A) == strA) reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' assert_(repr(A) == reprA) def test_2d(self): A = np.arange(1002).reshape(2, 501) strA = '[[ 0 1 2 ..., 498 499 500]\n' \ ' [ 501 502 503 ..., 999 1000 1001]]' assert_(str(A) == strA) reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ ' [ 501, 502, 503, ..., 999, 1000, 1001]])' assert_(repr(A) == reprA) class TestChoose(TestCase): def setUp(self): self.x = 2*np.ones((3,), dtype=int) self.y = 3*np.ones((3,), dtype=int) self.x2 = 2*np.ones((2, 3), dtype=int) self.y2 = 3*np.ones((2, 3), dtype=int) self.ind = [0, 0, 1] def test_basic(self): A = np.choose(self.ind, (self.x, self.y)) assert_equal(A, [2, 2, 3]) def test_broadcast1(self): A = np.choose(self.ind, (self.x2, self.y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_broadcast2(self): A = np.choose(self.ind, (self.x, self.y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) # TODO: test for multidimensional NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} class TestNeighborhoodIter(TestCase): # Simple, 2d tests def _test_simple2d(self, dt): # Test zero and one padding for simple data type x = np.array([[0, 1], [2, 3]], dtype=dt) r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 
                1], x[0], NEIGH_MODE['one'])
        assert_array_equal(l, r)

        r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
             np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
             np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
             np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
                NEIGH_MODE['constant'])
        assert_array_equal(l, r)

    def test_simple2d(self):
        self._test_simple2d(np.float)

    def test_simple2d_object(self):
        # Same expectations with an object (Decimal) dtype.
        self._test_simple2d(Decimal)

    def _test_mirror2d(self, dt):
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
             np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

    def test_mirror2d(self):
        self._test_mirror2d(np.float)

    def test_mirror2d_object(self):
        self._test_mirror2d(Decimal)

    # Simple, 1d tests
    def _test_simple(self, dt):
        # Test padding with constant values
        x = np.linspace(1, 5, 5).astype(dt)
        r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
        assert_array_equal(l, r)

        r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
        l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
        assert_array_equal(l, r)

    def test_simple_float(self):
        self._test_simple(np.float)

    def test_simple_object(self):
        self._test_simple(Decimal)

    # Test mirror modes
    def _test_mirror(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
        self.assertTrue([i.dtype == dt for i in l])
        assert_array_equal(l, r)

    def test_mirror(self):
        self._test_mirror(np.float)

    def test_mirror_object(self):
        self._test_mirror(Decimal)

    # Circular mode
    def _test_circular(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

    def test_circular(self):
        self._test_circular(np.float)

    def test_circular_object(self):
        self._test_circular(Decimal)


# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
    # A second neighborhood iterator stacked on top of a first one; the
    # outer padding mode applies where the inner iterator went out of
    # bounds.  Each 'r' lists the neighborhood at every position.

    # Simple, 1d test: stacking 2 constant-padded neigh iterators
    def test_simple_const(self):
        dt = np.float64
        # Test zero and one padding for simple data type
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0], dtype=dt),
             np.array([0], dtype=dt),
             np.array([1], dtype=dt),
             np.array([2], dtype=dt),
             np.array([3], dtype=dt),
             np.array([0], dtype=dt),
             np.array([0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
                [0, 0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [np.array([1, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [-1, 1], NEIGH_MODE['one'])
        assert_array_equal(l, r)

    # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # mirror padding
    def test_simple_mirror(self):
        dt = np.float64
        # Stacking zero on top of mirror
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 1], dtype=dt),
             np.array([1, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 3], dtype=dt),
             np.array([3, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
                [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [-2, 0], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [0, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [-2, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

    # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # circular padding
    def test_simple_circular(self):
        dt = np.float64
        # Stacking zero on top of mirror
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 3, 1], dtype=dt),
             np.array([3, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 1], dtype=dt),
             np.array([3, 1, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
                [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [-2, 0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [0, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [-2, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

    # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
    # being strictly within the array
    def test_simple_strict_within(self):
        dt = np.float64
        # Stacking zero on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                [-1, 2], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                [-1, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                [-1, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)


class TestWarnings(object):

    def test_complex_warning(self):
        # Assigning complex values into a real array raises ComplexWarning
        # (escalated to an error here) and leaves the target unchanged.
        x = np.array([1, 2])
        y = np.array([1-2j, 1+2j])

        with warnings.catch_warnings():
            warnings.simplefilter("error", np.ComplexWarning)
            assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
            assert_equal(x, [1, 2])


class TestMinScalarType(object):
    # np.min_scalar_type picks the smallest dtype that can hold the value.

    def test_usigned_shortshort(self):
        dt = np.min_scalar_type(2**8-1)
        wanted = np.dtype('uint8')
        assert_equal(wanted, dt)

    def test_usigned_short(self):
        dt = \
np.min_scalar_type(2**16-1) wanted = np.dtype('uint16') assert_equal(wanted, dt) def test_usigned_int(self): dt = np.min_scalar_type(2**32-1) wanted = np.dtype('uint32') assert_equal(wanted, dt) def test_usigned_longlong(self): dt = np.min_scalar_type(2**63-1) wanted = np.dtype('uint64') assert_equal(wanted, dt) def test_object(self): dt = np.min_scalar_type(2**64) wanted = np.dtype('O') assert_equal(wanted, dt) if sys.version_info[:2] == (2, 6): from numpy.core.multiarray import memorysimpleview as memoryview from numpy.core._internal import _dtype_from_pep3118 class TestPEP3118Dtype(object): def _check(self, spec, wanted): dt = np.dtype(wanted) if isinstance(wanted, list) and isinstance(wanted[-1], tuple): if wanted[-1][0] == '': names = list(dt.names) names[-1] = '' dt.names = tuple(names) assert_equal(_dtype_from_pep3118(spec), dt, err_msg="spec %r != dtype %r" % (spec, wanted)) def test_native_padding(self): align = np.dtype('i').alignment for j in range(8): if j == 0: s = 'bi' else: s = 'b%dxi' % j self._check('@'+s, {'f0': ('i1', 0), 'f1': ('i', align*(1 + j//align))}) self._check('='+s, {'f0': ('i1', 0), 'f1': ('i', 1+j)}) def test_native_padding_2(self): # Native padding should work also for structs and sub-arrays self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) def test_trailing_padding(self): # Trailing padding should be included, *and*, the item size # should match the alignment if in aligned mode align = np.dtype('i').alignment def VV(n): return 'V%d' % (align*(1 + (n-1)//align)) self._check('ix', [('f0', 'i'), ('', VV(1))]) self._check('ixx', [('f0', 'i'), ('', VV(2))]) self._check('ixxx', [('f0', 'i'), ('', VV(3))]) self._check('ixxxx', [('f0', 'i'), ('', VV(4))]) self._check('i7x', [('f0', 'i'), ('', VV(7))]) self._check('^ix', [('f0', 'i'), ('', 'V1')]) self._check('^ixx', [('f0', 'i'), ('', 'V2')]) self._check('^ixxx', [('f0', 'i'), ('', 'V3')]) self._check('^ixxxx', 
[('f0', 'i'), ('', 'V4')]) self._check('^i7x', [('f0', 'i'), ('', 'V7')]) def test_native_padding_3(self): dt = np.dtype( [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True) self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) dt = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) def test_padding_with_array_inside_struct(self): dt = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True) self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) def test_byteorder_inside_struct(self): # The byte order after @T{=i} should be '=', not '@'. # Check this by noting the absence of native alignment. self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), 'f1': ('i', 5)}) def test_intra_padding(self): # Natively aligned sub-arrays may require some internal padding align = np.dtype('i').alignment def VV(n): return 'V%d' % (align*(1 + (n-1)//align)) self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,))) class TestNewBufferProtocol(object): def _check_roundtrip(self, obj): obj = np.asarray(obj) x = memoryview(obj) y = np.asarray(x) y2 = np.array(x) assert_(not y.flags.owndata) assert_(y2.flags.owndata) assert_equal(y.dtype, obj.dtype) assert_equal(y.shape, obj.shape) assert_array_equal(obj, y) assert_equal(y2.dtype, obj.dtype) assert_equal(y2.shape, obj.shape) assert_array_equal(obj, y2) def test_roundtrip(self): x = np.array([1, 2, 3, 4, 5], dtype='i4') self._check_roundtrip(x) x = np.array([[1, 2], [3, 4]], dtype=np.float64) self._check_roundtrip(x) x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] self._check_roundtrip(x) dt = [('a', 'b'), ('b', 'h'), ('c', 'i'), ('d', 'l'), ('dx', 'q'), ('e', 'B'), ('f', 'H'), ('g', 'I'), ('h', 'L'), ('hx', 'Q'), ('i', np.single), ('j', np.double), ('k', np.longdouble), ('ix', np.csingle), ('jx', np.cdouble), ('kx', np.clongdouble), ('l', 'S4'), ('m', 'U4'), ('n', 
'V3'), ('o', '?'), ('p', np.half), ] x = np.array( [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)], dtype=dt) self._check_roundtrip(x) x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='>i2') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<i2') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='>i4') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<i4') self._check_roundtrip(x) # check long long can be represented as non-native x = np.array([1, 2, 3], dtype='>q') self._check_roundtrip(x) # Native-only data types can be passed through the buffer interface # only in native byte order if sys.byteorder == 'little': x = np.array([1, 2, 3], dtype='>g') assert_raises(ValueError, self._check_roundtrip, x) x = np.array([1, 2, 3], dtype='<g') self._check_roundtrip(x) else: x = np.array([1, 2, 3], dtype='>g') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<g') assert_raises(ValueError, self._check_roundtrip, x) def test_roundtrip_half(self): half_list = [ 1.0, -2.0, 6.5504 * 10**4, # (max half precision) 2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal) 2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal) 0.0, -0.0, float('+inf'), float('-inf'), 0.333251953125, # ~= 1/3 ] x = np.array(half_list, dtype='>e') self._check_roundtrip(x) x = np.array(half_list, dtype='<e') self._check_roundtrip(x) def test_roundtrip_single_types(self): for typ in np.typeDict.values(): dtype = np.dtype(typ) if dtype.char in 'Mm': # datetimes cannot be used in buffers continue if dtype.char == 'V': # skip void continue x = np.zeros(4, dtype=dtype) self._check_roundtrip(x) if dtype.char not in 'qQgG': dt = dtype.newbyteorder('<') x = np.zeros(4, dtype=dt) self._check_roundtrip(x) dt = dtype.newbyteorder('>') x = np.zeros(4, dtype=dt) self._check_roundtrip(x) def test_roundtrip_scalar(self): # Issue #4015. 
self._check_roundtrip(0) def test_export_simple_1d(self): x = np.array([1, 2, 3, 4, 5], dtype='i') y = memoryview(x) assert_equal(y.format, 'i') assert_equal(y.shape, (5,)) assert_equal(y.ndim, 1) assert_equal(y.strides, (4,)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 4) def test_export_simple_nd(self): x = np.array([[1, 2], [3, 4]], dtype=np.float64) y = memoryview(x) assert_equal(y.format, 'd') assert_equal(y.shape, (2, 2)) assert_equal(y.ndim, 2) assert_equal(y.strides, (16, 8)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 8) def test_export_discontiguous(self): x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] y = memoryview(x) assert_equal(y.format, 'f') assert_equal(y.shape, (3, 3)) assert_equal(y.ndim, 2) assert_equal(y.strides, (36, 4)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 4) def test_export_record(self): dt = [('a', 'b'), ('b', 'h'), ('c', 'i'), ('d', 'l'), ('dx', 'q'), ('e', 'B'), ('f', 'H'), ('g', 'I'), ('h', 'L'), ('hx', 'Q'), ('i', np.single), ('j', np.double), ('k', np.longdouble), ('ix', np.csingle), ('jx', np.cdouble), ('kx', np.clongdouble), ('l', 'S4'), ('m', 'U4'), ('n', 'V3'), ('o', '?'), ('p', np.half), ] x = np.array( [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)], dtype=dt) y = memoryview(x) assert_equal(y.shape, (1,)) assert_equal(y.ndim, 1) assert_equal(y.suboffsets, EMPTY) sz = sum([np.dtype(b).itemsize for a, b in dt]) if np.dtype('l').itemsize == 4: assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') else: assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides if not (np.ones(1).strides[0] == np.iinfo(np.intp).max): assert_equal(y.strides, (sz,)) assert_equal(y.itemsize, sz) def test_export_subarray(self): x = 
np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) y = memoryview(x) assert_equal(y.format, 'T{(2,2)i:a:}') assert_equal(y.shape, EMPTY) assert_equal(y.ndim, 0) assert_equal(y.strides, EMPTY) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 16) def test_export_endian(self): x = np.array([1, 2, 3], dtype='>i') y = memoryview(x) if sys.byteorder == 'little': assert_equal(y.format, '>i') else: assert_equal(y.format, 'i') x = np.array([1, 2, 3], dtype='<i') y = memoryview(x) if sys.byteorder == 'little': assert_equal(y.format, 'i') else: assert_equal(y.format, '<i') def test_export_flags(self): # Check SIMPLE flag, see also gh-3613 (exception should be BufferError) assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',)) def test_padding(self): for j in range(8): x = np.array([(1,), (2,)], dtype={'f0': (int, j)}) self._check_roundtrip(x) def test_reference_leak(self): count_1 = sys.getrefcount(np.core._internal) a = np.zeros(4) b = memoryview(a) c = np.asarray(b) count_2 = sys.getrefcount(np.core._internal) assert_equal(count_1, count_2) del c # avoid pyflakes unused variable warning. 
def test_padded_struct_array(self): dt1 = np.dtype( [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True) x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1) self._check_roundtrip(x1) dt2 = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True) x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2) self._check_roundtrip(x2) dt3 = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3) self._check_roundtrip(x3) def test_relaxed_strides(self): # Test that relaxed strides are converted to non-relaxed c = np.ones((1, 10, 10), dtype='i8') # Check for NPY_RELAXED_STRIDES_CHECKING: if np.ones((10, 1), order="C").flags.f_contiguous: c.strides = (-1, 80, 8) assert memoryview(c).strides == (800, 80, 8) # Writing C-contiguous data to a BytesIO buffer should work fd = io.BytesIO() fd.write(c.data) fortran = c.T assert memoryview(fortran).strides == (8, 80, 800) arr = np.ones((1, 10)) if arr.flags.f_contiguous: shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS']) assert_(strides[0] == 8) arr = np.ones((10, 1), order='F') shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS']) assert_(strides[-1] == 8) class TestArrayAttributeDeletion(object): def test_multiarray_writable_attributes_deletion(self): """ticket #2046, should not seqfault, raise AttributeError""" a = np.ones(2) attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_not_writable_attributes_deletion(self): a = np.ones(2) attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base", "ctypes", "T", "__array_interface__", "__array_struct__", "__array_priority__", "__array_finalize__"] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_flags_writable_attribute_deletion(self): a = np.ones(2).flags attr = ['updateifcopy', 'aligned', 'writeable'] 
for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_flags_not_writable_attribute_deletion(self): a = np.ones(2).flags attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran", "owndata", "fnc", "forc", "behaved", "carray", "farray", "num"] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_array_interface(): # Test scalar coercion within the array interface class Foo(object): def __init__(self, value): self.value = value self.iface = {'typestr': '=f8'} def __float__(self): return float(self.value) @property def __array_interface__(self): return self.iface f = Foo(0.5) assert_equal(np.array(f), 0.5) assert_equal(np.array([f]), [0.5]) assert_equal(np.array([f, f]), [0.5, 0.5]) assert_equal(np.array(f).dtype, np.dtype('=f8')) # Test various shape definitions f.iface['shape'] = () assert_equal(np.array(f), 0.5) f.iface['shape'] = None assert_raises(TypeError, np.array, f) f.iface['shape'] = (1, 1) assert_equal(np.array(f), [[0.5]]) f.iface['shape'] = (2,) assert_raises(ValueError, np.array, f) # test scalar with no shape class ArrayLike(object): array = np.array(1) __array_interface__ = array.__array_interface__ assert_equal(np.array(ArrayLike()), 1) def test_flat_element_deletion(): it = np.ones(3).flat try: del it[1] del it[1:2] except TypeError: pass except: raise AssertionError def test_scalar_element_deletion(): a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')]) assert_raises(ValueError, a[0].__delitem__, 'x') class TestMemEventHook(TestCase): def test_mem_seteventhook(self): # The actual tests are within the C code in # multiarray/multiarray_tests.c.src test_pydatamem_seteventhook_start() # force an allocation and free of a numpy array # needs to be larger then limit of small memory cacher in ctors.c a = np.zeros(1000) del a test_pydatamem_seteventhook_end() class TestMapIter(TestCase): def test_mapiter(self): # The actual tests are within the C code in # multiarray/multiarray_tests.c.src a = 
np.arange(12).reshape((3, 4)).astype(float) index = ([1, 1, 2, 0], [0, 0, 2, 3]) vals = [50, 50, 30, 16] test_inplace_increment(a, index, vals) assert_equal(a, [[0.00, 1., 2.0, 19.], [104., 5., 6.0, 7.0], [8.00, 9., 40., 11.]]) b = np.arange(6).astype(float) index = (np.array([1, 2, 0]),) vals = [50, 4, 100.1] test_inplace_increment(b, index, vals) assert_equal(b, [100.1, 51., 6., 3., 4., 5.]) class TestAsCArray(TestCase): def test_1darray(self): array = np.arange(24, dtype=np.double) from_c = test_as_c_array(array, 3) assert_equal(array[3], from_c) def test_2darray(self): array = np.arange(24, dtype=np.double).reshape(3, 8) from_c = test_as_c_array(array, 2, 4) assert_equal(array[2, 4], from_c) def test_3darray(self): array = np.arange(24, dtype=np.double).reshape(2, 3, 4) from_c = test_as_c_array(array, 1, 2, 3) assert_equal(array[1, 2, 3], from_c) class TestConversion(TestCase): def test_array_scalar_relational_operation(self): #All integer for dt1 in np.typecodes['AllInteger']: assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,)) assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) for dt2 in np.typecodes['AllInteger']: assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) #Unsigned integers for dt1 in 'BHILQP': assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,)) assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) #unsigned vs signed for dt2 in 'bhilqp': assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) #Signed integers and floats for dt1 in 'bhlqp' + 
np.typecodes['Float']: assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) for dt2 in 'bhlqp' + np.typecodes['Float']: assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) class TestWhere(TestCase): def test_basic(self): dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128, np.longdouble, np.clongdouble] for dt in dts: c = np.ones(53, dtype=np.bool) assert_equal(np.where( c, dt(0), dt(1)), dt(0)) assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) assert_equal(np.where(True, dt(0), dt(1)), dt(0)) assert_equal(np.where(False, dt(0), dt(1)), dt(1)) d = np.ones_like(c).astype(dt) e = np.zeros_like(d) r = d.astype(dt) c[7] = False r[7] = e[7] assert_equal(np.where(c, e, e), e) assert_equal(np.where(c, d, e), r) assert_equal(np.where(c, d, e[0]), r) assert_equal(np.where(c, d[0], e), r) assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) def test_exotic(self): # object assert_array_equal(np.where(True, None, None), np.array(None)) # zero sized m = np.array([], dtype=bool).reshape(0, 3) b = np.array([], dtype=np.float64).reshape(0, 3) assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) # object cast d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 
0.699, 1.013, 1.267, 0.229, -1.39, 0.487]) nan = float('NaN') e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], dtype=object) m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool) r = e[:] r[np.where(m)] = d[np.where(m)] assert_array_equal(np.where(m, d, e), r) r = e[:] r[np.where(~m)] = d[np.where(~m)] assert_array_equal(np.where(m, e, d), r) assert_array_equal(np.where(m, e, e), e) # minimal dtype result with NaN scalar (e.g required by pandas) d = np.array([1., 2.], dtype=np.float32) e = float('NaN') assert_equal(np.where(True, d, e).dtype, np.float32) e = float('Infinity') assert_equal(np.where(True, d, e).dtype, np.float32) e = float('-Infinity') assert_equal(np.where(True, d, e).dtype, np.float32) # also check upcast e = float(1e150) assert_equal(np.where(True, d, e).dtype, np.float64) def test_ndim(self): c = [True, False] a = np.zeros((2, 25)) b = np.ones((2, 25)) r = np.where(np.array(c)[:,np.newaxis], a, b) assert_array_equal(r[0], a[0]) assert_array_equal(r[1], b[0]) a = a.T b = b.T r = np.where(c, a, b) assert_array_equal(r[:,0], a[:,0]) assert_array_equal(r[:,1], b[:,0]) def test_dtype_mix(self): c = np.array([False, True, False, False, False, False, True, False, False, False, True, False]) a = np.uint32(1) b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], dtype=np.float64) r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], dtype=np.float64) assert_equal(np.where(c, a, b), r) a = a.astype(np.float32) b = b.astype(np.int64) assert_equal(np.where(c, a, b), r) # non bool mask c = c.astype(np.int) c[c != 0] = 34242324 assert_equal(np.where(c, a, b), r) # invert tmpmask = c != 0 c[c == 0] = 41247212 c[tmpmask] = 0 assert_equal(np.where(c, b, a), r) def test_foreign(self): c = np.array([False, True, False, False, False, False, True, False, False, False, True, False]) r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], 
dtype=np.float64) a = np.ones(1, dtype='>i4') b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], dtype=np.float64) assert_equal(np.where(c, a, b), r) b = b.astype('>f8') assert_equal(np.where(c, a, b), r) a = a.astype('<i4') assert_equal(np.where(c, a, b), r) c = c.astype('>i4') assert_equal(np.where(c, a, b), r) def test_error(self): c = [True, True] a = np.ones((4, 5)) b = np.ones((5, 5)) assert_raises(ValueError, np.where, c, a, a) assert_raises(ValueError, np.where, c[0], a, b) def test_string(self): # gh-4778 check strings are properly filled with nulls a = np.array("abc") b = np.array("x" * 753) assert_equal(np.where(True, a, b), "abc") assert_equal(np.where(False, b, a), "abc") # check native datatype sized strings a = np.array("abcd") b = np.array("x" * 8) assert_equal(np.where(True, a, b), "abcd") assert_equal(np.where(False, b, a), "abcd") class TestSizeOf(TestCase): def test_empty_array(self): x = np.array([]) assert_(sys.getsizeof(x) > 0) def check_array(self, dtype): elem_size = dtype(0).itemsize for length in [10, 50, 100, 500]: x = np.arange(length, dtype=dtype) assert_(sys.getsizeof(x) > length * elem_size) def test_array_int32(self): self.check_array(np.int32) def test_array_int64(self): self.check_array(np.int64) def test_array_float32(self): self.check_array(np.float32) def test_array_float64(self): self.check_array(np.float64) def test_view(self): d = np.ones(100) assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) def test_reshape(self): d = np.ones(100) assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) def test_resize(self): d = np.ones(100) old = sys.getsizeof(d) d.resize(50) assert_(old > sys.getsizeof(d)) d.resize(150) assert_(old < sys.getsizeof(d)) def test_error(self): d = np.ones(100) assert_raises(TypeError, d.__sizeof__, "a") class TestHashing(TestCase): def test_collections_hashable(self): x = np.array([]) self.assertFalse(isinstance(x, collections.Hashable)) from numpy.core._internal import 
_view_is_safe class TestObjViewSafetyFuncs(TestCase): def test_view_safety(self): psize = np.dtype('p').itemsize # creates dtype but with extra character code - for missing 'p' fields def mtype(s): n, offset, fields = 0, 0, [] for c in s.split(','): # subarrays won't work if c != '-': fields.append(('f{0}'.format(n), c, offset)) n += 1 offset += np.dtype(c).itemsize if c != '-' else psize names, formats, offsets = zip(*fields) return np.dtype({'names': names, 'formats': formats, 'offsets': offsets, 'itemsize': offset}) # test nonequal itemsizes with objects: # these should succeed: _view_is_safe(np.dtype('O,p,O,p'), np.dtype('O,p,O,p,O,p')) _view_is_safe(np.dtype('O,O'), np.dtype('O,O,O')) # these should fail: assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,O')) assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,p')) assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('p,O')) # test nonequal itemsizes with missing fields: # these should succeed: _view_is_safe(mtype('-,p,-,p'), mtype('-,p,-,p,-,p')) _view_is_safe(np.dtype('p,p'), np.dtype('p,p,p')) # these should fail: assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,p')) assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,-')) assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('-,p')) # scans through positions at which we can view a type def scanView(d1, otype): goodpos = [] for shift in range(d1.itemsize - np.dtype(otype).itemsize+1): d2 = np.dtype({'names': ['f0'], 'formats': [otype], 'offsets': [shift], 'itemsize': d1.itemsize}) try: _view_is_safe(d1, d2) except TypeError: pass else: goodpos.append(shift) return goodpos # test partial overlap with object field assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'p'), [0] + list(range(2*psize, 3*psize+1))) assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'O'), [psize, 4*psize, 5*psize]) # test partial overlap with missing field assert_equal(scanView(mtype('p,-,p,p,-,-'), 
'p'), [0] + list(range(2*psize, 3*psize+1))) # test nested structures with objects: nestedO = np.dtype([('f0', 'p'), ('f1', 'p,O,p')]) assert_equal(scanView(nestedO, 'p'), list(range(psize+1)) + [3*psize]) assert_equal(scanView(nestedO, 'O'), [2*psize]) # test nested structures with missing fields: nestedM = np.dtype([('f0', 'p'), ('f1', mtype('p,-,p'))]) assert_equal(scanView(nestedM, 'p'), list(range(psize+1)) + [3*psize]) # test subarrays with objects subarrayO = np.dtype('p,(2,3)O,p') assert_equal(scanView(subarrayO, 'p'), [0, 7*psize]) assert_equal(scanView(subarrayO, 'O'), list(range(psize, 6*psize+1, psize))) #test dtype with overlapping fields overlapped = np.dtype({'names': ['f0', 'f1', 'f2', 'f3'], 'formats': ['p', 'p', 'p', 'p'], 'offsets': [0, 1, 3*psize-1, 3*psize], 'itemsize': 4*psize}) assert_equal(scanView(overlapped, 'p'), [0, 1, 3*psize-1, 3*psize]) class TestArrayPriority(TestCase): # This will go away when __array_priority__ is settled, meanwhile # it serves to check unintended changes. op = operator binary_ops = [ op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, op.ge, op.lt, op.le, op.ne, op.eq ] if sys.version_info[0] < 3: binary_ops.append(op.div) class Foo(np.ndarray): __array_priority__ = 100. def __new__(cls, *args, **kwargs): return np.array(*args, **kwargs).view(cls) class Bar(np.ndarray): __array_priority__ = 101. def __new__(cls, *args, **kwargs): return np.array(*args, **kwargs).view(cls) class Other(object): __array_priority__ = 1000. 
def _all(self, other): return self.__class__() __add__ = __radd__ = _all __sub__ = __rsub__ = _all __mul__ = __rmul__ = _all __pow__ = __rpow__ = _all __div__ = __rdiv__ = _all __mod__ = __rmod__ = _all __truediv__ = __rtruediv__ = _all __floordiv__ = __rfloordiv__ = _all __and__ = __rand__ = _all __xor__ = __rxor__ = _all __or__ = __ror__ = _all __lshift__ = __rlshift__ = _all __rshift__ = __rrshift__ = _all __eq__ = _all __ne__ = _all __gt__ = _all __ge__ = _all __lt__ = _all __le__ = _all def test_ndarray_subclass(self): a = np.array([1, 2]) b = self.Bar([1, 2]) for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Bar), msg) assert_(isinstance(f(b, a), self.Bar), msg) def test_ndarray_other(self): a = np.array([1, 2]) b = self.Other() for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Other), msg) assert_(isinstance(f(b, a), self.Other), msg) def test_subclass_subclass(self): a = self.Foo([1, 2]) b = self.Bar([1, 2]) for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Bar), msg) assert_(isinstance(f(b, a), self.Bar), msg) def test_subclass_other(self): a = self.Foo([1, 2]) b = self.Other() for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Other), msg) assert_(isinstance(f(b, a), self.Other), msg) class TestBytestringArrayNonzero(TestCase): def test_empty_bstring_array_is_falsey(self): self.assertFalse(np.array([''], dtype=np.str)) def test_whitespace_bstring_array_is_falsey(self): a = np.array(['spam'], dtype=np.str) a[0] = ' \0\0' self.assertFalse(a) def test_all_null_bstring_array_is_falsey(self): a = np.array(['spam'], dtype=np.str) a[0] = '\0\0\0\0' self.assertFalse(a) def test_null_inside_bstring_array_is_truthy(self): a = np.array(['spam'], dtype=np.str) a[0] = ' \0 \0' self.assertTrue(a) class TestUnicodeArrayNonzero(TestCase): def test_empty_ustring_array_is_falsey(self): self.assertFalse(np.array([''], dtype=np.unicode)) def 
test_whitespace_ustring_array_is_falsey(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = ' \0\0' self.assertFalse(a) def test_all_null_ustring_array_is_falsey(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = '\0\0\0\0' self.assertFalse(a) def test_null_inside_ustring_array_is_truthy(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = ' \0 \0' self.assertTrue(a) if __name__ == "__main__": run_module_suite()
Java
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.

===================================================================*/

#ifndef mitkVtkPropRenderer_h
#define mitkVtkPropRenderer_h

#include "mitkBaseRenderer.h"
#include <MitkCoreExports.h>
#include <itkCommand.h>
#include <mitkDataStorage.h>
#include <mitkRenderingManager.h>
#include <map>
#include <utility>

// Forward declarations for VTK types that are only used by pointer below.
class vtkRenderWindow;
class vtkLight;
class vtkLightKit;
class vtkWorldPointPicker;
class vtkPointPicker;
class vtkCellPicker;
class vtkTextActor;
class vtkTextProperty;
class vtkAssemblyPath;

#include <vtkAssemblyPaths.h>
#include <vtkSmartPointer.h>

namespace mitk
{
  class Mapper;

  /*!
  \brief VtkPropRenderer

  VtkPropRenderer organizes the MITK rendering process. The MITK rendering
  process is completely integrated into the VTK rendering pipeline.
  The vtkMitkRenderProp is a custom vtkProp derived class, which implements
  the rendering interface between MITK and VTK. It redirects render() calls
  to the VtkPropRenderer, which is responsible for rendering of the
  datatreenodes.

  VtkPropRenderer replaces the old OpenGLRenderer.

  \sa rendering
  \ingroup rendering
  */
  class MITKCORE_EXPORT VtkPropRenderer : public BaseRenderer
  {
    // Workaround for Displaylistbug
  private:
    // Tracks whether this renderer has been counted by checkState()
    // (display-list workaround bookkeeping).
    bool didCount;
    void checkState();

    // Workaround END

  public:
    mitkClassMacro(VtkPropRenderer, BaseRenderer);
    mitkNewMacro3Param(VtkPropRenderer, const char *, vtkRenderWindow *, mitk::RenderingManager *);
    mitkNewMacro4Param(VtkPropRenderer,
                       const char *,
                       vtkRenderWindow *,
                       mitk::RenderingManager *,
                       mitk::BaseRenderer::RenderingMode::Type);

    // Maps a render order/id to the mapper responsible for it.
    typedef std::map<int, Mapper *> MappersMapType;

    // Render - called by vtkMitkRenderProp, returns the number of props rendered
    enum RenderType
    {
      Opaque,
      Translucent,
      Overlay,
      Volumetric
    };

    int Render(RenderType type);

    /** \brief This methods contains all method necessary before a VTK Render() call */
    virtual void PrepareRender();

    // Activate the current renderwindow.
    virtual void MakeCurrent();

    virtual void SetDataStorage(
      mitk::DataStorage *storage) override; ///< set the datastorage that will be used for rendering

    virtual void InitRenderer(vtkRenderWindow *renderwindow) override;
    virtual void Update(mitk::DataNode *datatreenode);
    virtual void SetMapperID(const MapperSlotId mapperId) override;

    // Size
    virtual void InitSize(int w, int h) override;
    virtual void Resize(int w, int h) override;

    // Picking
    enum PickingMode
    {
      WorldPointPicking,
      PointPicking,
      CellPicking
    };

    /** \brief Set the picking mode.
    This method is used to set the picking mode for 3D object picking. The user can select one of
    the three options WorldPointPicking, PointPicking and CellPicking. The first option uses the zBuffer from
    graphics rendering, the second uses the 3D points from the closest surface mesh, and the third option uses the
    cells of that mesh. The last option is the slowest, the first one the fastest. However, the first option cannot
    use transparent data object and the tolerance of the picked position to the selected point should be considered.
    PointPicking also need a tolerance around the picking position to select the closest point in the mesh. The
    CellPicker performs very well, if the foreground surface part (i.e. the surfacepart that is closest to the
    scene's cameras) needs to be picked. */
    itkSetEnumMacro(PickingMode, PickingMode);
    itkGetEnumMacro(PickingMode, PickingMode);

    virtual void PickWorldPoint(const Point2D &displayPoint, Point3D &worldPoint) const override;
    virtual mitk::DataNode *PickObject(const Point2D &displayPosition, Point3D &worldPosition) const override;

    /**
     * @brief WriteSimpleText Write a text in a renderwindow.
     *
     * Writes some 2D text as overlay. Function returns an unique int Text_ID for each call, which can be used via the
     * GetTextLabelProperty(int text_id) function in order to get a vtkTextProperty. This property enables the setup of
     * font, font size, etc.
     *
     * @deprecatedSince{2015_05} Please use mitkTextOverlay2D instead.
     * See mitkTextOverlay2DRenderingTest for an example.
     */
    DEPRECATED(int WriteSimpleText(std::string text,
                                   double posX,
                                   double posY,
                                   double color1 = 0.0,
                                   double color2 = 1.0,
                                   double color3 = 0.0,
                                   float opacity = 1.0));

    /**
     * @brief GetTextLabelProperty can be used in order to get a vtkTextProperty for
     * a specific text_id. This property enables the setup of font, font size, etc.
     * @param text_id the id of the text property.
     * @deprecatedSince{2015_05} Please use mitkTextOverlay2D instead.
     * See mitkTextOverlay2DRenderingTest for an example.
     */
    DEPRECATED(vtkTextProperty *GetTextLabelProperty(int text_id));

    /** This method calculates the bounds of the DataStorage (if it contains any
     * valid data), creates a geometry from these bounds and sets it as world
     * geometry of the renderer.
     *
     * Call this method to re-initialize the renderer to the current DataStorage
     * (e.g. after loading an additional dataset), to ensure that the view is
     * aligned correctly.
     */
    virtual bool SetWorldGeometryToDataStorageBounds() override;

    /**
     * \brief Used by vtkPointPicker/vtkPicker.
     * This will query a list of all objects in MITK and provide every vtk based mapper to the picker.
     */
    void InitPathTraversal();

    /**
     * \brief Used by vtkPointPicker/vtkPicker.
     * This will query a list of all objects in MITK and provide every vtk based mapper to the picker.
     */
    vtkAssemblyPath *GetNextPath();

    int GetNumberOfPaths();

    const vtkWorldPointPicker *GetWorldPointPicker() const;
    const vtkPointPicker *GetPointPicker() const;
    const vtkCellPicker *GetCellPicker() const;

    /**
     * \brief Release vtk-based graphics resources. Called by
     * vtkMitkRenderProp::ReleaseGraphicsResources.
     */
    virtual void ReleaseGraphicsResources(vtkWindow *renWin);

    MappersMapType GetMappersMap() const;

    static bool useImmediateModeRendering();

  protected:
    VtkPropRenderer(
      const char *name = "VtkPropRenderer",
      vtkRenderWindow *renWin = nullptr,
      mitk::RenderingManager *rm = nullptr,
      mitk::BaseRenderer::RenderingMode::Type renderingMode = mitk::BaseRenderer::RenderingMode::Standard);
    virtual ~VtkPropRenderer();
    virtual void Update() override;

    // itk::Command-style callback hooked into the VTK rendering pipeline.
    static void RenderingCallback(vtkObject *caller, unsigned long eid, void *clientdata, void *calldata);

    virtual void UpdatePaths(); // apply transformations and properties recursively

  private:
    // Cached assembly paths plus a timestamp to detect when they are stale.
    vtkSmartPointer<vtkAssemblyPaths> m_Paths;
    vtkTimeStamp m_PathTime;

    // prepare all mitk::mappers for rendering
    void PrepareMapperQueue();

    /** \brief Set parallel projection, remove the interactor and the lights of VTK.
     */
    bool Initialize2DvtkCamera();

    bool m_InitNeeded;
    bool m_ResizeNeeded;
    MapperSlotId m_CameraInitializedForMapperID;

    // Picking
    vtkWorldPointPicker *m_WorldPointPicker;
    vtkPointPicker *m_PointPicker;
    vtkCellPicker *m_CellPicker;

    PickingMode m_PickingMode;

    // Explicit use of SmartPointer to avoid circular #includes
    itk::SmartPointer<mitk::Mapper> m_CurrentWorldPlaneGeometryMapper;

    vtkLightKit *m_LightKit;

    // sorted list of mappers
    MappersMapType m_MappersMap;

    // rendering of text
    vtkRenderer *m_TextRenderer;
    typedef std::map<unsigned int, vtkTextActor *> TextMapType;
    TextMapType m_TextCollection;
  };
} // namespace mitk

#endif /* mitkVtkPropRenderer_h */
import urllib

from canvas import util


def make_cookie_key(key):
    """Namespace `key` under the after-signup cookie prefix."""
    return 'after_signup_' + str(key)


def _get(request, key):
    """Fetch and deserialize the after-signup cookie for `key`.

    Returns a (cookie_key, value) pair, where value is the JSON-decoded
    cookie payload or None when no such cookie is set on the request.
    """
    cookie_key = make_cookie_key(key)
    raw = request.COOKIES.get(cookie_key)
    if raw is None:
        return (cookie_key, None)
    return (cookie_key, util.loads(urllib.unquote(raw)))


def get_posted_comment(request):
    '''
    Gets a comment waiting to be posted, if one exists.

    Returns a pair containing the cookie key used to retrieve it and its
    deserialized JSON.
    '''
    #TODO use dcramer's django-cookies so that we don't rely on having the response object to mutate cookies.
    # That would make this API much cleaner and isolated.
    return _get(request, 'post_comment')
Java
/*
    -- MAGMA (version 1.6.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2015

       @author Mark Gates

       @generated from testing_zunmqr.cpp normal z -> s, Fri Jan 30 19:00:25 2015
*/

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>

// includes, project
#include "flops.h"
#include "magma.h"
#include "magma_lapack.h"
#include "testings.h"

/* ////////////////////////////////////////////////////////////////////////////
   -- Testing sormqr

   Benchmarks and verifies magma_sormqr (apply Q or Q^T from an SGEQRF
   factorization to a full matrix C) against the LAPACK reference sormqr,
   over all four (side, trans) combinations for each requested test size.
   Returns the number of failed tolerance checks as the process exit status.
*/
int main( int argc, char** argv )
{
    TESTING_INIT();

    real_Double_t gflops, gpu_perf, gpu_time, cpu_perf, cpu_time;
    float error, work[1];                 // work[] for slange (Fro norm needs none)
    float c_neg_one = MAGMA_S_NEG_ONE;
    magma_int_t ione = 1;
    magma_int_t mm, m, n, k, size, info;
    magma_int_t ISEED[4] = {0,0,0,1};     // fixed seed -> reproducible inputs
    magma_int_t nb, ldc, lda, lwork, lwork_max;
    float *C, *R, *A, *W, *tau;           // C: reference result, R: MAGMA result
    magma_int_t status = 0;

    magma_opts opts;
    parse_opts( argc, argv, &opts );

    // need slightly looser bound (60*eps instead of 30*eps) for some tests
    opts.tolerance = max( 60., opts.tolerance );
    float tol = opts.tolerance * lapackf77_slamch("E");

    // test all combinations of input parameters
    magma_side_t side [] = { MagmaLeft, MagmaRight };
    magma_trans_t trans[] = { MagmaTrans, MagmaNoTrans };

    printf(" M N K side trans CPU GFlop/s (sec) GPU GFlop/s (sec) ||R||_F / ||QC||_F\n");
    printf("===============================================================================================\n");
    for( int itest = 0; itest < opts.ntest; ++itest ) {
      for( int iside = 0; iside < 2; ++iside ) {
      for( int itran = 0; itran < 2; ++itran ) {
        for( int iter = 0; iter < opts.niter; ++iter ) {
            m = opts.msize[itest];
            n = opts.nsize[itest];
            k = opts.ksize[itest];
            nb  = magma_get_sgeqrf_nb( m );
            ldc = m;
            // A is m x k (left) or n x k (right)
            mm = (side[iside] == MagmaLeft ? m : n);
            lda = mm;
            gflops = FLOPS_SORMQR( m, n, k, side[iside] ) / 1e9;

            // sormqr requires k <= m (left) or k <= n (right); skip invalid combos
            if ( side[iside] == MagmaLeft && m < k ) {
                printf( "%5d %5d %5d %4c %5c skipping because side=left and m < k\n",
                        (int) m, (int) n, (int) k,
                        lapacke_side_const( side[iside] ),
                        lapacke_trans_const( trans[itran] ) );
                continue;
            }
            if ( side[iside] == MagmaRight && n < k ) {
                printf( "%5d %5d %5d %4c %5c skipping because side=right and n < k\n",
                        (int) m, (int) n, (int) k,
                        lapacke_side_const( side[iside] ),
                        lapacke_trans_const( trans[itran] ) );
                continue;
            }

            // need at least 2*nb*nb for geqrf
            lwork_max = max( max( m*nb, n*nb ), 2*nb*nb );

            TESTING_MALLOC_CPU( C,   float, ldc*n );
            TESTING_MALLOC_CPU( R,   float, ldc*n );
            TESTING_MALLOC_CPU( A,   float, lda*k );
            TESTING_MALLOC_CPU( W,   float, lwork_max );
            TESTING_MALLOC_CPU( tau, float, k );

            // C is full, m x n
            size = ldc*n;
            lapackf77_slarnv( &ione, ISEED, &size, C );
            lapackf77_slacpy( "Full", &m, &n, C, &ldc, R, &ldc );

            size = lda*k;
            lapackf77_slarnv( &ione, ISEED, &size, A );

            // compute QR factorization to get Householder vectors in A, tau
            magma_sgeqrf( mm, k, A, lda, tau, W, lwork_max, &info );
            if (info != 0)
                printf("magma_sgeqrf returned error %d: %s.\n",
                       (int) info, magma_strerror( info ));

            /* =====================================================================
               Performs operation using LAPACK
               =================================================================== */
            cpu_time = magma_wtime();
            lapackf77_sormqr( lapack_side_const( side[iside] ),
                              lapack_trans_const( trans[itran] ),
                              &m, &n, &k,
                              A, &lda, tau, C, &ldc, W, &lwork_max, &info );
            cpu_time = magma_wtime() - cpu_time;
            cpu_perf = gflops / cpu_time;
            if (info != 0)
                printf("lapackf77_sormqr returned error %d: %s.\n",
                       (int) info, magma_strerror( info ));

            /* ====================================================================
               Performs operation using MAGMA
               =================================================================== */
            // query for workspace size (lwork = -1 is the LAPACK-style query;
            // optimal size is returned in W[0])
            lwork = -1;
            magma_sormqr( side[iside], trans[itran], m, n, k,
                          A, lda, tau, R, ldc, W, lwork, &info );
            if (info != 0)
                printf("magma_sormqr (lwork query) returned error %d: %s.\n",
                       (int) info, magma_strerror( info ));
            lwork = (magma_int_t) MAGMA_S_REAL( W[0] );
            // NOTE(review): the message below also fires when lwork < 0, where
            // "optimal lwork %d > lwork_max %d" reads oddly; behavior preserved.
            if ( lwork < 0 || lwork > lwork_max ) {
                printf("optimal lwork %d > lwork_max %d\n",
                       (int) lwork, (int) lwork_max );
                lwork = lwork_max;
            }

            gpu_time = magma_wtime();
            magma_sormqr( side[iside], trans[itran], m, n, k,
                          A, lda, tau, R, ldc, W, lwork, &info );
            gpu_time = magma_wtime() - gpu_time;
            gpu_perf = gflops / gpu_time;
            if (info != 0)
                printf("magma_sormqr returned error %d: %s.\n",
                       (int) info, magma_strerror( info ));

            /* =====================================================================
               compute relative error |QC_magma - QC_lapack| / |QC_lapack|
               =================================================================== */
            error = lapackf77_slange( "Fro", &m, &n, C, &ldc, work );
            size = ldc*n;
            blasf77_saxpy( &size, &c_neg_one, C, &ione, R, &ione );
            error = lapackf77_slange( "Fro", &m, &n, R, &ldc, work ) / error;

            printf( "%5d %5d %5d %4c %5c %7.2f (%7.2f) %7.2f (%7.2f) %8.2e %s\n",
                    (int) m, (int) n, (int) k,
                    lapacke_side_const( side[iside] ),
                    lapacke_trans_const( trans[itran] ),
                    cpu_perf, cpu_time, gpu_perf, gpu_time,
                    error, (error < tol ? "ok" : "failed") );
            status += ! (error < tol);

            TESTING_FREE_CPU( C );
            TESTING_FREE_CPU( R );
            TESTING_FREE_CPU( A );
            TESTING_FREE_CPU( W );
            TESTING_FREE_CPU( tau );
            fflush( stdout );
        }
        if ( opts.niter > 1 ) {
            printf( "\n" );
        }
      }}  // end iside, itran
      printf( "\n" );
    }

    TESTING_FINALIZE();
    return status;
}
Java
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/bluetooth/bluetooth_adapter.h" #include "base/bind.h" #include "base/lazy_instance.h" #include "base/logging.h" #include "base/stl_util.h" #include "base/values.h" #include "chrome/browser/chromeos/bluetooth/bluetooth_device.h" #include "chromeos/dbus/bluetooth_adapter_client.h" #include "chromeos/dbus/bluetooth_device_client.h" #include "chromeos/dbus/bluetooth_manager_client.h" #include "chromeos/dbus/bluetooth_out_of_band_client.h" #include "chromeos/dbus/dbus_thread_manager.h" #include "dbus/object_path.h" namespace { // Shared default adapter instance, we don't want to keep this class around // if nobody is using it so use a WeakPtr and create the object when needed; // since Google C++ Style (and clang's static analyzer) forbids us having // exit-time destructors we use a leaky lazy instance for it. 
// Process-wide handle to the default adapter (rationale for the leaky
// LazyInstance + WeakPtr is in the comment opening this anonymous namespace).
base::LazyInstance<base::WeakPtr<chromeos::BluetoothAdapter> >::Leaky
    default_adapter = LAZY_INSTANCE_INITIALIZER;

}  // namespace

namespace chromeos {

// Registers this object as an observer of the manager, adapter and device
// D-Bus clients so their signals are routed into the methods below.
BluetoothAdapter::BluetoothAdapter() : weak_ptr_factory_(this),
                                       track_default_(false),
                                       powered_(false),
                                       discovering_(false) {
  DBusThreadManager::Get()->GetBluetoothManagerClient()->
      AddObserver(this);
  DBusThreadManager::Get()->GetBluetoothAdapterClient()->
      AddObserver(this);
  DBusThreadManager::Get()->GetBluetoothDeviceClient()->
      AddObserver(this);
}

// Unregisters observers in reverse order; the adapter owns the
// BluetoothDevice objects held in devices_.
BluetoothAdapter::~BluetoothAdapter() {
  DBusThreadManager::Get()->GetBluetoothDeviceClient()->
      RemoveObserver(this);
  DBusThreadManager::Get()->GetBluetoothAdapterClient()->
      RemoveObserver(this);
  DBusThreadManager::Get()->GetBluetoothManagerClient()->
      RemoveObserver(this);

  STLDeleteValues(&devices_);
}

void BluetoothAdapter::AddObserver(Observer* observer) {
  DCHECK(observer);
  observers_.AddObserver(observer);
}

void BluetoothAdapter::RemoveObserver(Observer* observer) {
  DCHECK(observer);
  observers_.RemoveObserver(observer);
}

// "Present" means we are currently bound to a D-Bus adapter object path.
bool BluetoothAdapter::IsPresent() const {
  return !object_path_.value().empty();
}

bool BluetoothAdapter::IsPowered() const {
  return powered_;
}

// Asynchronously sets the adapter's Powered property; exactly one of
// |callback| / |error_callback| runs via OnSetPowered.
void BluetoothAdapter::SetPowered(bool powered,
                                  const base::Closure& callback,
                                  const ErrorCallback& error_callback) {
  DBusThreadManager::Get()->GetBluetoothAdapterClient()->
      GetProperties(object_path_)->powered.Set(
          powered,
          base::Bind(&BluetoothAdapter::OnSetPowered,
                     weak_ptr_factory_.GetWeakPtr(),
                     callback,
                     error_callback));
}

bool BluetoothAdapter::IsDiscovering() const {
  return discovering_;
}

// Starts or stops device discovery; completion is reported through
// OnStartDiscovery / OnStopDiscovery.
void BluetoothAdapter::SetDiscovering(bool discovering,
                                      const base::Closure& callback,
                                      const ErrorCallback& error_callback) {
  if (discovering) {
    DBusThreadManager::Get()->GetBluetoothAdapterClient()->
        StartDiscovery(object_path_,
                       base::Bind(&BluetoothAdapter::OnStartDiscovery,
                                  weak_ptr_factory_.GetWeakPtr(),
                                  callback, error_callback));
  } else {
    DBusThreadManager::Get()->GetBluetoothAdapterClient()->
        StopDiscovery(object_path_,
                      base::Bind(&BluetoothAdapter::OnStopDiscovery,
                                 weak_ptr_factory_.GetWeakPtr(),
                                 callback, error_callback));
  }
}

// Non-const variant: delegates to the const overload and strips constness.
BluetoothAdapter::DeviceList BluetoothAdapter::GetDevices() {
  ConstDeviceList const_devices =
      const_cast<const BluetoothAdapter *>(this)->GetDevices();

  DeviceList devices;
  for (ConstDeviceList::const_iterator i = const_devices.begin();
       i != const_devices.end(); ++i)
    devices.push_back(const_cast<BluetoothDevice *>(*i));

  return devices;
}

BluetoothAdapter::ConstDeviceList BluetoothAdapter::GetDevices() const {
  ConstDeviceList devices;
  for (DevicesMap::const_iterator iter = devices_.begin();
       iter != devices_.end();
       ++iter)
    devices.push_back(iter->second);

  return devices;
}

BluetoothDevice* BluetoothAdapter::GetDevice(const std::string& address) {
  return const_cast<BluetoothDevice *>(
      const_cast<const BluetoothAdapter *>(this)->GetDevice(address));
}

// Returns the device with the given Bluetooth address, or NULL if unknown.
const BluetoothDevice* BluetoothAdapter::GetDevice(
    const std::string& address) const {
  DevicesMap::const_iterator iter = devices_.find(address);
  if (iter != devices_.end())
    return iter->second;

  return NULL;
}

void BluetoothAdapter::ReadLocalOutOfBandPairingData(
    const BluetoothOutOfBandPairingDataCallback& callback,
    const ErrorCallback& error_callback) {
  DBusThreadManager::Get()->GetBluetoothOutOfBandClient()->
      ReadLocalData(object_path_,
                    base::Bind(&BluetoothAdapter::OnReadLocalData,
                               weak_ptr_factory_.GetWeakPtr(),
                               callback, error_callback));
}

// Binds this object to whatever the system's default adapter is, and keeps
// following it as the default changes (see DefaultAdapterChanged).
void BluetoothAdapter::TrackDefaultAdapter() {
  DVLOG(1) << "Tracking default adapter";
  track_default_ = true;
  DBusThreadManager::Get()->GetBluetoothManagerClient()->
      DefaultAdapter(base::Bind(&BluetoothAdapter::AdapterCallback,
                                weak_ptr_factory_.GetWeakPtr()));
}

// Binds this object to the adapter with the given address (no tracking).
void BluetoothAdapter::FindAdapter(const std::string& address) {
  DVLOG(1) << "Using adapter " << address;
  track_default_ = false;
  DBusThreadManager::Get()->GetBluetoothManagerClient()->
      FindAdapter(address,
                  base::Bind(&BluetoothAdapter::AdapterCallback,
                             weak_ptr_factory_.GetWeakPtr()));
}

// Result of DefaultAdapter()/FindAdapter(): switch to the returned path on
// success, otherwise drop the current adapter (if we had one).
void BluetoothAdapter::AdapterCallback(const dbus::ObjectPath& adapter_path,
                                       bool success) {
  if (success) {
    ChangeAdapter(adapter_path);
  } else if (!object_path_.value().empty()) {
    RemoveAdapter();
  }
}

void BluetoothAdapter::DefaultAdapterChanged(
    const dbus::ObjectPath& adapter_path) {
  if (track_default_)
    ChangeAdapter(adapter_path);
}

void BluetoothAdapter::AdapterRemoved(const dbus::ObjectPath& adapter_path) {
  if (adapter_path == object_path_)
    RemoveAdapter();
}

// Rebinds this object to |adapter_path| and re-syncs all cached state
// (address, powered, discovering, device list) from its properties.
void BluetoothAdapter::ChangeAdapter(const dbus::ObjectPath& adapter_path) {
  if (adapter_path == object_path_)
    return;

  // Determine whether this is a change of adapter or gaining an adapter,
  // remember for later so we can send the right notification.
  const bool new_adapter = object_path_.value().empty();
  if (new_adapter) {
    DVLOG(1) << "Adapter path initialized to " << adapter_path.value();
  } else {
    DVLOG(1) << "Adapter path changed from " << object_path_.value()
             << " to " << adapter_path.value();

    // Invalidate the devices list, since the property update does not
    // remove them.
    ClearDevices();
  }

  object_path_ = adapter_path;

  // Update properties to their new values.
  BluetoothAdapterClient::Properties* properties =
      DBusThreadManager::Get()->GetBluetoothAdapterClient()->
      GetProperties(object_path_);

  address_ = properties->address.value();

  PoweredChanged(properties->powered.value());
  DiscoveringChanged(properties->discovering.value());
  DevicesChanged(properties->devices.value());

  // Notify observers if we did not have an adapter before, the case of
  // moving from one to another is hidden from layers above.
  if (new_adapter)
    FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                      AdapterPresentChanged(this, true));
}

// Clears all cached adapter state and notifies observers the adapter is gone.
void BluetoothAdapter::RemoveAdapter() {
  DVLOG(1) << "Adapter lost.";
  PoweredChanged(false);
  DiscoveringChanged(false);
  ClearDevices();

  object_path_ = dbus::ObjectPath("");
  address_.clear();

  FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                    AdapterPresentChanged(this, false));
}

void BluetoothAdapter::OnSetPowered(const base::Closure& callback,
                                    const ErrorCallback& error_callback,
                                    bool success) {
  if (success)
    callback.Run();
  else
    error_callback.Run();
}

// Updates the cached powered flag and notifies observers on change only.
void BluetoothAdapter::PoweredChanged(bool powered) {
  if (powered == powered_)
    return;

  powered_ = powered;

  FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                    AdapterPoweredChanged(this, powered_));
}

void BluetoothAdapter::OnStartDiscovery(const base::Closure& callback,
                                        const ErrorCallback& error_callback,
                                        const dbus::ObjectPath& adapter_path,
                                        bool success) {
  if (success) {
    DVLOG(1) << object_path_.value() << ": started discovery.";

    // Clear devices found in previous discovery attempts
    ClearDiscoveredDevices();
    callback.Run();
  } else {
    // TODO(keybuk): in future, don't run the callback if the error was just
    // that we were already discovering.
    error_callback.Run();
  }
}

void BluetoothAdapter::OnStopDiscovery(const base::Closure& callback,
                                       const ErrorCallback& error_callback,
                                       const dbus::ObjectPath& adapter_path,
                                       bool success) {
  if (success) {
    DVLOG(1) << object_path_.value() << ": stopped discovery.";
    callback.Run();
    // Leave found devices available for perusing.
  } else {
    // TODO(keybuk): in future, don't run the callback if the error was just
    // that we weren't discovering.
    error_callback.Run();
  }
}

// Updates the cached discovering flag and notifies observers on change only.
void BluetoothAdapter::DiscoveringChanged(bool discovering) {
  if (discovering == discovering_)
    return;

  discovering_ = discovering;

  FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                    AdapterDiscoveringChanged(this, discovering_));
}

void BluetoothAdapter::OnReadLocalData(
    const BluetoothOutOfBandPairingDataCallback& callback,
    const ErrorCallback& error_callback,
    const BluetoothOutOfBandPairingData& data,
    bool success) {
  if (success)
    callback.Run(data);
  else
    error_callback.Run();
}

// D-Bus signal: one of our adapter's properties changed; dispatch to the
// matching *Changed handler.
void BluetoothAdapter::AdapterPropertyChanged(
    const dbus::ObjectPath& adapter_path,
    const std::string& property_name) {
  if (adapter_path != object_path_)
    return;

  BluetoothAdapterClient::Properties* properties =
      DBusThreadManager::Get()->GetBluetoothAdapterClient()->
      GetProperties(object_path_);

  if (property_name == properties->powered.name()) {
    PoweredChanged(properties->powered.value());
  } else if (property_name == properties->discovering.name()) {
    DiscoveringChanged(properties->discovering.value());
  } else if (property_name == properties->devices.name()) {
    DevicesChanged(properties->devices.value());
  }
}

void BluetoothAdapter::DevicePropertyChanged(
    const dbus::ObjectPath& device_path,
    const std::string& property_name) {
  UpdateDevice(device_path);
}

// Creates or updates the BluetoothDevice for |device_path| from its D-Bus
// properties, and emits DeviceAdded or DeviceChanged as appropriate.
void BluetoothAdapter::UpdateDevice(const dbus::ObjectPath& device_path) {
  BluetoothDeviceClient::Properties* properties =
      DBusThreadManager::Get()->GetBluetoothDeviceClient()->
      GetProperties(device_path);

  // When we first see a device, we may not know the address yet and need to
  // wait for the DevicePropertyChanged signal before adding the device.
  const std::string address = properties->address.value();
  if (address.empty())
    return;

  // The device may be already known to us, either because this is an update
  // to properties, or the device going from discovered to connected and
  // pairing gaining an object path in the process. In any case, we want
  // to update the existing object, not create a new one.
  DevicesMap::iterator iter = devices_.find(address);
  BluetoothDevice* device;
  const bool update_device = (iter != devices_.end());
  if (update_device) {
    device = iter->second;
  } else {
    device = BluetoothDevice::Create(this);
    devices_[address] = device;
  }

  const bool was_paired = device->IsPaired();
  if (!was_paired) {
    DVLOG(1) << "Assigned object path " << device_path.value()
             << " to device " << address;
    device->SetObjectPath(device_path);
  }
  device->Update(properties, true);

  // Don't send a duplicate added event for supported devices that were
  // previously visible or for already paired devices, send a changed
  // event instead. We always send one event or the other since we always
  // inform observers about paired devices whether or not they're supported.
  if (update_device && (device->IsSupported() || was_paired)) {
    FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                      DeviceChanged(this, device));
  } else {
    FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                      DeviceAdded(this, device));
  }
}

// Deletes every cached device, emitting DeviceRemoved for those that were
// supported or paired (the only ones observers were told about).
void BluetoothAdapter::ClearDevices() {
  DevicesMap replace;
  devices_.swap(replace);
  for (DevicesMap::iterator iter = replace.begin();
       iter != replace.end(); ++iter) {
    BluetoothDevice* device = iter->second;
    if (device->IsSupported() || device->IsPaired())
      FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                        DeviceRemoved(this, device));

    delete device;
  }
}

void BluetoothAdapter::DeviceCreated(const dbus::ObjectPath& adapter_path,
                                     const dbus::ObjectPath& device_path) {
  if (adapter_path != object_path_)
    return;

  UpdateDevice(device_path);
}

// D-Bus signal: a device object path went away. Deletes the device only if
// it is no longer visible; otherwise just detaches the object path.
void BluetoothAdapter::DeviceRemoved(const dbus::ObjectPath& adapter_path,
                                     const dbus::ObjectPath& device_path) {
  if (adapter_path != object_path_)
    return;

  DevicesMap::iterator iter = devices_.begin();
  while (iter != devices_.end()) {
    BluetoothDevice* device = iter->second;
    DevicesMap::iterator temp = iter;
    ++iter;

    if (device->object_path_ != device_path)
      continue;

    // DeviceRemoved can also be called to indicate a device that is visible
    // during discovery has disconnected, but it is still visible to the
    // adapter, so don't remove in that case and only clear the object path.
    if (!device->IsVisible()) {
      FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                        DeviceRemoved(this, device));

      DVLOG(1) << "Removed device " << device->address();

      delete device;
      devices_.erase(temp);
    } else {
      DVLOG(1) << "Removed object path from device " << device->address();
      device->RemoveObjectPath();

      // If the device is not supported then we want to act as if it was
      // removed, even though it is still visible to the adapter.
      if (!device->IsSupported()) {
        FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                          DeviceRemoved(this, device));
      } else {
        FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                          DeviceChanged(this, device));
      }
    }
  }
}

void BluetoothAdapter::DevicesChanged(
    const std::vector<dbus::ObjectPath>& devices) {
  for (std::vector<dbus::ObjectPath>::const_iterator iter = devices.begin();
       iter != devices.end(); ++iter)
    UpdateDevice(*iter);
}

// Drops every unpaired (i.e. discovery-only) device from the cache.
void BluetoothAdapter::ClearDiscoveredDevices() {
  DevicesMap::iterator iter = devices_.begin();
  while (iter != devices_.end()) {
    BluetoothDevice* device = iter->second;
    DevicesMap::iterator temp = iter;
    ++iter;

    if (!device->IsPaired()) {
      if (device->IsSupported())
        FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                          DeviceRemoved(this, device));

      delete device;
      devices_.erase(temp);
    }
  }
}

// D-Bus signal: discovery found a device with |address|.
void BluetoothAdapter::DeviceFound(
    const dbus::ObjectPath& adapter_path, const std::string& address,
    const BluetoothDeviceClient::Properties& properties) {
  if (adapter_path != object_path_)
    return;

  // DeviceFound can also be called to indicate that a device we've
  // paired with is now visible to the adapter during discovery, in which
  // case we want to update the existing object, not create a new one.
  BluetoothDevice* device;
  DevicesMap::iterator iter = devices_.find(address);
  const bool update_device = (iter != devices_.end());
  if (update_device) {
    device = iter->second;
  } else {
    device = BluetoothDevice::Create(this);
    devices_[address] = device;
  }

  DVLOG(1) << "Device " << address << " is visible to the adapter";
  device->SetVisible(true);
  device->Update(&properties, false);

  // Don't send a duplicated added event for duplicate signals for supported
  // devices that were previously visible (should never happen) or for already
  // paired devices, send a changed event instead. We do not inform observers
  // if we find or update an unconnected and unsupported device.
  if (update_device && (device->IsSupported() || device->IsPaired())) {
    FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                      DeviceChanged(this, device));
  } else if (device->IsSupported()) {
    FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                      DeviceAdded(this, device));
  }
}

// D-Bus signal: |address| is no longer visible to the adapter.
void BluetoothAdapter::DeviceDisappeared(const dbus::ObjectPath& adapter_path,
                                         const std::string& address) {
  if (adapter_path != object_path_)
    return;

  DevicesMap::iterator iter = devices_.find(address);
  if (iter == devices_.end())
    return;

  BluetoothDevice* device = iter->second;

  // DeviceDisappeared can also be called to indicate that a device we've
  // paired with is no longer visible to the adapter, so don't remove
  // in that case and only clear the visible flag.
  if (!device->IsPaired()) {
    if (device->IsSupported())
      FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                        DeviceRemoved(this, device));

    DVLOG(1) << "Discovered device " << device->address()
             << " is no longer visible to the adapter";

    delete device;
    devices_.erase(iter);
  } else {
    DVLOG(1) << "Paired device " << device->address()
             << " is no longer visible to the adapter";
    device->SetVisible(false);

    FOR_EACH_OBSERVER(BluetoothAdapter::Observer, observers_,
                      DeviceChanged(this, device));
  }
}

// static
// Lazily creates the shared default adapter; subsequent calls return the
// same instance for as long as any reference keeps it alive.
scoped_refptr<BluetoothAdapter> BluetoothAdapter::DefaultAdapter() {
  if (!default_adapter.Get().get()) {
    BluetoothAdapter* new_adapter = new BluetoothAdapter;
    default_adapter.Get() = new_adapter->weak_ptr_factory_.GetWeakPtr();
    default_adapter.Get()->TrackDefaultAdapter();
  }

  return scoped_refptr<BluetoothAdapter>(default_adapter.Get());
}

// static
// Creates a standalone adapter bound to a specific address; caller owns it.
BluetoothAdapter* BluetoothAdapter::Create(const std::string& address) {
  BluetoothAdapter* adapter = new BluetoothAdapter;
  adapter->FindAdapter(address);
  return adapter;
}

}  // namespace chromeos
Java
/* this ALWAYS GENERATED file contains the IIDs and CLSIDs */ /* link this file in with the server and any clients */ /* File created by MIDL compiler version 7.00.0499 */ /* at Mon Dec 01 09:02:10 2008 */ /* Compiler settings for e:/builds/tinderbox/XR-Trunk/WINNT_5.2_Depend/mozilla/other-licenses/ia2/Accessible2.idl: Oicf, W1, Zp8, env=Win32 (32b run) protocol : dce , ms_ext, app_config, c_ext error checks: allocation ref bounds_check enum stub_data VC __declspec() decoration level: __declspec(uuid()), __declspec(selectany), __declspec(novtable) DECLSPEC_UUID(), MIDL_INTERFACE() */ //@@MIDL_FILE_HEADING( ) #pragma warning( disable: 4049 ) /* more than 64k source lines */ #ifdef __cplusplus extern "C"{ #endif #include <rpc.h> #include <rpcndr.h> #ifdef _MIDL_USE_GUIDDEF_ #ifndef INITGUID #define INITGUID #include <guiddef.h> #undef INITGUID #else #include <guiddef.h> #endif #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ DEFINE_GUID(name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) #else // !_MIDL_USE_GUIDDEF_ #ifndef __IID_DEFINED__ #define __IID_DEFINED__ typedef struct _IID { unsigned long x; unsigned short s1; unsigned short s2; unsigned char c[8]; } IID; #endif // __IID_DEFINED__ #ifndef CLSID_DEFINED #define CLSID_DEFINED typedef IID CLSID; #endif // CLSID_DEFINED #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ const type name = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}} #endif !_MIDL_USE_GUIDDEF_ MIDL_DEFINE_GUID(IID, IID_IAccessible2,0xE89F726E,0xC4F4,0x4c19,0xBB,0x19,0xB6,0x47,0xD7,0xFA,0x84,0x78); #undef MIDL_DEFINE_GUID #ifdef __cplusplus } #endif
Java
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.Serialization;
using System.Text;

namespace Inbox2.Platform.Channels.Entities
{
    /// <summary>
    /// Kind of channel profile.
    /// NOTE(review): the DataContract wire values ("1", "2") differ from the
    /// numeric values (0, 10); both are part of the serialized format, so
    /// confirm intent before changing either.
    /// </summary>
    [Serializable]
    [DataContract]
    public enum ProfileType
    {
        /// <summary>Regular (non-social) profile; serialized as "1".</summary>
        [EnumMember(Value = "1")]
        Default = 0,

        /// <summary>Social-network profile; serialized as "2".</summary>
        [EnumMember(Value = "2")]
        Social = 10,
    }
}
Java
<!DOCTYPE html> <html> <head> <meta charset="utf-8"> <title>Class Simpletools\Page\Layout</title> <link rel="stylesheet" href="resources/style.css?e99947befd7bf673c6b43ff75e9e0f170c88a60e"> </head> <body> <div id="left"> <div id="menu"> <a href="index.html" title="Overview"><span>Overview</span></a> <div id="groups"> <h3>Namespaces</h3> <ul> <li class="active"> <a href="namespace-Simpletools.html"> Simpletools<span></span> </a> <ul> <li> <a href="namespace-Simpletools.Autoload.html"> Autoload </a> </li> <li> <a href="namespace-Simpletools.Config.html"> Config </a> </li> <li> <a href="namespace-Simpletools.Db.html"> Db<span></span> </a> <ul> <li> <a href="namespace-Simpletools.Db.Mysql.html"> Mysql </a> </li> </ul></li> <li> <a href="namespace-Simpletools.Event.html"> Event </a> </li> <li> <a href="namespace-Simpletools.Http.html"> Http </a> </li> <li> <a href="namespace-Simpletools.Mvc.html"> Mvc </a> </li> <li class="active"> <a href="namespace-Simpletools.Page.html"> Page </a> </li> <li> <a href="namespace-Simpletools.Store.html"> Store </a> </li> </ul></li> </ul> </div> <hr> <div id="elements"> <h3>Classes</h3> <ul> <li class="active"><a href="class-Simpletools.Page.Layout.html">Layout</a></li> </ul> </div> </div> </div> <div id="splitter"></div> <div id="right"> <div id="rightInner"> <form id="search"> <input type="hidden" name="cx" value=""> <input type="hidden" name="ie" value="UTF-8"> <input type="text" name="q" class="text" placeholder="Search"> </form> <div id="navigation"> <ul> <li> <a href="index.html" title="Overview"><span>Overview</span></a> </li> <li> <a href="namespace-Simpletools.Page.html" title="Summary of Simpletools\Page"><span>Namespace</span></a> </li> <li class="active"> <span>Class</span> </li> </ul> <ul> </ul> <ul> </ul> </div> <div id="content" class="class"> <h1>Class Layout</h1> <div class="info"> <b>Namespace:</b> <a href="namespace-Simpletools.html">Simpletools</a>\<a href="namespace-Simpletools.Page.html">Page</a><br> <b>Located 
at</b> <a href="source-class-Simpletools.Page.Layout.html#44-413" title="Go to source code">Simpletools/Page/Layout.php</a> <br> </div> <table class="summary methods" id="methods"> <caption>Methods summary</caption> <tr data-order="__construct" id="___construct"> <td class="attributes"><code> public </code> </td> <td class="name"><div> <a class="anchor" href="#___construct">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#67-89" title="Go to source code">__construct</a>( <span>array <var>$settings</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="settings" id="_settings"> <td class="attributes"><code> public static &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_settings">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#91-104" title="Go to source code">settings</a>( <span>array <var>$settings</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="getInstance" id="_getInstance"> <td class="attributes"><code> public static &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_getInstance">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#112-125" title="Go to source code">getInstance</a>( <span> <var>$settings</var> = <span class="php-keyword1">null</span></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="setLayout" id="_setLayout"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_setLayout">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#127-135" title="Go to source code">setLayout</a>( <span> <var>$layDir</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr 
data-order="registerLayouts" id="_registerLayouts"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_registerLayouts">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#137-147" title="Go to source code">registerLayouts</a>( <span>array <var>$layouts</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="start" id="_start"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_start">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#149-152" title="Go to source code">start</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="startBuffer" id="_startBuffer"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_startBuffer">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#154-159" title="Go to source code">startBuffer</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="clearBuffer" id="_clearBuffer"> <td class="attributes"><code> public </code> </td> <td class="name"><div> <a class="anchor" href="#_clearBuffer">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#161-164" title="Go to source code">clearBuffer</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="render" id="_render"> <td class="attributes"><code> public </code> </td> <td class="name"><div> <a class="anchor" href="#_render">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#166-195" title="Go to source code">render</a>( <span> <var>$minify</var> = <span class="php-keyword1">false</span></span> )</code> <div class="description short"> </div> <div 
class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="layout" id="_layout"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_layout">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#197-200" title="Go to source code">layout</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="displayContent" id="_displayContent"> <td class="attributes"><code> public </code> </td> <td class="name"><div> <a class="anchor" href="#_displayContent">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#202-205" title="Go to source code">displayContent</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="disable" id="_disable"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_disable">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#207-213" title="Go to source code">disable</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="set" id="_set"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_set">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#215-223" title="Go to source code">set</a>( <span> <var>$layout</var> = <span class="php-quote">'default'</span></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="enable" id="_enable"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_enable">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#225-235" title="Go to source code">enable</a>( <span> <var>$layout</var> = <span 
class="php-quote">'default'</span></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="setTitle" id="_setTitle"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_setTitle">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#244-248" title="Go to source code">setTitle</a>( <span> <var>$title</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="setDefaultLayoutTitle" id="_setDefaultLayoutTitle"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_setDefaultLayoutTitle">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#250-254" title="Go to source code">setDefaultLayoutTitle</a>( <span> <var>$title</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="displayTitle" id="_displayTitle"> <td class="attributes"><code> public </code> </td> <td class="name"><div> <a class="anchor" href="#_displayTitle">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#256-259" title="Go to source code">displayTitle</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="setDescription" id="_setDescription"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_setDescription">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#261-265" title="Go to source code">setDescription</a>( <span> <var>$description</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="displayDescription" id="_displayDescription"> <td class="attributes"><code> public </code> </td> 
<td class="name"><div> <a class="anchor" href="#_displayDescription">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#267-270" title="Go to source code">displayDescription</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="addInternalCss" id="_addInternalCss"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_addInternalCss">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#272-276" title="Go to source code">addInternalCss</a>( <span> <var>$style</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="displayInternalCss" id="_displayInternalCss"> <td class="attributes"><code> public </code> </td> <td class="name"><div> <a class="anchor" href="#_displayInternalCss">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#278-288" title="Go to source code">displayInternalCss</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="clearInternalCss" id="_clearInternalCss"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_clearInternalCss">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#290-294" title="Go to source code">clearInternalCss</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="addExternalCss" id="_addExternalCss"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_addExternalCss">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#296-306" title="Go to source code">addExternalCss</a>( <span> <var>$href</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> 
</div> </div></td> </tr> <tr data-order="addExternalCss_" id="_addExternalCss_"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_addExternalCss_">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#308-312" title="Go to source code">addExternalCss_</a>( <span> <var>$href</var></span>, <span> <var>$media</var> = <span class="php-quote">'screen'</span></span>, <span> <var>$rel</var> = <span class="php-quote">'stylesheet'</span></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="clearExternalCss" id="_clearExternalCss"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_clearExternalCss">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#314-318" title="Go to source code">clearExternalCss</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="displayExternalCss" id="_displayExternalCss"> <td class="attributes"><code> public </code> </td> <td class="name"><div> <a class="anchor" href="#_displayExternalCss">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#320-323" title="Go to source code">displayExternalCss</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="addMetaTag" id="_addMetaTag"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_addMetaTag">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#325-330" title="Go to source code">addMetaTag</a>( <span> <var>$name</var></span>, <span> <var>$content</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="displayMetaTags" id="_displayMetaTags"> <td 
class="attributes"><code> public </code> </td> <td class="name"><div> <a class="anchor" href="#_displayMetaTags">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#332-344" title="Go to source code">displayMetaTags</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="addHeadLink" id="_addHeadLink"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_addHeadLink">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#346-357" title="Go to source code">addHeadLink</a>( <span>array <var>$options</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="displayHeadLinks" id="_displayHeadLinks"> <td class="attributes"><code> public </code> </td> <td class="name"><div> <a class="anchor" href="#_displayHeadLinks">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#359-362" title="Go to source code">displayHeadLinks</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="addExternalJs" id="_addExternalJs"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_addExternalJs">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#364-374" title="Go to source code">addExternalJs</a>( <span> <var>$href</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="clearExternalJs" id="_clearExternalJs"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_clearExternalJs">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#376-380" title="Go to source code">clearExternalJs</a>( )</code> <div class="description short"> </div> <div class="description 
detailed hidden"> </div> </div></td> </tr> <tr data-order="displayExternalJs" id="_displayExternalJs"> <td class="attributes"><code> public </code> </td> <td class="name"><div> <a class="anchor" href="#_displayExternalJs">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#382-385" title="Go to source code">displayExternalJs</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="addInternalJs" id="_addInternalJs"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_addInternalJs">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#387-391" title="Go to source code">addInternalJs</a>( <span> <var>$source</var></span> )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="clearInternalJs" id="_clearInternalJs"> <td class="attributes"><code> public &amp; </code> </td> <td class="name"><div> <a class="anchor" href="#_clearInternalJs">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#393-397" title="Go to source code">clearInternalJs</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> <tr data-order="displayInternalJs" id="_displayInternalJs"> <td class="attributes"><code> public </code> </td> <td class="name"><div> <a class="anchor" href="#_displayInternalJs">#</a> <code><a href="source-class-Simpletools.Page.Layout.html#399-411" title="Go to source code">displayInternalJs</a>( )</code> <div class="description short"> </div> <div class="description detailed hidden"> </div> </div></td> </tr> </table> <table class="summary properties" id="properties"> <caption>Properties summary</caption> <tr data-order="content" id="$content"> <td class="attributes"><code> public string </code></td> <td class="name"> <a href="source-class-Simpletools.Page.Layout.html#52" 
title="Go to source code"><var>$content</var></a> <div class="description short"> </div> <div class="description detailed hidden"> </div> </td> <td class="value"> <div> <a href="#$content" class="anchor">#</a> <code><span class="php-quote">''</span></code> </div> </td> </tr> <tr data-order="description" id="$description"> <td class="attributes"><code> public </code></td> <td class="name"> <a href="source-class-Simpletools.Page.Layout.html#57" title="Go to source code"><var>$description</var></a> <div class="description short"> </div> <div class="description detailed hidden"> </div> </td> <td class="value"> <div> <a href="#$description" class="anchor">#</a> <code><span class="php-keyword1">null</span></code> </div> </td> </tr> <tr data-order="title" id="$title"> <td class="attributes"><code> public </code></td> <td class="name"> <a href="source-class-Simpletools.Page.Layout.html#58" title="Go to source code"><var>$title</var></a> <div class="description short"> </div> <div class="description detailed hidden"> </div> </td> <td class="value"> <div> <a href="#$title" class="anchor">#</a> <code><span class="php-keyword1">null</span></code> </div> </td> </tr> <tr data-order="_layouts" id="$_layouts"> <td class="attributes"><code> protected array </code></td> <td class="name"> <a href="source-class-Simpletools.Page.Layout.html#62" title="Go to source code"><var>$_layouts</var></a> <div class="description short"> </div> <div class="description detailed hidden"> </div> </td> <td class="value"> <div> <a href="#$_layouts" class="anchor">#</a> <code><span class="php-keyword1">array</span>()</code> </div> </td> </tr> <tr data-order="_currentLayout" id="$_currentLayout"> <td class="attributes"><code> protected string </code></td> <td class="name"> <a href="source-class-Simpletools.Page.Layout.html#63" title="Go to source code"><var>$_currentLayout</var></a> <div class="description short"> </div> <div class="description detailed hidden"> </div> </td> <td class="value"> <div> <a 
href="#$_currentLayout" class="anchor">#</a> <code><span class="php-quote">'default'</span></code> </div> </td> </tr> </table> </div> <div id="footer"> API documentation generated by <a href="http://apigen.org">ApiGen</a> </div> </div> </div> <script src="resources/combined.js"></script> <script src="elementlist.js"></script> </body> </html>
Java
#ifndef NT2_GALLERY_INCLUDE_FUNCTIONS_SCALAR_PARTER_HPP_INCLUDED #define NT2_GALLERY_INCLUDE_FUNCTIONS_SCALAR_PARTER_HPP_INCLUDED #include <nt2/gallery/functions/parter.hpp> #endif
Java
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ppc64 import ( "cmd/compile/internal/gc" "cmd/internal/obj" "cmd/internal/obj/ppc64" ) const ( LeftRdwr uint32 = gc.LeftRead | gc.LeftWrite RightRdwr uint32 = gc.RightRead | gc.RightWrite ) // This table gives the basic information about instruction // generated by the compiler and processed in the optimizer. // See opt.h for bit definitions. // // Instructions not generated need not be listed. // As an exception to that rule, we typically write down all the // size variants of an operation even if we just use a subset. // // The table is formatted for 8-space tabs. var progtable = [ppc64.ALAST]obj.ProgInfo{ obj.ATYPE: {Flags: gc.Pseudo | gc.Skip}, obj.ATEXT: {Flags: gc.Pseudo}, obj.AFUNCDATA: {Flags: gc.Pseudo}, obj.APCDATA: {Flags: gc.Pseudo}, obj.AUNDEF: {Flags: gc.Break}, obj.AUSEFIELD: {Flags: gc.OK}, obj.ACHECKNIL: {Flags: gc.LeftRead}, obj.AVARDEF: {Flags: gc.Pseudo | gc.RightWrite}, obj.AVARKILL: {Flags: gc.Pseudo | gc.RightWrite}, obj.AVARLIVE: {Flags: gc.Pseudo | gc.LeftRead}, // NOP is an internal no-op that also stands // for USED and SET annotations, not the Power opcode. 
obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite}, // Integer ppc64.AADD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.ASUB: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.ANEG: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AAND: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AXOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AMULLD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AMULLW: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AMULHD: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AMULHDU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.ADIVD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.ADIVDU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.ASLD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.ASRD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.ASRAD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.ACMP: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead}, ppc64.ACMPU: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead}, ppc64.ATD: {Flags: gc.SizeQ | gc.RightRead}, // Floating point. 
ppc64.AFADD: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AFADDS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AFSUB: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AFSUBS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AFMUL: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AFMULS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AFDIV: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AFDIVS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AFCTIDZ: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AFCFID: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite}, ppc64.AFCMPU: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead}, ppc64.AFRSP: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv}, // Moves ppc64.AMOVB: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, ppc64.AMOVBU: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc}, ppc64.AMOVBZ: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, ppc64.AMOVH: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, ppc64.AMOVHU: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc}, ppc64.AMOVHZ: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, ppc64.AMOVW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, // there is no AMOVWU. 
ppc64.AMOVWZU: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc}, ppc64.AMOVWZ: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, ppc64.AMOVD: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move}, ppc64.AMOVDU: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move | gc.PostInc}, ppc64.AFMOVS: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, ppc64.AFMOVD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move}, // Jumps ppc64.ABR: {Flags: gc.Jump | gc.Break}, ppc64.ABL: {Flags: gc.Call}, ppc64.ABEQ: {Flags: gc.Cjmp}, ppc64.ABNE: {Flags: gc.Cjmp}, ppc64.ABGE: {Flags: gc.Cjmp}, ppc64.ABLT: {Flags: gc.Cjmp}, ppc64.ABGT: {Flags: gc.Cjmp}, ppc64.ABLE: {Flags: gc.Cjmp}, obj.ARET: {Flags: gc.Break}, obj.ADUFFZERO: {Flags: gc.Call}, obj.ADUFFCOPY: {Flags: gc.Call}, } var initproginfo_initialized int func initproginfo() { var addvariant = []int{V_CC, V_V, V_CC | V_V} if initproginfo_initialized != 0 { return } initproginfo_initialized = 1 // Perform one-time expansion of instructions in progtable to // their CC, V, and VCC variants var as2 int var i int var variant int for as := int(0); as < len(progtable); as++ { if progtable[as].Flags == 0 { continue } variant = as2variant(as) for i = 0; i < len(addvariant); i++ { as2 = variant2as(as, variant|addvariant[i]) if as2 != 0 && progtable[as2].Flags == 0 { progtable[as2] = progtable[as] } } } } func proginfo(p *obj.Prog) { initproginfo() info := &p.Info *info = progtable[p.As] if info.Flags == 0 { gc.Fatalf("proginfo: unknown instruction %v", p) } if (info.Flags&gc.RegRead != 0) && p.Reg == 0 { info.Flags &^= gc.RegRead info.Flags |= gc.RightRead /*CanRegRead |*/ } if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 { info.Regindex |= RtoB(int(p.From.Reg)) if info.Flags&gc.PostInc != 0 { info.Regset |= RtoB(int(p.From.Reg)) } } if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 { info.Regindex 
|= RtoB(int(p.To.Reg)) if info.Flags&gc.PostInc != 0 { info.Regset |= RtoB(int(p.To.Reg)) } } if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) { info.Flags &^= gc.LeftRead info.Flags |= gc.LeftAddr } if p.As == obj.ADUFFZERO { info.Reguse |= 1<<0 | RtoB(ppc64.REG_R3) info.Regset |= RtoB(ppc64.REG_R3) } if p.As == obj.ADUFFCOPY { // TODO(austin) Revisit when duffcopy is implemented info.Reguse |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) | RtoB(ppc64.REG_R5) info.Regset |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) } } // Instruction variants table. Initially this contains entries only // for the "base" form of each instruction. On the first call to // as2variant or variant2as, we'll add the variants to the table. var varianttable = [ppc64.ALAST][4]int{ ppc64.AADD: {ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC}, ppc64.AADDC: {ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC}, ppc64.AADDE: {ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC}, ppc64.AADDME: {ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC}, ppc64.AADDZE: {ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC}, ppc64.AAND: {ppc64.AAND, ppc64.AANDCC, 0, 0}, ppc64.AANDN: {ppc64.AANDN, ppc64.AANDNCC, 0, 0}, ppc64.ACNTLZD: {ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0}, ppc64.ACNTLZW: {ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0}, ppc64.ADIVD: {ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC}, ppc64.ADIVDU: {ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC}, ppc64.ADIVW: {ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC}, ppc64.ADIVWU: {ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC}, ppc64.AEQV: {ppc64.AEQV, ppc64.AEQVCC, 0, 0}, ppc64.AEXTSB: {ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0}, ppc64.AEXTSH: {ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0}, ppc64.AEXTSW: {ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0}, ppc64.AFABS: {ppc64.AFABS, ppc64.AFABSCC, 0, 0}, ppc64.AFADD: {ppc64.AFADD, ppc64.AFADDCC, 0, 0}, ppc64.AFADDS: 
{ppc64.AFADDS, ppc64.AFADDSCC, 0, 0}, ppc64.AFCFID: {ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0}, ppc64.AFCTID: {ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0}, ppc64.AFCTIDZ: {ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0}, ppc64.AFCTIW: {ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0}, ppc64.AFCTIWZ: {ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0}, ppc64.AFDIV: {ppc64.AFDIV, ppc64.AFDIVCC, 0, 0}, ppc64.AFDIVS: {ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0}, ppc64.AFMADD: {ppc64.AFMADD, ppc64.AFMADDCC, 0, 0}, ppc64.AFMADDS: {ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0}, ppc64.AFMOVD: {ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0}, ppc64.AFMSUB: {ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0}, ppc64.AFMSUBS: {ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0}, ppc64.AFMUL: {ppc64.AFMUL, ppc64.AFMULCC, 0, 0}, ppc64.AFMULS: {ppc64.AFMULS, ppc64.AFMULSCC, 0, 0}, ppc64.AFNABS: {ppc64.AFNABS, ppc64.AFNABSCC, 0, 0}, ppc64.AFNEG: {ppc64.AFNEG, ppc64.AFNEGCC, 0, 0}, ppc64.AFNMADD: {ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0}, ppc64.AFNMADDS: {ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0}, ppc64.AFNMSUB: {ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0}, ppc64.AFNMSUBS: {ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0}, ppc64.AFRES: {ppc64.AFRES, ppc64.AFRESCC, 0, 0}, ppc64.AFRSP: {ppc64.AFRSP, ppc64.AFRSPCC, 0, 0}, ppc64.AFRSQRTE: {ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0}, ppc64.AFSEL: {ppc64.AFSEL, ppc64.AFSELCC, 0, 0}, ppc64.AFSQRT: {ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0}, ppc64.AFSQRTS: {ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0}, ppc64.AFSUB: {ppc64.AFSUB, ppc64.AFSUBCC, 0, 0}, ppc64.AFSUBS: {ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0}, ppc64.AMTFSB0: {ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0}, ppc64.AMTFSB1: {ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0}, ppc64.AMULHD: {ppc64.AMULHD, ppc64.AMULHDCC, 0, 0}, ppc64.AMULHDU: {ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0}, ppc64.AMULHW: {ppc64.AMULHW, ppc64.AMULHWCC, 0, 0}, ppc64.AMULHWU: {ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0}, ppc64.AMULLD: {ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC}, ppc64.AMULLW: {ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC}, 
ppc64.ANAND: {ppc64.ANAND, ppc64.ANANDCC, 0, 0}, ppc64.ANEG: {ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC}, ppc64.ANOR: {ppc64.ANOR, ppc64.ANORCC, 0, 0}, ppc64.AOR: {ppc64.AOR, ppc64.AORCC, 0, 0}, ppc64.AORN: {ppc64.AORN, ppc64.AORNCC, 0, 0}, ppc64.AREM: {ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC}, ppc64.AREMD: {ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC}, ppc64.AREMDU: {ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC}, ppc64.AREMU: {ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC}, ppc64.ARLDC: {ppc64.ARLDC, ppc64.ARLDCCC, 0, 0}, ppc64.ARLDCL: {ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0}, ppc64.ARLDCR: {ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0}, ppc64.ARLDMI: {ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0}, ppc64.ARLWMI: {ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0}, ppc64.ARLWNM: {ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0}, ppc64.ASLD: {ppc64.ASLD, ppc64.ASLDCC, 0, 0}, ppc64.ASLW: {ppc64.ASLW, ppc64.ASLWCC, 0, 0}, ppc64.ASRAD: {ppc64.ASRAD, ppc64.ASRADCC, 0, 0}, ppc64.ASRAW: {ppc64.ASRAW, ppc64.ASRAWCC, 0, 0}, ppc64.ASRD: {ppc64.ASRD, ppc64.ASRDCC, 0, 0}, ppc64.ASRW: {ppc64.ASRW, ppc64.ASRWCC, 0, 0}, ppc64.ASUB: {ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC}, ppc64.ASUBC: {ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC}, ppc64.ASUBE: {ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC}, ppc64.ASUBME: {ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC}, ppc64.ASUBZE: {ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC}, ppc64.AXOR: {ppc64.AXOR, ppc64.AXORCC, 0, 0}, } var initvariants_initialized int func initvariants() { if initvariants_initialized != 0 { return } initvariants_initialized = 1 var j int for i := int(0); i < len(varianttable); i++ { if varianttable[i][0] == 0 { // Instruction has no variants varianttable[i][0] = i continue } // Copy base form to other variants if varianttable[i][0] == i { for j = 0; j < len(varianttable[i]); j++ { varianttable[varianttable[i][j]] = varianttable[i] } } } } 
// as2variant returns the variant (V_*) flags of instruction as. func as2variant(as int) int { initvariants() for i := int(0); i < len(varianttable[as]); i++ { if varianttable[as][i] == as { return i } } gc.Fatalf("as2variant: instruction %v is not a variant of itself", obj.Aconv(as)) return 0 } // variant2as returns the instruction as with the given variant (V_*) flags. // If no such variant exists, this returns 0. func variant2as(as int, flags int) int { initvariants() return varianttable[as][flags] }
Java
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805_int_loop_04.cpp Label Definition File: CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805.label.xml Template File: sources-sink-04.tmpl.cpp */ /* * @description * CWE: 122 Heap Based Buffer Overflow * BadSource: Allocate using new[] and set data pointer to a small buffer * GoodSource: Allocate using new[] and set data pointer to a large buffer * Sink: loop * BadSink : Copy int array to data using a loop * Flow Variant: 04 Control flow: if(STATIC_CONST_TRUE) and if(STATIC_CONST_FALSE) * * */ #include "std_testcase.h" /* The two variables below are declared "const", so a tool should be able to identify that reads of these will always return their initialized values. */ static const int STATIC_CONST_TRUE = 1; /* true */ static const int STATIC_CONST_FALSE = 0; /* false */ namespace CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805_int_loop_04 { #ifndef OMITBAD void bad() { int * data; data = NULL; if(STATIC_CONST_TRUE) { /* FLAW: Allocate using new[] and point data to a small buffer that is smaller than the large buffer used in the sinks */ data = new int[50]; } { int source[100] = {0}; /* fill with 0's */ { size_t i; /* POTENTIAL FLAW: Possible buffer overflow if data < 100 */ for (i = 0; i < 100; i++) { data[i] = source[i]; } printIntLine(data[0]); delete [] data; } } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B1() - use goodsource and badsink by changing the STATIC_CONST_TRUE to STATIC_CONST_FALSE */ static void goodG2B1() { int * data; data = NULL; if(STATIC_CONST_FALSE) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ printLine("Benign, fixed string"); } else { /* FIX: Allocate using new[] and point data to a large buffer that is at least as large as the large buffer used in the sink */ data = new int[100]; } { int source[100] = {0}; /* fill with 0's */ { size_t i; /* POTENTIAL FLAW: Possible buffer overflow if data < 100 */ for (i = 0; i < 100; i++) { 
data[i] = source[i]; } printIntLine(data[0]); delete [] data; } } } /* goodG2B2() - use goodsource and badsink by reversing the blocks in the if statement */ static void goodG2B2() { int * data; data = NULL; if(STATIC_CONST_TRUE) { /* FIX: Allocate using new[] and point data to a large buffer that is at least as large as the large buffer used in the sink */ data = new int[100]; } { int source[100] = {0}; /* fill with 0's */ { size_t i; /* POTENTIAL FLAW: Possible buffer overflow if data < 100 */ for (i = 0; i < 100; i++) { data[i] = source[i]; } printIntLine(data[0]); delete [] data; } } } void good() { goodG2B1(); goodG2B2(); } #endif /* OMITGOOD */ } /* close namespace */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN using namespace CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805_int_loop_04; /* so that we can use good and bad easily */ int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
Java
using System; using System.Collections; namespace MyGeneration.CodeSmithConversion.Template { public enum CstTokenType { Code = 0, ResponseWriteShortcutCode, RunAtServerCode, Literal, Comment, EscapedStartTag, EscapedEndTag } /// <summary> /// Summary description for CstToken. /// </summary> public class CstToken { private string text; private CstTokenType tokenType = CstTokenType.Literal; public CstToken(CstTokenType tokenType, string text) { this.tokenType = tokenType; this.text = text; } public string Text { get { return text; } set { text = value; } } public CstTokenType TokenType { get { return tokenType; } set { tokenType = value; } } } }
Java
<div> <a href="#" title="Click or press enter to display help" class="dropdown standalone-help" data-toggle="reset-page-help" tab-index="0"> <span class="icon-circle"><i class="fa fa-info"></i></span> </a> </div> <div class="dropdown-pane" id="reset-page-help" data-dropdown data-hover="true" data-hover-pane="true" data-auto-focus="true"> <p>If you cannot complete the password reset form, please bring University-issued photo identification to the Bruin OnLine Help Desk located in Suite 124, Kerckhoff Hall. If you are not able to visit the Help Desk, please fax in a completed copy of the <a href="/files/service_request.pdf">UCLA Logon Service Request Form</a>, with all required supporting documentation.</p> </div>
Java
/* * @description Expression is always true via if (unsigned int >= 0) * * */ #include "std_testcase.h" #ifndef OMITBAD void CWE571_Expression_Always_True__unsigned_int_01_bad() { /* Ensure (0 <= intBad < UINT_MAX) and that uIntBad is pseudo-random */ unsigned int uIntBad = (unsigned int)(rand() * 2); /* FLAW: This expression is always true */ if (uIntBad >= 0) { printLine("Always prints"); } } #endif /* OMITBAD */ #ifndef OMITGOOD static void good1() { int intGood = rand(); /* FIX: Possibly evaluate to true */ if (intGood > (RAND_MAX / 2)) { printLine("Sometimes prints"); } } void CWE571_Expression_Always_True__unsigned_int_01_good() { good1(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on * its own for testing or for building a binary to use in testing binary * analysis tools. It is not used when compiling all the testcases as one * application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE571_Expression_Always_True__unsigned_int_01_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE571_Expression_Always_True__unsigned_int_01_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
Java
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_WEBUI_OPTIONS_CHROMEOS_STATS_OPTIONS_HANDLER_H_ #define CHROME_BROWSER_UI_WEBUI_OPTIONS_CHROMEOS_STATS_OPTIONS_HANDLER_H_ #pragma once #include "base/compiler_specific.h" #include "chrome/browser/ui/webui/options/options_ui.h" namespace chromeos { // ChromeOS handler for "Stats/crash reporting to Google" option of the Advanced // settings page. This handler does only ChromeOS-specific actions while default // code is in Chrome's AdvancedOptionsHandler // (chrome/browser/webui/advanced_options_handler.cc). class StatsOptionsHandler : public OptionsPageUIHandler { public: StatsOptionsHandler(); // OptionsPageUIHandler implementation. virtual void GetLocalizedValues( base::DictionaryValue* localized_strings) OVERRIDE; virtual void Initialize() OVERRIDE; // WebUIMessageHandler implementation. virtual void RegisterMessages() OVERRIDE; private: void HandleMetricsReportingCheckbox(const base::ListValue* args); DISALLOW_COPY_AND_ASSIGN(StatsOptionsHandler); }; } // namespace chromeos #endif // CHROME_BROWSER_UI_WEBUI_OPTIONS_CHROMEOS_STATS_OPTIONS_HANDLER_H_
Java
# RSpec bootstrap for the Rails app: loads the application environment, test
# drivers and shared helper modules before any spec runs.
ENV['RAILS_ENV'] ||= 'test'

# Only collect/report coverage on CI (Travis); Coveralls boots SimpleCov.
if ENV['TRAVIS']
  require 'coveralls'
  Coveralls.wear!('rails')
  SimpleCov.start do
    add_filter '.bundle'
    add_filter 'spec'
  end
end

require 'spec_helper'
require File.expand_path('../../config/environment', __FILE__)
require 'rspec/rails'
require 'rspec/its'
require 'shoulda/matchers'
require 'capybara/poltergeist'

# Drive JavaScript-enabled feature specs through PhantomJS (Poltergeist).
Capybara.javascript_driver = :poltergeist
Capybara.default_wait_time = 30

# 4 == Logger::FATAL -- keep Rails logging quiet during the suite.
Rails.logger.level = 4

# Requires supporting ruby files with custom matchers and macros, etc, in
# spec/support/ and its subdirectories. Files matching `spec/**/*_spec.rb` are
# run as spec files by default. This means that files in spec/support that end
# in _spec.rb will both be required and run as specs, causing the specs to be
# run twice. It is recommended that you do not name files matching this glob to
# end with _spec.rb. You can configure this pattern with the --pattern
# option on the command line or in ~/.rspec, .rspec or `.rspec-local`.
Dir[Rails.root.join('spec/support/**/*.rb')].each { |f| require f }

# Checks for pending migrations before tests are run.
ActiveRecord::Migration.maintain_test_schema!

RSpec.configure do |config|
  # Mix shared helper modules into the matching spec types.
  config.include FactoryGirl::Syntax::Methods
  config.include Features::SessionHelpers, type: :feature
  config.include Features::FormHelpers, type: :feature
  config.include Features::ScheduleHelpers, type: :feature
  config.include Features::PhoneHelpers, type: :feature
  config.include Features::ContactHelpers, type: :feature
  config.include Requests::RequestHelpers, type: :request
  config.include DefaultHeaders, type: :request
  config.include MailerMacros

  # rspec-rails 3+ will no longer automatically infer an example group's spec
  # type from the file location. You can explicitly opt-in to this feature by
  # uncommenting the setting below.
  config.infer_spec_type_from_file_location!

  # Remove this line if you're not using ActiveRecord or ActiveRecord fixtures
  config.fixture_path = "#{::Rails.root}/spec/support/fixtures"

  # If you're not using ActiveRecord, or you'd prefer not to run each of your
  # examples within a transaction, remove the following line or assign false
  # instead of true.
  config.use_transactional_fixtures = false

  # require 'active_record_spec_helper'
end
Java
/**
 * Copyright (c) 2013-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * @emails oncall+jsinfra
 */

'use strict';

jest.unmock('everyObject');

const everyObject = require('everyObject');

describe('everyObject', () => {
  let mockObject;
  let mockCallback;

  // Fresh fixture and spy for every example.
  beforeEach(() => {
    mockObject = {foo: 1, bar: 2, baz: 3};
    mockCallback = jest.fn();
  });

  it('handles null', () => {
    everyObject(null, mockCallback);
    // A null target must short-circuit: the predicate is never invoked.
    expect(mockCallback).not.toBeCalled();
  });

  it('returns true if all properties pass the test', () => {
    mockCallback.mockImplementation(() => true);
    const result = everyObject(mockObject, mockCallback);
    expect(result).toBeTruthy();
    // The predicate receives (value, key, object) for every own property,
    // in insertion order.
    const expectedCalls = [
      [1, 'foo', mockObject],
      [2, 'bar', mockObject],
      [3, 'baz', mockObject]
    ];
    expect(mockCallback.mock.calls).toEqual(expectedCalls);
  });

  it('returns false if any of the properties fail the test', () => {
    mockCallback.mockImplementation(() => false);
    const result = everyObject(mockObject, mockCallback);
    expect(result).toBeFalsy();
    expect(mockCallback).toBeCalled();
  });

  it('returns immediately upon finding a property that fails the test', () => {
    mockCallback.mockImplementation(() => false);
    const result = everyObject(mockObject, mockCallback);
    expect(result).toBeFalsy();
    // Short-circuit: exactly one invocation before bailing out.
    expect(mockCallback.mock.calls.length).toEqual(1);
  });
});
Java
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_UI_VIEWS_OMNIBOX_OMNIBOX_POPUP_CONTENTS_VIEW_H_
#define CHROME_BROWSER_UI_VIEWS_OMNIBOX_OMNIBOX_POPUP_CONTENTS_VIEW_H_

#include <stddef.h>

#include "base/memory/weak_ptr.h"
#include "components/omnibox/browser/omnibox_popup_selection.h"
#include "components/omnibox/browser/omnibox_popup_view.h"
#include "components/prefs/pref_change_registrar.h"
#include "ui/base/metadata/metadata_header_macros.h"
#include "ui/base/window_open_disposition.h"
#include "ui/gfx/font_list.h"
#include "ui/gfx/image/image.h"
#include "ui/views/view.h"
#include "ui/views/widget/widget_observer.h"

struct AutocompleteMatch;
class LocationBarView;
class OmniboxEditModel;
class OmniboxResultView;
class OmniboxViewViews;
class WebUIOmniboxPopupView;

// A view representing the contents of the autocomplete popup.
class OmniboxPopupContentsView : public views::View,
                                 public OmniboxPopupView,
                                 public views::WidgetObserver {
 public:
  METADATA_HEADER(OmniboxPopupContentsView);
  OmniboxPopupContentsView(OmniboxViewViews* omnibox_view,
                           OmniboxEditModel* edit_model,
                           LocationBarView* location_bar_view);
  OmniboxPopupContentsView(const OmniboxPopupContentsView&) = delete;
  OmniboxPopupContentsView& operator=(const OmniboxPopupContentsView&) = delete;
  ~OmniboxPopupContentsView() override;

  // Opens a match from the list specified by |index| with the type of tab or
  // window specified by |disposition|.  The index-less overload opens the
  // currently selected match.
  void OpenMatch(WindowOpenDisposition disposition,
                 base::TimeTicks match_selection_timestamp);
  void OpenMatch(size_t index,
                 WindowOpenDisposition disposition,
                 base::TimeTicks match_selection_timestamp);

  // Returns the icon that should be displayed next to |match|. If the icon is
  // available as a vector icon, it will be |vector_icon_color|.
  gfx::Image GetMatchIcon(const AutocompleteMatch& match,
                          SkColor vector_icon_color) const;

  // Sets the line specified by |index| as selected and, if |index| is
  // different than the previous index, sets the line state to NORMAL.
  virtual void SetSelectedIndex(size_t index);

  // Returns the selected line.
  // Note: This and `SetSelectedIndex` above are used by property
  // metadata and must follow the metadata conventions.
  virtual size_t GetSelectedIndex() const;

  // Returns current popup selection (includes line index).
  virtual OmniboxPopupSelection GetSelection() const;

  // Called by the active result view to inform model (due to mouse event).
  void UnselectButton();

  // Gets the OmniboxResultView for match |i|.
  OmniboxResultView* result_view_at(size_t i);

  // Currently selected OmniboxResultView, or nullptr if nothing is selected.
  OmniboxResultView* GetSelectedResultView();

  // Returns whether we're in experimental keyword mode and the input gives
  // sufficient confidence that the user wants keyword mode.
  bool InExplicitExperimentalKeywordMode();

  // OmniboxPopupView:
  bool IsOpen() const override;
  void InvalidateLine(size_t line) override;
  void OnSelectionChanged(OmniboxPopupSelection old_selection,
                          OmniboxPopupSelection new_selection) override;
  void UpdatePopupAppearance() override;
  void ProvideButtonFocusHint(size_t line) override;
  void OnMatchIconUpdated(size_t match_index) override;
  void OnDragCanceled() override;

  // views::View:
  bool OnMouseDragged(const ui::MouseEvent& event) override;
  void OnGestureEvent(ui::GestureEvent* event) override;
  void GetAccessibleNodeData(ui::AXNodeData* node_data) override;

  // views::WidgetObserver:
  void OnWidgetBoundsChanged(views::Widget* widget,
                             const gfx::Rect& new_bounds) override;

  // Fires accessibility events when |descendant_view| becomes the active
  // descendant (NOTE(review): inferred from the name -- confirm at call sites).
  void FireAXEventsForNewActiveDescendant(View* descendant_view);

 private:
  friend class OmniboxPopupContentsViewTest;
  friend class OmniboxSuggestionButtonRowBrowserTest;
  class AutocompletePopupWidget;

  // Returns the target popup bounds in screen coordinates based on the bounds
  // of |location_bar_view_|.
  gfx::Rect GetTargetBounds() const;

  // Returns true if the model has a match at the specified index.
  bool HasMatchAt(size_t index) const;

  // Returns the match at the specified index within the model.
  const AutocompleteMatch& GetMatchAtIndex(size_t index) const;

  // Find the index of the match under the given |point|, specified in window
  // coordinates. Returns OmniboxPopupSelection::kNoMatch if there isn't a match
  // at the specified point.
  size_t GetIndexForPoint(const gfx::Point& point);

  // Update which result views are visible when the group visibility changes.
  void OnSuggestionGroupVisibilityUpdate();

  // Gets the pref service for this view. May return nullptr in tests.
  PrefService* GetPrefService() const;

  // The popup that contains this view. We create this, but it deletes itself
  // when its window is destroyed. This is a WeakPtr because it's possible for
  // the OS to destroy the window and thus delete this object before we're
  // deleted, or without our knowledge.
  base::WeakPtr<AutocompletePopupWidget> popup_;

  // The edit view that invokes us.
  OmniboxViewViews* omnibox_view_;

  // The location bar view that owns |omnibox_view_|. May be nullptr in tests.
  LocationBarView* location_bar_view_;

  // The child WebView for the suggestions. This only exists if the
  // omnibox::kWebUIOmniboxPopup flag is on.
  WebUIOmniboxPopupView* webui_view_ = nullptr;

  // A pref change registrar for toggling result view visibility.
  PrefChangeRegistrar pref_change_registrar_;

  // Edit model backing this popup; not owned (NOTE(review): ownership not
  // visible here -- presumably outlives this view; confirm).
  OmniboxEditModel* edit_model_;
};

#endif  // CHROME_BROWSER_UI_VIEWS_OMNIBOX_OMNIBOX_POPUP_CONTENTS_VIEW_H_
Java
<!DOCTYPE html> <html dir="ltr" lang="pl"> <head> <title>Foreign Function Interface - Rubinius</title> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"> <meta content='pl' http-equiv='content-language'> <meta content='Rubinius is an implementation of the Ruby programming language. The Rubinius bytecode virtual machine is written in C++. The bytecode compiler is written in pure Ruby. The vast majority of the core library is also written in Ruby, with some supporting primitives that interact with the VM directly.' name='description'> <link href='/' rel='home'> <link href='/' rel='start'> <link href='/doc/pl/systems/primitives' rel='prev' title='Primitives'> <link href='/doc/pl/systems/concurrency' rel='next' title='Współbieżność'> <!--[if IE]><script src="http://html5shiv.googlecode.com/svn/trunk/html5.js" type="text/javascript"></script><![endif]--> <script src="/javascripts/jquery-1.3.2.js"></script> <script src="/javascripts/paging_keys.js"></script> <script src="/javascripts/application.js"></script> <style>article, aside, dialog, figure, footer, header, hgroup, menu, nav, section { display: block; }</style> <link href="/stylesheets/blueprint/screen.css" media="screen" rel="stylesheet" /> <link href="/stylesheets/application.css" media="screen" rel="stylesheet" /> <link href="/stylesheets/blueprint/print.css" media="print" rel="stylesheet" /> <!--[if IE]><link href="/stylesheets/blueprint/ie.css" media="screen" rel="stylesheet" type="text/css" /><![endif]--> <!--[if IE]><link href="/stylesheets/ie.css" media="screen" rel="stylesheet" type="text/css" /><![endif]--> <link href="/stylesheets/pygments.css" media="screen" rel="stylesheet" /> </head> <body> <div class='container'> <div class='span-21 doc_menu'> <header> <nav> <ul> <li><a href="/">Home</a></li> <li><a id="blog" href="/blog">Blog</a></li> <li><a id="documentation" href="/doc/en">Documentation</a></li> <li><a href="/projects">Projects</a></li> <li><a 
href="/roadmap">Roadmap</a></li> <li><a href="/releases">Releases</a></li> </ul> </nav> </header> </div> <div class='span-3 last'> <div id='version'> <a href="/releases/1.2.4">1.2.4</a> </div> </div> </div> <div class="container languages"> <nav> <span class="label">Język:</span> <ul> <li><a href="/doc/de/systems/ffi/" >de</a></li> <li><a href="/doc/en/systems/ffi/" >en</a></li> <li><a href="/doc/es/systems/ffi/" >es</a></li> <li><a href="/doc/fr/systems/ffi/" >fr</a></li> <li><a href="/doc/ja/systems/ffi/" >ja</a></li> <li><a href="/doc/pl/systems/ffi/" class="current" >pl</a></li> <li><a href="/doc/pt-br/systems/ffi/" >pt-br</a></li> <li><a href="/doc/ru/systems/ffi/" >ru</a></li> </ul> </nav> </div> <div class="container doc_page_nav"> <span class="label">Wstecz:</span> <a href="/doc/pl/systems/primitives">Primitives</a> <span class="label">Do góry:</span> <a href="/doc/pl/">Spis treści</a> <span class="label">Dalej:</span> <a href="/doc/pl/systems/concurrency">Współbieżność</a> </div> <div class="container documentation"> <h2>Foreign Function Interface</h2> <div class="review"> <p>This topic has missing or partial documentation. 
Please help us improve it.</p> <p> See <a href="/doc/pl/how-to/write-documentation">How-To - Write Documentation</a> </p> </div> </div> <div class="container doc_page_nav"> <span class="label">Wstecz:</span> <a href="/doc/pl/systems/primitives">Primitives</a> <span class="label">Do góry:</span> <a href="/doc/pl/">Spis treści</a> <span class="label">Dalej:</span> <a href="/doc/pl/systems/concurrency">Współbieżność</a> </div> <div class="container"> <div id="disqus_thread"></div> <script type="text/javascript"> var disqus_shortname = 'rubinius'; var disqus_identifier = '/doc/pl/systems/ffi/'; var disqus_url = 'http://rubini.us/doc/pl/systems/ffi/'; (function() { var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; dsq.src = 'http://' + disqus_shortname + '.disqus.com/embed.js'; (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); })(); </script> <noscript>Please enable JavaScript to view the <a href="http://disqus.com/?ref_noscript">comments powered by Disqus.</a></noscript> </div> <footer> <div class='container'> <nav> <ul> <li><a rel="external" href="http://twitter.com/rubinius">Follow Rubinius on Twitter</a></li> <li><a rel="external" href="http://github.com/rubinius/rubinius">Fork Rubinius on github</a></li> <li><a rel="external" href="http://engineyard.com">An Engine Yard project</a></li> </ul> </nav> </div> </footer> <script> var _gaq=[['_setAccount','UA-12328521-1'],['_trackPageview']]; (function(d,t){var g=d.createElement(t),s=d.getElementsByTagName(t)[0];g.async=1; g.src=('https:'==location.protocol?'//ssl':'//www')+'.google-analytics.com/ga.js'; s.parentNode.insertBefore(g,s)}(document,'script')); </script> </body> </html>
Java
/*
 * Copyright (c) 2015, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 */
#include "UmbrellaProtocol.h"

#include <folly/Bits.h>

#include "mcrouter/lib/McReply.h"
#include "mcrouter/lib/McRequest.h"
#include "mcrouter/lib/mc/umbrella.h"

#ifndef LIBMC_FBTRACE_DISABLE
#include "mcrouter/lib/mc/mc_fbtrace_info.h"
#endif

static_assert(
  mc_nops == 27,
  "If you add a new mc_op, make sure to update lib/mc/umbrella_conv.h");

static_assert(
  UM_NOPS == 28,
  "If you add a new mc_op, make sure to update lib/mc/umbrella_conv.h");

static_assert(
  mc_nres == 31,
  "If you add a new mc_res, make sure to update lib/mc/umbrella_conv.h");

namespace facebook { namespace memcache {

/* Validates the fixed-size umbrella header and fills in |infoOut| with the
 * protocol version, header size and body size.  Returns NOT_ENOUGH_DATA when
 * fewer than sizeof(entry_list_msg_t) bytes are available, and
 * MESSAGE_PARSE_ERROR on a bad magic byte, unknown version, or a declared
 * total size smaller than the computed header size. */
UmbrellaParseStatus umbrellaParseHeader(const uint8_t* buf, size_t nbuf,
                                        UmbrellaMessageInfo& infoOut) {
  if (nbuf < sizeof(entry_list_msg_t)) {
    return UmbrellaParseStatus::NOT_ENOUGH_DATA;
  }

  entry_list_msg_t* header = (entry_list_msg_t*) buf;

  if (header->msg_header.magic_byte != ENTRY_LIST_MAGIC_BYTE) {
    return UmbrellaParseStatus::MESSAGE_PARSE_ERROR;
  }

  infoOut.version = static_cast<UmbrellaVersion>(header->msg_header.version);

  if (infoOut.version == UmbrellaVersion::BASIC) {
    /* Basic version layout:
       }0NNSSSS, <um_elist_entry_t>*nentries, body
       Where N is nentries and S is message size, both big endian */
    size_t messageSize = folly::Endian::big<uint32_t>(header->total_size);
    uint16_t nentries = folly::Endian::big<uint16_t>(header->nentries);
    infoOut.headerSize = sizeof(entry_list_msg_t) +
      sizeof(um_elist_entry_t) * nentries;
    if (infoOut.headerSize > messageSize) {
      return UmbrellaParseStatus::MESSAGE_PARSE_ERROR;
    }
    infoOut.bodySize = messageSize - infoOut.headerSize;
  } else if (infoOut.version == UmbrellaVersion::TYPED_REQUEST) {
    /* Typed request layout:
       }1TTSSSSFFFFRRRR, body
       Where T is type ID, S is message size, F is flags and R is reqid
       (all little-endian) */
    size_t messageSize = folly::Endian::little<uint32_t>(header->total_size);
    /* NOTE: the |nentries| header field is reused as the type ID here. */
    infoOut.typeId = folly::Endian::little<uint16_t>(header->nentries);
    infoOut.headerSize = sizeof(entry_list_msg_t) + sizeof(uint32_t)
      + sizeof(uint32_t);
    if (infoOut.headerSize > messageSize) {
      return UmbrellaParseStatus::MESSAGE_PARSE_ERROR;
    }
    infoOut.bodySize = messageSize - infoOut.headerSize;
  } else {
    return UmbrellaParseStatus::MESSAGE_PARSE_ERROR;
  }

  return UmbrellaParseStatus::OK;
}

/* Scans the (BASIC-version) entry list for the msg_reqid entry and returns
 * its value.  Throws std::runtime_error when the entry count is inconsistent
 * with |nheader|, when the reqid is zero, or when no reqid entry exists. */
uint64_t umbrellaDetermineReqId(const uint8_t* header, size_t nheader) {
  auto msg = reinterpret_cast<const entry_list_msg_t*>(header);
  size_t nentries = folly::Endian::big((uint16_t)msg->nentries);
  if (reinterpret_cast<const uint8_t*>(&msg->entries[nentries])
      != header + nheader) {
    throw std::runtime_error("Invalid number of entries");
  }
  for (size_t i = 0; i < nentries; ++i) {
    auto& entry = msg->entries[i];
    size_t tag = folly::Endian::big((uint16_t)entry.tag);
    if (tag == msg_reqid) {
      uint64_t val = folly::Endian::big((uint64_t)entry.data.val);
      if (val == 0) {
        throw std::runtime_error("invalid reqid");
      }
      return val;
    }
  }
  throw std::runtime_error("missing reqid");
}

/* Decodes a full umbrella request from |header|/|body| into an McRequest.
 * |opOut| and |reqidOut| receive the operation and request id; both are
 * mandatory and an exception is thrown if either is missing.  Unknown entry
 * tags are ignored silently for forward compatibility. */
McRequest umbrellaParseRequest(const folly::IOBuf& source,
                               const uint8_t* header, size_t nheader,
                               const uint8_t* body, size_t nbody,
                               mc_op_t& opOut, uint64_t& reqidOut) {
  McRequest req;
  opOut = mc_op_unknown;
  reqidOut = 0;

  auto msg = reinterpret_cast<const entry_list_msg_t*>(header);
  size_t nentries = folly::Endian::big((uint16_t)msg->nentries);
  if (reinterpret_cast<const uint8_t*>(&msg->entries[nentries])
      != header + nheader) {
    throw std::runtime_error("Invalid number of entries");
  }
  for (size_t i = 0; i < nentries; ++i) {
    auto& entry = msg->entries[i];
    size_t tag = folly::Endian::big((uint16_t)entry.tag);
    size_t val = folly::Endian::big((uint64_t)entry.data.val);
    switch (tag) {
      case msg_op:
        if (val >= UM_NOPS) {
          throw std::runtime_error("op out of range");
        }
        opOut = static_cast<mc_op_t>(umbrella_op_to_mc[val]);
        break;

      case msg_reqid:
        if (val == 0) {
          throw std::runtime_error("invalid reqid");
        }
        reqidOut = val;
        break;

      case msg_flags:
        req.setFlags(val);
        break;

      case msg_exptime:
        req.setExptime(val);
        break;

      case msg_delta:
        req.setDelta(val);
        break;

      case msg_cas:
        req.setCas(val);
        break;

      case msg_lease_id:
        req.setLeaseToken(val);
        break;

      /* Key/value strings are NUL-terminated on the wire; the stored length
       * includes the terminator, hence the "- 1".
       * NOTE(review): a wire length of 0 would wrap around here -- presumably
       * setKeyFrom/setValueFrom reject the resulting range (they return false
       * on invalid offset/length); confirm in McRequest. */
      case msg_key:
        if (!req.setKeyFrom(
              source,
              body + folly::Endian::big((uint32_t)entry.data.str.offset),
              folly::Endian::big((uint32_t)entry.data.str.len) - 1)) {
          throw std::runtime_error("Key: invalid offset/length");
        }
        break;

      case msg_value:
        if (!req.setValueFrom(
              source,
              body + folly::Endian::big((uint32_t)entry.data.str.offset),
              folly::Endian::big((uint32_t)entry.data.str.len) - 1)) {
          throw std::runtime_error("Value: invalid offset/length");
        }
        break;

#ifndef LIBMC_FBTRACE_DISABLE
      case msg_fbtrace:
        {
          auto off = folly::Endian::big((uint32_t)entry.data.str.offset);
          auto len = folly::Endian::big((uint32_t)entry.data.str.len) - 1;
          if (len > FBTRACE_METADATA_SZ) {
            throw std::runtime_error("Fbtrace metadata too large");
          }
          /* The second clause guards against off + len overflowing. */
          if (off + len > nbody || off + len < off) {
            throw std::runtime_error("Fbtrace metadata field invalid");
          }
          auto fbtraceInfo = new_mc_fbtrace_info(0);
          memcpy(fbtraceInfo->metadata, body + off, len);
          req.setFbtraceInfo(fbtraceInfo);
          break;
        }
#endif

      default:
        /* Ignore unknown tags silently */
        break;
    }
  }

  if (opOut == mc_op_unknown) {
    throw std::runtime_error("Request missing operation");
  }
  if (!reqidOut) {
    throw std::runtime_error("Request missing reqid");
  }

  return req;
}

/* Pre-wires the constant parts of the serialized message: magic byte,
 * protocol version, and the first two iovecs (header, entry array). */
UmbrellaSerializedMessage::UmbrellaSerializedMessage() {
  /* These will not change from message to message */
  msg_.msg_header.magic_byte = ENTRY_LIST_MAGIC_BYTE;
  msg_.msg_header.version = UMBRELLA_VERSION_BASIC;
  iovs_[0].iov_base = &msg_;
  iovs_[0].iov_len = sizeof(msg_);
  iovs_[1].iov_base = entries_;
}

/* Resets per-message state so the object can serialize the next reply. */
void UmbrellaSerializedMessage::clear() {
  nEntries_ = nStrings_ = offset_ = 0;
  error_ = false;
}

/* Serializes |reply| into the internal iovec array.  On success, |iovOut| /
 * |niovOut| describe the wire message and true is returned; returns false if
 * the fixed-size entry or string tables overflowed (see error_). */
bool UmbrellaSerializedMessage::prepare(const McReply& reply, mc_op_t op,
                                        uint64_t reqid, struct iovec*& iovOut,
                                        size_t& niovOut) {
  niovOut = 0;

  appendInt(I32, msg_op, umbrella_op_from_mc[op]);
  appendInt(U64, msg_reqid, reqid);
  appendInt(I32, msg_result, umbrella_res_from_mc[reply.result()]);

  /* Optional fields are only emitted when non-zero. */
  if (reply.appSpecificErrorCode()) {
    appendInt(I32, msg_err_code, reply.appSpecificErrorCode());
  }
  if (reply.flags()) {
    appendInt(U64, msg_flags, reply.flags());
  }
  if (reply.exptime()) {
    appendInt(U64, msg_exptime, reply.exptime());
  }
  if (reply.delta()) {
    appendInt(U64, msg_delta, reply.delta());
  }
  if (reply.leaseToken()) {
    appendInt(U64, msg_lease_id, reply.leaseToken());
  }
  if (reply.cas()) {
    appendInt(U64, msg_cas, reply.cas());
  }
  if (reply.number()) {
    appendInt(U64, msg_number, reply.number());
  }

  /* TODO: if we intend to pass chained IOBufs as values,
     we can optimize this to write multiple iovs directly */
  if (reply.hasValue()) {
    auto valueRange = reply.valueRangeSlow();
    appendString(msg_value,
                 reinterpret_cast<const uint8_t*>(valueRange.begin()),
                 valueRange.size());
  }

  /* NOTE: this check must come after all append*() calls */
  if (error_) {
    return false;
  }

  niovOut = finalizeMessage();
  iovOut = iovs_;
  return true;
}

/* Appends one integer-valued entry (big-endian on the wire).  Sets error_
 * instead of throwing when the inline entry table is full. */
void UmbrellaSerializedMessage::appendInt(
  entry_type_t type, int32_t tag, uint64_t val) {
  if (nEntries_ >= kInlineEntries) {
    error_ = true;
    return;
  }

  um_elist_entry_t& entry = entries_[nEntries_++];
  entry.type = folly::Endian::big((uint16_t)type);
  entry.tag = folly::Endian::big((uint16_t)tag);
  entry.data.val = folly::Endian::big((uint64_t)val);
}

/* Appends one string-valued entry; the wire length is len + 1 because a NUL
 * terminator is appended by finalizeMessage().  Sets error_ when the inline
 * string table is full.
 * NOTE(review): only nStrings_ is bounds-checked here, yet nEntries_ is also
 * incremented -- presumably kInlineEntries >= kInlineStrings + the fixed int
 * entries; confirm against the header's array sizes. */
void UmbrellaSerializedMessage::appendString(
  int32_t tag, const uint8_t* data, size_t len, entry_type_t type) {
  if (nStrings_ >= kInlineStrings) {
    error_ = true;
    return;
  }

  strings_[nStrings_++] = folly::StringPiece((const char*)data, len);

  um_elist_entry_t& entry = entries_[nEntries_++];
  entry.type = folly::Endian::big((uint16_t)type);
  entry.tag = folly::Endian::big((uint16_t)tag);
  entry.data.str.offset = folly::Endian::big((uint32_t)offset_);
  entry.data.str.len = folly::Endian::big((uint32_t)(len + 1));
  offset_ += len + 1;
}

/* Writes the final header fields and lays out one iovec per string plus a
 * shared one-byte NUL terminator after each.  Returns the iovec count. */
size_t UmbrellaSerializedMessage::finalizeMessage() {
  static char nul = '\0';

  size_t size = sizeof(entry_list_msg_t) +
    sizeof(um_elist_entry_t) * nEntries_ +
    offset_;
  msg_.total_size = folly::Endian::big((uint32_t)size);
  msg_.nentries = folly::Endian::big((uint16_t)nEntries_);
  iovs_[1].iov_len = sizeof(um_elist_entry_t) * nEntries_;
  size_t niovOut = 2;
  for (size_t i = 0; i < nStrings_; i++) {
    iovs_[niovOut].iov_base = (char *)strings_[i].begin();
    iovs_[niovOut].iov_len = strings_[i].size();
    niovOut++;
    iovs_[niovOut].iov_base = &nul;
    iovs_[niovOut].iov_len = 1;
    niovOut++;
  }
  return niovOut;
}

}}
Java
/* * nvbio * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // // This software is a modification of: // // https://github.com/nmcclatchey/Priority-Deque/blob/master/priority_deque.hpp // // /*----------------------------------------------------------------------------*\ | Copyright (C) 2012-2013 Nathaniel McClatchey | | Released under the Boost Software License Version 1.0, which may be found | | at http://www.boost.org/LICENSE_1_0.txt | \*----------------------------------------------------------------------------*/ /*! 
@file priority_deque.h // priority_deque.hpp provides the class priority_deque as a thin wrapper // around the functions provided by interval_heap.hpp. // @remark Exception-safety: If the means of movement -- move, copy, or swap, // depending on exception specifications -- do not throw, the guarantee is as // strong as that of the action on the underlying container. (Unless otherwise // specified) // @note Providing a stronger guarantee is impractical. */ #pragma once #include <nvbio/basic/types.h> #include <nvbio/basic/interval_heap.h> // Default comparison (std::less) #include <functional> // Default container (std::vector) #include <vector> namespace nvbio { /// \page priority_deques_page Priority Deques /// /// This module implements a priority deque adaptor, allowing to push/pop from both ends of the container: /// /// - priority_deque /// /// \section ExampleSection Example /// ///\code /// // build a simple priority_deque over 4 integers /// typedef vector_view<uint32*> vector_type; /// typedef priority_deque<uint32, vector_type> deque_type; /// /// uint32 deque_storage[4] = { 5, 3, 8, 1 } /// /// // construct the deque /// deque_type deque( vector_type( 4u, deque_storage ) ); /// /// // pop from both ends /// printf( "%u\n", deque.top() ); // -> 8 /// deque.pop_top(); /// printf( "%u\n", deque.bottom() ); // -> 1 /// deque.pop_bottom(); /// printf( "%u\n", deque.top() ); // -> 5 /// deque.pop_top(); /// /// // perhaps push one more item /// deque.push( 7 ); /// /// // and keep popping /// printf( "%u\n", deque.bottom() ); // -> 3 /// deque.pop_bottom(); /// printf( "%u\n", deque.bottom() ); // -> 7 /// deque.pop_bottom(); ///\endcode /// ///@addtogroup Basic ///@{ ///@defgroup PriorityDeques Priority Deques /// This module implements a priority deque adaptor, allowing to push/pop from both ends of the container. ///@{ //! @brief Efficient double-ended priority queue. 
template <typename Type, typename Sequence =std::vector<Type>, typename Compare =::std::less<typename Sequence::value_type> > class priority_deque; //! @brief Swaps the elements of two priority deques. template <typename Type, typename Sequence, typename Compare> void swap (priority_deque<Type, Sequence, Compare>& deque1, priority_deque<Type, Sequence, Compare>& deque2); //----------------------------Priority Deque Class-----------------------------| /*! @brief Efficient double-ended priority queue. * @author Nathaniel McClatchey * @copyright Boost Software License Version 1.0 * @param Type Type of elements in the priority deque. * @param Sequence Underlying sequence container. Must provide random-access * iterators, %front(), %push_back(Type const &), and %pop_back(). * Defaults to std::vector<Type>. * @param Compare Comparison class. %Compare(A, B) should return true if %A * should be placed earlier than %B in a strict weak ordering. * Defaults to std::less<Type>, which encapsulates operator<. * @details Priority deques are adaptors, designed to provide efficient * insertion and access to both ends of a weakly-ordered list of elements. * As a container adaptor, priority_deque is implemented on top of another * container type. By default, this is std::vector, but a different container * may be specified explicitly. * Although the priority deque does permit iteration through its elements, * there is no ordering guaranteed, as different implementations may benefit * from different structures, and all iterators should be discarded after using * any function not labeled const. * @note %priority_deque does not provide a stable ordering. If both A<B and * B<A are false, then the order in which they appear may differ from the order * in which they were added to the priority deque. * @remark %priority_deque replicates the interface of the STL * @a priority_queue class template. * @remark %priority_deque is most useful when removals are interspersed with * insertions. 
If no further insertions are to be performed after the first * removal, consider using an array and a sorting algorithm instead. * @remark %priority_deque sorts elements as they are added, removed, and * modified by its member functions. If the elements are modified by some means * other than the public member functions, the order must be restoreed before * the priority_deque is used. * @see priority_queue */ template <typename Type, typename Sequence, typename Compare> class priority_deque { //----------------------------------Public-------------------------------------| public: //---------------------------------Typedefs------------------------------------| //! @details Underlying container type. typedef Sequence container_type; typedef typename container_type::value_type value_type; typedef Compare value_compare; //! @details STL Container specifies that this is an unsigned integral type. typedef typename container_type::size_type size_type; //! @details May be used to examine, but not modify, an element in the deque. typedef typename container_type::const_reference const_reference; typedef typename container_type::reference reference; typedef typename container_type::pointer pointer; typedef pointer const_pointer; //! @details May be used to examine, but not modify, elements in the deque. typedef typename container_type::const_iterator const_iterator; typedef const_iterator iterator; //! @details STL Container specifies that this is a signed integral type. typedef typename container_type::difference_type difference_type; //-------------------------------Constructors----------------------------------| enum Constructed { CONSTRUCTED }; //! @brief Constructs a new priority deque. 
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE explicit priority_deque (const Compare& =Compare(), const Sequence& =Sequence()); template <typename InputIterator> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE priority_deque (InputIterator first, InputIterator last, const Compare& =Compare(), const Sequence& =Sequence()); /// O(1) Creates a new priority deque from an already heapified container /// NVBIO_FORCEINLINE NVBIO_HOST_DEVICE priority_deque(const Sequence& seq, const bool constructed = false); /// O(1) Creates a new priority deque from an already heapified container /// NVBIO_FORCEINLINE NVBIO_HOST_DEVICE priority_deque(const Sequence& seq, const Constructed flag) : sequence_(seq) {} //-----------------------------Restricted Access-------------------------------| //! @brief Copies an element into the priority deque. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void push (const value_type&); //!@{ //! @brief Accesses a maximal element in the deque. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE const_reference maximum (void) const; //! @brief Accesses a minimal element in the deque. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE const_reference minimum (void) const; //! @details Identical to std::priority_queue top(). @see @a maximum NVBIO_FORCEINLINE NVBIO_HOST_DEVICE const_reference top (void) const { return maximum(); }; //! @details Identical to std::priority_queue top(). @see @a maximum NVBIO_FORCEINLINE NVBIO_HOST_DEVICE const_reference bottom (void) const { return bottom(); }; //!@} //!@{ //! @brief Removes a maximal element from the deque. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void pop_top (void); //! @brief Removes a minimal element from the deque. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void pop_bottom (void); //! @details Identical to std::priority_queue pop(). @see @a pop_maximum NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void pop (void) { pop_top(); }; //!@} //--------------------------------Deque Size-----------------------------------| //!@{ //! 
@brief Returns true if the priority deque is empty, false if it is not. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool empty (void) const {return sequence_.empty();}; //! @brief Returns the number of elements in the priority deque. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE size_type size (void) const {return sequence_.size(); }; //! @brief Returns the maximum number of elements that can be contained. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE size_type max_size (void) const {return sequence_.max_size();}; //!@} //--------------------------Whole-Deque Operations-----------------------------| //! @brief Removes all elements from the priority deque. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void clear (void) { sequence_.clear(); }; //! @brief Moves the elements from this deque into another, and vice-versa. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void swap (priority_deque<Type, Sequence,Compare>&); //!@{ //! @brief Merges a sequence of elements into the priority deque. template <typename InputIterator> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void merge (InputIterator first, InputIterator last); //! @brief Merges a container's elements into the priority deque. template <typename SourceContainer> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void merge (const SourceContainer& source) { merge(source.begin(), source.end()); } //!@} //-------------------------------Random Access---------------------------------| //!@{ //! @brief Returns a const iterator at the beginning of the sequence. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE const_iterator begin (void) const {return sequence_.begin();}; //! @brief Returns a const iterator past the end of the sequence. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE const_iterator end (void) const { return sequence_.end(); }; //! @brief Modifies a specified element of the deque. NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void update (const_iterator, const value_type&); //! @brief Removes a specified element from the deque. 
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void erase (const_iterator); //!@} //---------------------------Boost.Heap Concepts-------------------------------| // size() has constant complexity static const bool constant_time_size = true; // priority deque does not have ordered iterators static const bool has_ordered_iterators = false; // priority deque is efficiently mergable static const bool is_mergable = true; // priority deque does not have a stable heap order static const bool is_stable = false; // priority deque does not have a reserve() member static const bool has_reserve = false; //--------------------------------Protected------------------------------------| protected: NVBIO_FORCEINLINE NVBIO_HOST_DEVICE container_type& sequence (void) { return sequence_; }; NVBIO_FORCEINLINE NVBIO_HOST_DEVICE const container_type& sequence (void) const { return sequence_; }; NVBIO_FORCEINLINE NVBIO_HOST_DEVICE value_compare& compare (void) { return compare_; }; NVBIO_FORCEINLINE NVBIO_HOST_DEVICE const value_compare& compare (void) const { return compare_; }; //---------------------------------Private-------------------------------------| private: Sequence sequence_; Compare compare_; }; //-------------------------------Constructors----------------------------------| //----------------------------Default Constructor------------------------------| /** @param comp Comparison class. // @param seq Container class. // @post Deque contains copies of all elements from @a sequence. 
// // @remark Complexity: O(n) */ template <typename T, typename S, typename C> priority_deque<T, S, C>::priority_deque (const C& comp, const S& seq) : sequence_(seq), compare_(comp) { heap::make_interval_heap(sequence_.begin(), sequence_.end(), compare_); } template <typename T, typename S, typename C> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE priority_deque<T, S, C>::priority_deque(const S& seq, const bool constructed) : sequence_(seq) { if (!constructed) heap::make_interval_heap(sequence_.begin(), sequence_.end(), compare_); } //---------------------------Create from Iterators-----------------------------| /** @param first,last Range of elements. // @param comp Instance of comparison class. // @param seq Instance of container class. // @post Deque contains copies of all elements in @a sequence (if specified) // and in the range [ @a first, @a last). // // @remark Complexity: O(n) */ template <typename T, typename S, typename C> template <typename InputIterator> priority_deque<T, S, C>::priority_deque (InputIterator first,InputIterator last, const C& comp, const S& seq) : sequence_(seq), compare_(comp) { sequence_.insert(sequence_.end(), first, last); heap::make_interval_heap(sequence_.begin(), sequence_.end(), compare_); } //-----------------------------Restricted Access-------------------------------| //------------------------------Insert / Emplace-------------------------------| /** @param value Element to add the the priority deque. // @post Priority deque contains @a value or a copy of @a value. // @post All iterators and references are invalidated. 
// // @remark Complexity: O(log n) */ template <typename T, typename Sequence, typename Compare> void priority_deque<T, Sequence, Compare>::push (const value_type& value) { sequence_.push_back(value); heap::push_interval_heap(sequence_.begin(), sequence_.end(), compare_); } //---------------------------Observe Maximum/Minimum---------------------------| /** @return Const reference to a maximal element in the priority deque. // @pre Priority deque contains one or more elements. // @see minimum, pop_maximum // // @remark Complexity: O(1) */ template <typename T, typename Sequence, typename Compare> typename priority_deque<T, Sequence, Compare>::const_reference priority_deque<T, Sequence, Compare>::maximum (void) const { NVBIO_CUDA_DEBUG_ASSERT(!empty(), "Empty priority deque has no maximal element. Reference undefined."); const_iterator it = sequence_.begin() + 1; return (it == sequence_.end()) ? sequence_.front() : *it; } /** @return Const reference to a minimal element in the priority deque. // @pre Priority deque contains one or more elements. // @see maximum, pop_minimum // // @remark Complexity: O(1) */ template <typename T, typename Sequence, typename Compare> typename priority_deque<T, Sequence, Compare>::const_reference priority_deque<T, Sequence, Compare>::minimum (void) const { NVBIO_CUDA_DEBUG_ASSERT(!empty(), "Empty priority deque has no minimal element. Reference undefined."); return sequence_.front(); } //---------------------------Remove Maximum/Minimum----------------------------| /** @pre Priority deque contains one or more elements. // @post A maximal element has been removed from the priority deque. // @post All iterators and references are invalidated. // @see maximum, pop, pop_minimum // // @remark Complexity: O(log n) */ template <typename T, typename Sequence, typename Compare> void priority_deque<T, Sequence, Compare>::pop_bottom (void) { NVBIO_CUDA_DEBUG_ASSERT(!empty(), "Empty priority deque has no maximal element. 
Removal impossible."); heap::pop_interval_heap_min(sequence_.begin(), sequence_.end(), compare_); sequence_.pop_back(); } /** @pre Priority deque contains one or more elements. // @post A minimal element has been removed from the priority deque. // @post All iterators and references are invalidated. // @see minimum, pop_maximum // // @remark Complexity: O(log n) */ template <typename T, typename Sequence, typename Compare> void priority_deque<T, Sequence, Compare>::pop_top (void) { NVBIO_CUDA_DEBUG_ASSERT(!empty(), "Empty priority deque has no minimal element. Removal undefined."); heap::pop_interval_heap_max(sequence_.begin(), sequence_.end(), compare_); sequence_.pop_back(); } //--------------------------Whole-Deque Operations-----------------------------| //-----------------------------------Merge-------------------------------------| /** @param first,last Input iterators bounding the range [ @a first, @a last) // @post Priority deque contains its original elements, and copies of those in // the range. // @post All iterators and references are invalidated. // // @remark Complexity: O(n) // @remark Exception safety: Basic. */ template <typename T, typename S, typename C> template <typename InputIterator> void priority_deque<T, S, C>::merge (InputIterator first, InputIterator last) { sequence_.insert(sequence_.end(), first, last); heap::make_interval_heap(sequence_.begin(), sequence_.end(), compare_); } //----------------------------Swap Specialization------------------------------| /** @param other Priority deque with which to swap. // @post Deque contains the elements from @a source, and @a source contains the // elements from this deque. // @post All iterators and references are invalidated. // @note Sequence containers are required to have swap functions. 
// @remark Complexity: O(1) */ template <typename T, typename S, typename C> void priority_deque<T, S, C>::swap (priority_deque<T, S, C>& other) { sequence_.swap(other.sequence_); } /** @relates priority_deque // @param deque1,deque2 Priority deques. // @post @a deque1 contains the elements originally in @a deque2, and @a deque2 // contains the elements originally in @a deque1 // @post All iterators and references are invalidated. // // @remark Complexity: O(1) */ template <typename T, typename S, typename C> void swap (priority_deque<T, S, C>& deque1, priority_deque<T, S, C>& deque2) { deque1.swap(deque2); } //---------------------------Random-Access Mutators----------------------------| /** @param random_it A valid iterator in the range [begin, end). // @param value The new value. // @pre Priority deque contains one or more elements. // @post The element at @a random_it is set to @a value. // @post All iterators and references are invalidated. // @see erase // // Elements within the deque may be unordered. // @remark Complexity: O(log n) // @remark Exception safety: Basic. Exceptions won't lose elements, but may // corrupt the heap. */ template <typename T, typename S, typename C> void priority_deque<T, S, C>::update (const_iterator random_it, const value_type& value) { const difference_type index = random_it - begin(); NVBIO_CUDA_DEBUG_ASSERT((0 <= index) && (index < end() - begin()), "Iterator out of bounds; can't set element."); // Providing the strong guarantee would require saving a copy. *(sequence_.begin() + index) = value; heap::update_interval_heap(sequence_.begin(),sequence_.end(),index,compare_); } /** @param random_it An iterator in the range [begin, end) // @pre Priority deque contains one or more elements. // @post The deque no longer contains the element previously at @a random_it. // @post All iterators and references are invalidated. 
// @see set // @remark Complexity: O(log n) */ template <typename T, typename Sequence, typename Compare> void priority_deque<T, Sequence, Compare>::erase (const_iterator random_it) { const difference_type index = random_it - begin(); NVBIO_CUDA_DEBUG_ASSERT((0 <= index) && (index < end() - begin()), "Iterator out of bounds; can't erase element."); heap::pop_interval_heap(sequence_.begin(), sequence_.end(),index,compare_); sequence_.pop_back(); } ///@} PriorityDeques ///@} Basic } // namespace nvbio
Java
<?php
/**
 * Zend Framework
 *
 * LICENSE
 *
 * This source file is subject to the new BSD license that is bundled
 * with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://framework.zend.com/license/new-bsd
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to [email protected] so we can send you a copy immediately.
 *
 * @category   Zend
 * @package    Zend_Mail
 * @subpackage Protocol
 * @copyright  Copyright (c) 2005-2011 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd     New BSD License
 */

/**
 * @namespace
 */
namespace Zend\Mail;

use Zend\Validator\Hostname as HostnameValidator,
    Zend\Validator,
    Zend\Mail\Protocol;

/**
 * Zend_Mail_Protocol_Abstract
 *
 * Provides low-level methods for concrete adapters to communicate with a remote
 * mail server and track requests and responses.
 *
 * @uses       \Zend\Mail\Protocol\Exception
 * @uses       \Zend\Validator\ValidatorChain
 * @uses       \Zend\Validator\Hostname\Hostname
 * @category   Zend
 * @package    Zend_Mail
 * @subpackage Protocol
 * @copyright  Copyright (c) 2005-2011 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd     New BSD License
 * @todo Implement proxy settings
 */
abstract class AbstractProtocol
{
    /**
     * Mail default EOL string
     */
    const EOL = "\r\n";

    /**
     * Default timeout in seconds for initiating session
     */
    const TIMEOUT_CONNECTION = 30;

    /**
     * Maximum of the transaction log
     * @var integer
     */
    protected $_maximumLog = 64;

    /**
     * Hostname or IP address of remote server
     * @var string
     */
    protected $_host;

    /**
     * Port number of connection
     * @var integer
     */
    protected $_port;

    /**
     * Instance of Zend\Validator\ValidatorChain to check hostnames
     * @var \Zend\Validator\ValidatorChain
     */
    protected $_validHost;

    /**
     * Socket connection resource
     * @var resource
     */
    protected $_socket;

    /**
     * Last request sent to server
     * @var string
     */
    protected $_request;

    /**
     * Array of server responses to last request
     * @var array
     */
    protected $_response;

    /**
     * String template for parsing server responses using sscanf (default: 3 digit code and response string)
     * @var string
     * @deprecated Since 1.10.3
     */
    protected $_template = '%d%s';

    /**
     * Log of mail requests and server responses for a session
     * @var array
     */
    private $_log = array();


    /**
     * Constructor.
     *
     * Validates the hostname before storing the connection parameters; the
     * socket itself is only opened later via _connect().
     *
     * @param  string  $host OPTIONAL Hostname of remote connection (default: 127.0.0.1)
     * @param  integer $port OPTIONAL Port number (default: null)
     * @throws \Zend\Mail\Protocol\Exception
     * @return void
     */
    public function __construct($host = '127.0.0.1', $port = null)
    {
        $this->_validHost = new Validator\ValidatorChain();
        $this->_validHost->addValidator(new HostnameValidator(HostnameValidator::ALLOW_ALL));

        if (!$this->_validHost->isValid($host)) {
            throw new Protocol\Exception\RuntimeException(implode(', ', $this->_validHost->getMessages()));
        }

        $this->_host = $host;
        $this->_port = $port;
    }


    /**
     * Class destructor to cleanup open resources
     *
     * @return void
     */
    public function __destruct()
    {
        $this->_disconnect();
    }

    /**
     * Set the maximum log size
     *
     * @param integer $maximumLog Maximum log size
     * @return void
     */
    public function setMaximumLog($maximumLog)
    {
        $this->_maximumLog = (int) $maximumLog;
    }


    /**
     * Get the maximum log size
     *
     * @return int the maximum log size
     */
    public function getMaximumLog()
    {
        return $this->_maximumLog;
    }


    /**
     * Create a connection to the remote host
     *
     * Concrete adapters for this class will implement their own unique connect
     * scripts, using the _connect() method to create the socket resource.
     */
    abstract public function connect();


    /**
     * Retrieve the last client request
     *
     * @return string
     */
    public function getRequest()
    {
        return $this->_request;
    }


    /**
     * Retrieve the last server response
     *
     * @return array
     */
    public function getResponse()
    {
        return $this->_response;
    }


    /**
     * Retrieve the transaction log
     *
     * @return string
     */
    public function getLog()
    {
        return implode('', $this->_log);
    }


    /**
     * Reset the transaction log
     *
     * @return void
     */
    public function resetLog()
    {
        $this->_log = array();
    }

    /**
     * Add an entry to the transaction log, evicting the oldest entry once the
     * configured maximum is reached.
     *
     * @param string $value New transaction log entry
     * @return void
     */
    protected function _addLog($value)
    {
        if ($this->_maximumLog >= 0 && count($this->_log) >= $this->_maximumLog) {
            array_shift($this->_log);
        }

        $this->_log[] = $value;
    }

    /**
     * Connect to the server using the supplied transport and target
     *
     * An example $remote string may be 'tcp://mail.example.com:25' or 'ssh://hostname.com:2222'
     *
     * @param  string $remote Remote
     * @throws \Zend\Mail\Protocol\Exception
     * @return boolean
     */
    protected function _connect($remote)
    {
        $errorNum = 0;
        $errorStr = '';

        // open connection
        $this->_socket = @stream_socket_client($remote, $errorNum, $errorStr, self::TIMEOUT_CONNECTION);

        if ($this->_socket === false) {
            if ($errorNum == 0) {
                $errorStr = 'Could not open socket';
            }
            throw new Protocol\Exception\RuntimeException($errorStr);
        }

        if (($result = stream_set_timeout($this->_socket, self::TIMEOUT_CONNECTION)) === false) {
            throw new Protocol\Exception\RuntimeException('Could not set stream timeout');
        }

        return $result;
    }


    /**
     * Disconnect from remote host and free resource
     *
     * @return void
     */
    protected function _disconnect()
    {
        if (is_resource($this->_socket)) {
            fclose($this->_socket);
        }
    }


    /**
     * Send the given request followed by a LINEEND to the server.
     *
     * @param  string $request
     * @throws \Zend\Mail\Protocol\Exception
     * @return integer|boolean Number of bytes written to remote host
     */
    protected function _send($request)
    {
        if (!is_resource($this->_socket)) {
            throw new Protocol\Exception\RuntimeException('No connection has been established to ' . $this->_host);
        }

        $this->_request = $request;

        $result = fwrite($this->_socket, $request . self::EOL);

        // Save request to internal log
        $this->_addLog($request . self::EOL);

        if ($result === false) {
            throw new Protocol\Exception\RuntimeException('Could not send request to ' . $this->_host);
        }

        return $result;
    }


    /**
     * Get a line from the stream.
     *
     * @param  integer $timeout Per-request timeout value if applicable
     * @throws \Zend\Mail\Protocol\Exception
     * @return string
     */
    protected function _receive($timeout = null)
    {
        if (!is_resource($this->_socket)) {
            throw new Protocol\Exception\RuntimeException('No connection has been established to ' . $this->_host);
        }

        // Adapters may wish to supply per-command timeouts according to appropriate RFC
        if ($timeout !== null) {
           stream_set_timeout($this->_socket, $timeout);
        }

        // Retrieve response
        $response = fgets($this->_socket, 1024);

        // Save request to internal log
        $this->_addLog($response);

        // Check meta data to ensure connection is still valid
        $info = stream_get_meta_data($this->_socket);

        if (!empty($info['timed_out'])) {
            throw new Protocol\Exception\RuntimeException($this->_host . ' has timed out');
        }

        if ($response === false) {
            throw new Protocol\Exception\RuntimeException('Could not read from ' . $this->_host);
        }

        return $response;
    }


    /**
     * Parse server response for successful codes
     *
     * Read the response from the stream and check for expected return code.
     * Throws a Zend_Mail_Protocol_Exception if an unexpected code is returned.
     *
     * @param  string|array $code One or more codes that indicate a successful response
     * @throws \Zend\Mail\Protocol\Exception
     * @return string Last line of response string
     */
    protected function _expect($code, $timeout = null)
    {
        $this->_response = array();
        $cmd  = '';
        $more = '';
        $msg  = '';
        $errMsg = '';

        if (!is_array($code)) {
            $code = array($code);
        }

        do {
            $this->_response[] = $result = $this->_receive($timeout);
            list($cmd, $more, $msg) = preg_split('/([\s-]+)/', $result, 2, PREG_SPLIT_DELIM_CAPTURE);

            if ($errMsg !== '') {
                $errMsg .= ' ' . $msg;
            } elseif ($cmd === null || !in_array($cmd, $code)) {
                $errMsg =  $msg;
            }

        } while (strpos($more, '-') === 0); // The '-' message prefix indicates an information string instead of a response string.

        if ($errMsg !== '') {
            throw new Protocol\Exception\RuntimeException($errMsg);
        }

        return $msg;
    }
}
Java
/* Copyright (c) 2013, Groupon, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of GROUPON nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package com.groupon.mapreduce.mongo.in; import com.groupon.mapreduce.mongo.WritableBSONObject; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import java.io.IOException; import java.util.Iterator; /** * This reads Mongo Records from an Extent and returns Hadoop Records as WritableBSONObjects. 
The key * returned to the Mapper is the _id field from the Mongo Record as Text. */ public class MongoRecordReader extends RecordReader<Text, WritableBSONObject> { private Record current = null; private Iterator<Record> iterator = null; private FileSystem fs; @Override public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException { MongoInputSplit mongoInputSplit = (MongoInputSplit) inputSplit; fs = ((MongoInputSplit) inputSplit).getExtent().getPath().getFileSystem(taskAttemptContext.getConfiguration()); iterator = mongoInputSplit.getExtent().iterator(fs); } @Override public boolean nextKeyValue() throws IOException, InterruptedException { if (!iterator.hasNext()) return false; current = iterator.next(); return true; } @Override public Text getCurrentKey() throws IOException, InterruptedException { return new Text(current.getId(fs)); } @Override public WritableBSONObject getCurrentValue() throws IOException, InterruptedException { return new WritableBSONObject(current.getContent(fs)); } @Override public float getProgress() throws IOException, InterruptedException { if (!iterator.hasNext()) return 1.0f; return 0.0f; } @Override public void close() throws IOException { } }
Java
import numpy as np

from Coupling import Coupling


class Coupling2DCavities2D(Coupling):
    """
    Coupling for cavity2D to cavity transmission.
    """

    @property
    def impedance_from(self):
        """
        Chooses the right impedance of subsystem_from.
        Applies boundary conditions correction as well.
        """
        return self.subsystem_from.impedance

    @property
    def impedance_to(self):
        """
        Chooses the right impedance of subsystem_to.
        Applies boundary conditions correction as well.
        """
        return self.subsystem_to.impedance

    @property
    def tau(self):
        """
        Transmission coefficient.

        NOTE(review): currently returns an all-zero array of length
        ``self.frequency.amount`` — i.e. no transmission is modeled yet.
        Presumably a placeholder; confirm before relying on :attr:`clf`.
        """
        return np.zeros(self.frequency.amount)

    @property
    def clf(self):
        """
        Coupling loss factor for transmission from a 2D cavity to a cavity.

        .. math:: \\eta_{12} = \\frac{ \\tau_{12}}{4 \\pi}

        See BAC, equation 3.14

        Since :attr:`tau` is currently all zeros, this is also all zeros.
        """
        return self.tau / (4.0 * np.pi)
Java
/* Generated by Font Squirrel (http://www.fontsquirrel.com) on June 7, 2012 02:07:42 PM America/New_York */
/* Each rule declares font-weight twice on purpose: a keyword fallback for old
   browsers followed by the numeric value that modern browsers honor.
   FIX: the Semibold faces previously declared weight 700, colliding with the
   Bold faces (also 700) in the same family; semibold is now mapped to 600. */

@font-face {
    font-family: 'OpenSans';
    src: url('OpenSans-Light-webfont.eot');
    src: url('OpenSans-Light-webfont.eot?#iefix') format('embedded-opentype'),
         url('OpenSans-Light-webfont.woff') format('woff'),
         url('OpenSans-Light-webfont.ttf') format('truetype'),
         url('OpenSans-Light-webfont.svg#OpenSansLight') format('svg');
    font-weight: lighter;
    font-weight: 300;
    font-style: normal;
}

@font-face {
    font-family: 'OpenSans';
    src: url('OpenSans-LightItalic-webfont.eot');
    src: url('OpenSans-LightItalic-webfont.eot?#iefix') format('embedded-opentype'),
         url('OpenSans-LightItalic-webfont.woff') format('woff'),
         url('OpenSans-LightItalic-webfont.ttf') format('truetype'),
         url('OpenSans-LightItalic-webfont.svg#OpenSansLightItalic') format('svg');
    font-weight: lighter;
    font-weight: 300;
    font-style: italic;
}

@font-face {
    font-family: 'OpenSans';
    src: url('OpenSans-Regular-webfont.eot');
    src: url('OpenSans-Regular-webfont.eot?#iefix') format('embedded-opentype'),
         url('OpenSans-Regular-webfont.woff') format('woff'),
         url('OpenSans-Regular-webfont.ttf') format('truetype'),
         url('OpenSans-Regular-webfont.svg#OpenSansRegular') format('svg');
    font-weight: normal;
    font-weight: 400;
    font-style: normal;
}

@font-face {
    font-family: 'OpenSans';
    src: url('OpenSans-Italic-webfont.eot');
    src: url('OpenSans-Italic-webfont.eot?#iefix') format('embedded-opentype'),
         url('OpenSans-Italic-webfont.woff') format('woff'),
         url('OpenSans-Italic-webfont.ttf') format('truetype'),
         url('OpenSans-Italic-webfont.svg#OpenSansItalic') format('svg');
    font-weight: normal;
    font-weight: 400;
    font-style: italic;
}

@font-face {
    font-family: 'OpenSans';
    src: url('OpenSans-Semibold-webfont.eot');
    src: url('OpenSans-Semibold-webfont.eot?#iefix') format('embedded-opentype'),
         url('OpenSans-Semibold-webfont.woff') format('woff'),
         url('OpenSans-Semibold-webfont.ttf') format('truetype'),
         url('OpenSans-Semibold-webfont.svg#OpenSansSemibold') format('svg');
    font-weight: bold;
    font-weight: 600;
    font-style: normal;
}

@font-face {
    font-family: 'OpenSans';
    src: url('OpenSans-SemiboldItalic-webfont.eot');
    src: url('OpenSans-SemiboldItalic-webfont.eot?#iefix') format('embedded-opentype'),
         url('OpenSans-SemiboldItalic-webfont.woff') format('woff'),
         url('OpenSans-SemiboldItalic-webfont.ttf') format('truetype'),
         url('OpenSans-SemiboldItalic-webfont.svg#OpenSansSemiboldItalic') format('svg');
    font-weight: bold;
    font-weight: 600;
    font-style: italic;
}

@font-face {
    font-family: 'OpenSans';
    src: url('OpenSans-Bold-webfont.eot');
    src: url('OpenSans-Bold-webfont.eot?#iefix') format('embedded-opentype'),
         url('OpenSans-Bold-webfont.woff') format('woff'),
         url('OpenSans-Bold-webfont.ttf') format('truetype'),
         url('OpenSans-Bold-webfont.svg#OpenSansBold') format('svg');
    font-weight: bolder;
    font-weight: 700;
    font-style: normal;
}

@font-face {
    font-family: 'OpenSans';
    src: url('OpenSans-BoldItalic-webfont.eot');
    src: url('OpenSans-BoldItalic-webfont.eot?#iefix') format('embedded-opentype'),
         url('OpenSans-BoldItalic-webfont.woff') format('woff'),
         url('OpenSans-BoldItalic-webfont.ttf') format('truetype'),
         url('OpenSans-BoldItalic-webfont.svg#OpenSansBoldItalic') format('svg');
    font-weight: bolder;
    font-weight: 700;
    font-style: italic;
}
Java
#include "Stencil1D.h"

/*
 * Entry point for the 1-D stencil CnC (Concurrent Collections) program.
 *
 * Usage: Stencil1D NUM_TILES TILE_SIZE NUM_TIMESTEPS
 *
 * Builds the graph context from the command-line arguments, launches the
 * graph, and exits when execution completes.
 */
int cncMain(int argc, char *argv[]) {

    /* Require exactly three user arguments; CNC_REQUIRE aborts with the
     * usage message otherwise. */
    CNC_REQUIRE(argc == 4, "Usage: %s NUM_TILES TILE_SIZE NUM_TIMESTEPS\n", argv[0]);

    // Create a new graph context
    Stencil1DCtx *context = Stencil1D_create();

    /* Initialize graph context parameters.
     * NOTE(review): atoi() silently yields 0 on non-numeric input — the
     * usage check above does not validate the values themselves. */
    context->numTiles = atoi(argv[1]);
    context->tileSize = atoi(argv[2]);
    context->lastTimestep = atoi(argv[3]);

    // Launch the graph for execution (no input arguments for the graph itself)
    Stencil1D_launch(NULL, context);

    // Exit when the graph execution completes
    CNC_SHUTDOWN_ON_FINISH(context);

    return 0;
}
Java
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ui/base/clipboard/test/test_clipboard.h" #include <stddef.h> #include <memory> #include <utility> #include "base/containers/contains.h" #include "base/memory/ptr_util.h" #include "base/notreached.h" #include "base/numerics/safe_conversions.h" #include "base/strings/utf_string_conversions.h" #include "build/build_config.h" #include "build/buildflag.h" #include "build/chromecast_buildflags.h" #include "build/chromeos_buildflags.h" #include "skia/ext/skia_utils_base.h" #include "third_party/abseil-cpp/absl/types/optional.h" #include "ui/base/clipboard/clipboard.h" #include "ui/base/clipboard/clipboard_constants.h" #include "ui/base/clipboard/clipboard_monitor.h" #include "ui/base/clipboard/custom_data_helper.h" #include "ui/base/data_transfer_policy/data_transfer_endpoint.h" #include "ui/base/data_transfer_policy/data_transfer_policy_controller.h" #include "ui/gfx/codec/png_codec.h" namespace ui { namespace { bool IsReadAllowed(const DataTransferEndpoint* src, const DataTransferEndpoint* dst) { auto* policy_controller = DataTransferPolicyController::Get(); if (!policy_controller) return true; return policy_controller->IsClipboardReadAllowed(src, dst, absl::nullopt); } } // namespace TestClipboard::TestClipboard() : default_store_buffer_(ClipboardBuffer::kCopyPaste) {} TestClipboard::~TestClipboard() = default; TestClipboard* TestClipboard::CreateForCurrentThread() { base::AutoLock lock(Clipboard::ClipboardMapLock()); auto* clipboard = new TestClipboard; (*Clipboard::ClipboardMapPtr())[base::PlatformThread::CurrentId()] = base::WrapUnique(clipboard); return clipboard; } void TestClipboard::SetLastModifiedTime(const base::Time& time) { last_modified_time_ = time; } void TestClipboard::OnPreShutdown() {} DataTransferEndpoint* TestClipboard::GetSource(ClipboardBuffer buffer) const { return 
// NOTE(review): tail of a method whose signature precedes this chunk —
// left exactly as found; cannot document without the missing header.
  GetStore(buffer).GetDataSource(); }

// Returns the token identifying the current generation of |buffer|'s
// contents; a fresh token is minted on every mutable GetStore() access.
const ClipboardSequenceNumberToken& TestClipboard::GetSequenceNumber(
    ClipboardBuffer buffer) const {
  return GetStore(buffer).sequence_number;
}

// Reports whether |format| can currently be read from |buffer|, honoring
// data-transfer read restrictions via IsReadAllowed().
bool TestClipboard::IsFormatAvailable(
    const ClipboardFormatType& format,
    ClipboardBuffer buffer,
    const ui::DataTransferEndpoint* data_dst) const {
  if (!IsReadAllowed(GetStore(buffer).data_src.get(), data_dst))
    return false;
#if defined(OS_LINUX) || defined(OS_CHROMEOS)
  // The linux clipboard treats the presence of text on the clipboard
  // as the url format being available.
  if (format == ClipboardFormatType::UrlType())
    return IsFormatAvailable(ClipboardFormatType::PlainTextType(), buffer,
                             data_dst);
#endif  // defined(OS_LINUX) || defined(OS_CHROMEOS)
  const DataStore& store = GetStore(buffer);
  // Filenames live in their own member, not in the generic |data| map.
  if (format == ClipboardFormatType::FilenamesType())
    return !store.filenames.empty();
  // Chrome can retrieve an image from the clipboard as either a bitmap or PNG.
  if (format == ClipboardFormatType::PngType() ||
      format == ClipboardFormatType::BitmapType()) {
    return base::Contains(store.data, ClipboardFormatType::PngType()) ||
           base::Contains(store.data, ClipboardFormatType::BitmapType());
  }
  return base::Contains(store.data, format);
}

void TestClipboard::Clear(ClipboardBuffer buffer) {
  GetStore(buffer).Clear();
}

// Builds the list of standard MIME types readable from |buffer|.
// Returns an empty vector when the read is disallowed for |data_dst|.
std::vector<std::u16string> TestClipboard::GetStandardFormats(
    ClipboardBuffer buffer,
    const DataTransferEndpoint* data_dst) const {
  std::vector<std::u16string> types;
  const DataStore& store = GetStore(buffer);
  if (!IsReadAllowed(store.data_src.get(), data_dst))
    return types;
  if (IsFormatAvailable(ClipboardFormatType::PlainTextType(), buffer,
                        data_dst)) {
    types.push_back(base::UTF8ToUTF16(kMimeTypeText));
  }
  if (IsFormatAvailable(ClipboardFormatType::HtmlType(), buffer, data_dst))
    types.push_back(base::UTF8ToUTF16(kMimeTypeHTML));
  if (IsFormatAvailable(ClipboardFormatType::SvgType(), buffer, data_dst))
    types.push_back(base::UTF8ToUTF16(kMimeTypeSvg));
  if (IsFormatAvailable(ClipboardFormatType::RtfType(), buffer, data_dst))
    types.push_back(base::UTF8ToUTF16(kMimeTypeRTF));
  // Either stored representation (PNG or bitmap) is advertised as PNG.
  if (IsFormatAvailable(ClipboardFormatType::PngType(), buffer, data_dst) ||
      IsFormatAvailable(ClipboardFormatType::BitmapType(), buffer, data_dst))
    types.push_back(base::UTF8ToUTF16(kMimeTypePNG));
  if (IsFormatAvailable(ClipboardFormatType::FilenamesType(), buffer, data_dst))
    types.push_back(base::UTF8ToUTF16(kMimeTypeURIList));
  // Custom web data carries its own embedded list of types.
  auto it = store.data.find(ClipboardFormatType::WebCustomDataType());
  if (it != store.data.end())
    ReadCustomDataTypes(it->second.c_str(), it->second.size(), &types);
  return types;
}

void TestClipboard::ReadAvailableTypes(
    ClipboardBuffer buffer,
    const DataTransferEndpoint* data_dst,
    std::vector<std::u16string>* types) const {
  DCHECK(types);
  types->clear();
  if (!IsReadAllowed(GetStore(buffer).data_src.get(), data_dst))
    return;
  *types = GetStandardFormats(buffer, data_dst);
}

// Reads plain text as UTF-16 by converting the stored UTF-8 value.
void TestClipboard::ReadText(ClipboardBuffer buffer,
                             const DataTransferEndpoint* data_dst,
                             std::u16string* result) const {
  if (!IsReadAllowed(GetStore(buffer).data_src.get(), data_dst))
    return;
  std::string result8;
  ReadAsciiText(buffer, data_dst, &result8);
  *result = base::UTF8ToUTF16(result8);
}

// TODO(crbug.com/1103215): |data_dst| should be supported.
void TestClipboard::ReadAsciiText(ClipboardBuffer buffer,
                                  const DataTransferEndpoint* data_dst,
                                  std::string* result) const {
  const DataStore& store = GetStore(buffer);
  if (!IsReadAllowed(store.data_src.get(), data_dst))
    return;
  result->clear();
  auto it = store.data.find(ClipboardFormatType::PlainTextType());
  if (it != store.data.end())
    *result = it->second;
}

// Reads HTML markup plus its source URL; the fragment always spans the
// whole markup ([0, markup->size())).
void TestClipboard::ReadHTML(ClipboardBuffer buffer,
                             const DataTransferEndpoint* data_dst,
                             std::u16string* markup,
                             std::string* src_url,
                             uint32_t* fragment_start,
                             uint32_t* fragment_end) const {
  const DataStore& store = GetStore(buffer);
  if (!IsReadAllowed(store.data_src.get(), data_dst))
    return;
  markup->clear();
  src_url->clear();
  auto it = store.data.find(ClipboardFormatType::HtmlType());
  if (it != store.data.end())
    *markup = base::UTF8ToUTF16(it->second);
  *src_url = store.html_src_url;
  *fragment_start = 0;
  *fragment_end = base::checked_cast<uint32_t>(markup->size());
}

void TestClipboard::ReadSvg(ClipboardBuffer buffer,
                            const DataTransferEndpoint* data_dst,
                            std::u16string* result) const {
  const DataStore& store = GetStore(buffer);
  if (!IsReadAllowed(store.data_src.get(), data_dst))
    return;
  result->clear();
  auto it = store.data.find(ClipboardFormatType::SvgType());
  if (it != store.data.end())
    *result = base::UTF8ToUTF16(it->second);
}

void TestClipboard::ReadRTF(ClipboardBuffer buffer,
                            const DataTransferEndpoint* data_dst,
                            std::string* result) const {
  const DataStore& store = GetStore(buffer);
  if (!IsReadAllowed(store.data_src.get(), data_dst))
    return;
  result->clear();
  auto it = store.data.find(ClipboardFormatType::RtfType());
  if (it != store.data.end())
    *result = it->second;
}

// Always runs |callback|; a disallowed read yields an empty byte vector.
void TestClipboard::ReadPng(ClipboardBuffer buffer,
                            const DataTransferEndpoint* data_dst,
                            ReadPngCallback callback) const {
  const DataStore& store = GetStore(buffer);
  if (!IsReadAllowed(store.data_src.get(), data_dst)) {
    std::move(callback).Run(std::vector<uint8_t>());
    return;
  }
  std::move(callback).Run(store.png);
}

// TODO(crbug.com/1103215): |data_dst| should be supported.
// Intentionally a no-op in the test clipboard.
void TestClipboard::ReadCustomData(ClipboardBuffer buffer,
                                   const std::u16string& type,
                                   const DataTransferEndpoint* data_dst,
                                   std::u16string* result) const {}

void TestClipboard::ReadFilenames(ClipboardBuffer buffer,
                                  const DataTransferEndpoint* data_dst,
                                  std::vector<ui::FileInfo>* result) const {
  const DataStore& store = GetStore(buffer);
  if (!IsReadAllowed(store.data_src.get(), data_dst))
    return;
  *result = store.filenames;
}

// TODO(crbug.com/1103215): |data_dst| should be supported.
// |title| and |url| are each optional; only non-null outputs are filled.
void TestClipboard::ReadBookmark(const DataTransferEndpoint* data_dst,
                                 std::u16string* title,
                                 std::string* url) const {
  const DataStore& store = GetDefaultStore();
  if (!IsReadAllowed(store.data_src.get(), data_dst))
    return;
  if (url) {
    auto it = store.data.find(ClipboardFormatType::UrlType());
    if (it != store.data.end())
      *url = it->second;
  }
  if (title)
    *title = base::UTF8ToUTF16(store.url_title);
}

void TestClipboard::ReadData(const ClipboardFormatType& format,
                             const DataTransferEndpoint* data_dst,
                             std::string* result) const {
  const DataStore& store = GetDefaultStore();
  if (!IsReadAllowed(store.data_src.get(), data_dst))
    return;
  result->clear();
  auto it = store.data.find(format);
  if (it != store.data.end())
    *result = it->second;
}

base::Time TestClipboard::GetLastModifiedTime() const {
  return last_modified_time_;
}

void TestClipboard::ClearLastModifiedTime() {
  last_modified_time_ = base::Time();
}

#if defined(USE_OZONE)
// The test clipboard always pretends a selection buffer exists.
bool TestClipboard::IsSelectionBufferAvailable() const {
  return true;
}
#endif  // defined(USE_OZONE)

// Writes all portable and platform representations into |buffer|.
// |default_store_buffer_| is temporarily redirected so the Write* helpers
// below land in |buffer|, then restored to kCopyPaste.
void TestClipboard::WritePortableAndPlatformRepresentations(
    ClipboardBuffer buffer,
    const ObjectMap& objects,
    std::vector<Clipboard::PlatformRepresentation> platform_representations,
    std::unique_ptr<DataTransferEndpoint> data_src) {
  Clear(buffer);
  default_store_buffer_ = buffer;
  DispatchPlatformRepresentations(std::move(platform_representations));
  for (const auto& kv : objects)
    DispatchPortableRepresentation(kv.first, kv.second);
  default_store_buffer_ = ClipboardBuffer::kCopyPaste;
  GetStore(buffer).SetDataSource(std::move(data_src));
}

void TestClipboard::WriteText(const char* text_data, size_t text_len) {
  std::string text(text_data, text_len);
  GetDefaultStore().data[ClipboardFormatType::PlainTextType()] = text;
#if defined(OS_WIN)
  // Create a dummy entry.
  GetDefaultStore().data[ClipboardFormatType::PlainTextAType()];
#endif
  // Mirror the text into the selection buffer where one exists (X11-style).
  if (IsSupportedClipboardBuffer(ClipboardBuffer::kSelection))
    GetStore(ClipboardBuffer::kSelection)
        .data[ClipboardFormatType::PlainTextType()] = text;
  ClipboardMonitor::GetInstance()->NotifyClipboardDataChanged();
}

void TestClipboard::WriteHTML(const char* markup_data,
                              size_t markup_len,
                              const char* url_data,
                              size_t url_len) {
  // Round-trip through UTF-16, matching how the data will later be read.
  std::u16string markup;
  base::UTF8ToUTF16(markup_data, markup_len, &markup);
  GetDefaultStore().data[ClipboardFormatType::HtmlType()] =
      base::UTF16ToUTF8(markup);
  GetDefaultStore().html_src_url = std::string(url_data, url_len);
}

void TestClipboard::WriteSvg(const char* markup_data, size_t markup_len) {
  std::u16string markup;
  base::UTF8ToUTF16(markup_data, markup_len, &markup);
  GetDefaultStore().data[ClipboardFormatType::SvgType()] =
      base::UTF16ToUTF8(markup);
}

void TestClipboard::WriteRTF(const char* rtf_data, size_t data_len) {
  GetDefaultStore().data[ClipboardFormatType::RtfType()] =
      std::string(rtf_data, data_len);
}

void TestClipboard::WriteFilenames(std::vector<ui::FileInfo> filenames) {
  GetDefaultStore().filenames = std::move(filenames);
}

void TestClipboard::WriteBookmark(const char* title_data,
                                  size_t title_len,
                                  const char* url_data,
                                  size_t url_len) {
  GetDefaultStore().data[ClipboardFormatType::UrlType()] =
      std::string(url_data, url_len);
#if !defined(OS_WIN)
  // Windows does not store a separate bookmark title here.
  GetDefaultStore().url_title = std::string(title_data, title_len);
#endif
}

void TestClipboard::WriteWebSmartPaste() {
  // Create a dummy entry.
  GetDefaultStore().data[ClipboardFormatType::WebKitSmartPasteType()];
}

void TestClipboard::WriteBitmap(const SkBitmap& bitmap) {
  // We expect callers to sanitize `bitmap` to be N32 color type, to avoid
  // out-of-bounds issues due to unexpected bits-per-pixel while copying the
  // bitmap's pixel buffer. This DCHECK is to help alert us if we've missed
  // something.
  DCHECK_EQ(bitmap.colorType(), kN32_SkColorType);
  // Create a dummy entry.
  GetDefaultStore().data[ClipboardFormatType::BitmapType()];
  // The canonical stored representation is the PNG encoding.
  gfx::PNGCodec::EncodeBGRASkBitmap(bitmap, false, &GetDefaultStore().png);
  ClipboardMonitor::GetInstance()->NotifyClipboardDataChanged();
}

void TestClipboard::WriteData(const ClipboardFormatType& format,
                              const char* data_data,
                              size_t data_len) {
  GetDefaultStore().data[format] = std::string(data_data, data_len);
}

TestClipboard::DataStore::DataStore() = default;

// Deep-copies |other|, duplicating the owned DataTransferEndpoint when set.
TestClipboard::DataStore::DataStore(const DataStore& other) {
  sequence_number = other.sequence_number;
  data = other.data;
  url_title = other.url_title;
  html_src_url = other.html_src_url;
  png = other.png;
  data_src = other.data_src ? std::make_unique<DataTransferEndpoint>(
                                  DataTransferEndpoint(*(other.data_src)))
                            : nullptr;
}

TestClipboard::DataStore& TestClipboard::DataStore::operator=(
    const DataStore& other) {
  sequence_number = other.sequence_number;
  data = other.data;
  url_title = other.url_title;
  html_src_url = other.html_src_url;
  png = other.png;
  data_src = other.data_src ? std::make_unique<DataTransferEndpoint>(
                                  DataTransferEndpoint(*(other.data_src)))
                            : nullptr;
  return *this;
}

TestClipboard::DataStore::~DataStore() = default;

// Clears content only; the sequence_number token is left untouched.
void TestClipboard::DataStore::Clear() {
  data.clear();
  url_title.clear();
  html_src_url.clear();
  png.clear();
  filenames.clear();
  data_src.reset();
}

void TestClipboard::DataStore::SetDataSource(
    std::unique_ptr<DataTransferEndpoint> new_data_src) {
  data_src = std::move(new_data_src);
}

DataTransferEndpoint* TestClipboard::DataStore::GetDataSource() const {
  return data_src.get();
}

const TestClipboard::DataStore& TestClipboard::GetStore(
    ClipboardBuffer buffer) const {
  CHECK(IsSupportedClipboardBuffer(buffer));
  return stores_[buffer];
}

// Mutable access mints a fresh sequence token: any caller that might write
// is treated as having changed the buffer's contents.
TestClipboard::DataStore& TestClipboard::GetStore(ClipboardBuffer buffer) {
  CHECK(IsSupportedClipboardBuffer(buffer));
  DataStore& store = stores_[buffer];
  store.sequence_number = ClipboardSequenceNumberToken();
  return store;
}

const TestClipboard::DataStore& TestClipboard::GetDefaultStore() const {
  return GetStore(default_store_buffer_);
}

TestClipboard::DataStore& TestClipboard::GetDefaultStore() {
  return GetStore(default_store_buffer_);
}

}  // namespace ui
Java
'''
The `Filter` hierarchy contains Transformer classes that take a `Stim`
of one type as input and return a `Stim` of the same type as output (but with
some changes to its data).
'''

from .audio import (AudioTrimmingFilter,
                    AudioResamplingFilter)
from .base import TemporalTrimmingFilter
from .image import (ImageCroppingFilter,
                    ImageResizingFilter,
                    PillowImageFilter)
from .text import (WordStemmingFilter,
                   TokenizingFilter,
                   TokenRemovalFilter,
                   PunctuationRemovalFilter,
                   LowerCasingFilter)
from .video import (FrameSamplingFilter,
                    VideoTrimmingFilter)

# Public API of the filters subpackage; keep in sync with the imports above.
__all__ = [
    'AudioTrimmingFilter',
    'AudioResamplingFilter',
    'TemporalTrimmingFilter',
    'ImageCroppingFilter',
    'ImageResizingFilter',
    'PillowImageFilter',
    'WordStemmingFilter',
    'TokenizingFilter',
    'TokenRemovalFilter',
    'PunctuationRemovalFilter',
    'LowerCasingFilter',
    'FrameSamplingFilter',
    'VideoTrimmingFilter'
]
Java
package ch.epfl.yinyang
package transformers

import ch.epfl.yinyang._
import ch.epfl.yinyang.transformers._
import scala.reflect.macros.blackbox.Context
import language.experimental.macros
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
 * Converts captured variables to holes, which will be passed to the generated
 * code at runtime as arguments to the apply method. Exposes all holes in the
 * holeTable, which maps from holeIds to symbolIds.
 *
 * Features covered are:
 *   - identifiers -> `hole[T](classTag[T], holeId)`
 *   - fields (TODO)
 *   - no parameter methods (TODO)
 *   - no parameter functions (TODO)
 */
trait HoleTransformation extends MacroModule with TransformationUtils {
  // Name of the DSL method that materializes a hole in the generated code.
  def holeMethod: String
  import c.universe._

  /** SymbolIds indexed by holeIds (position in the buffer == holeId). */
  val holeTable = new ArrayBuffer[Int]

  object HoleTransformer {
    // Runs the transformation over `tree`, rewriting every identifier whose
    // symbol is listed in `toHoles`. `className` is currently unused here.
    def apply(toHoles: List[Symbol] = Nil, className: String)(tree: Tree) = {
      val t = new HoleTransformer(toHoles map symbolId).transform(tree)
      log("holeTransformed (transforming " + toHoles + "): " + code(t), 2)
      log("holeTable (holeId -> symbolId): " + holeTable, 2)
      t
    }
  }

  /**
   * Transforms all identifiers with symbolIds in `toHoles` to
   * `hole[T](classTag[T], holeId)` and builds the holeTable mapping from
   * holeIds to symbolIds.
   */
  class HoleTransformer(toHoles: List[Int]) extends Transformer {

    override def transform(tree: Tree): Tree = tree match {
      case i @ Ident(s) if toHoles contains symbolId(i.symbol) => {
        // Reuse the existing holeId for a symbol seen before; otherwise
        // append it, so each captured symbol gets exactly one hole slot.
        val index = {
          val sId = symbolId(i.symbol)
          if (holeTable.contains(sId))
            holeTable.indexOf(sId)
          else {
            holeTable += symbolId(i.symbol)
            holeTable.size - 1
          }
        }
        // Emit: this.<holeMethod>(this.runtimeType[T], <index>)
        // where T is the widened type of the original identifier.
        Apply(
          Select(This(typeNames.EMPTY), TermName(holeMethod)),
          List(
            TypeApply(
              Select(This(typeNames.EMPTY), TermName("runtimeType")),
              List(TypeTree(i.tpe.widen))),
            Literal(Constant(index))))
      }
      case _ =>
        super.transform(tree)
    }
  }
}
Java
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = build

# Internal variables.
# PAPEROPT_$(PAPER) expands to the matching paper-size flag (or nothing).
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	-rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

# Rebuild-and-serve loop; requires sphinx-autobuild to be installed.
livehtml:
	sphinx-autobuild -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/python-sunlight.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/python-sunlight.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/python-sunlight"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/python-sunlight"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	make -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
Java
<?php
/**
 * Zend Framework
 *
 * LICENSE
 *
 * This source file is subject to the new BSD license that is bundled
 * with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://framework.zend.com/license/new-bsd
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to [email protected] so we can send you a copy immediately.
 *
 * @category   Zend
 * @package    Zend_Tool
 * @subpackage Framework
 * @copyright  Copyright (c) 2005-2011 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd New BSD License
 */

/**
 * @namespace
 */
namespace Zend\Tool\Project\Provider;

/**
 * Provider that manages application-wide settings of a ZF project,
 * currently the application namespace prefix (config key `appnamespace`).
 *
 * @uses       \Zend\Tool\Framework\Provider\Pretendable
 * @uses       \Zend\Tool\Project\Exception
 * @uses       \Zend\Tool\Project\Provider\AbstractProvider
 * @category   Zend
 * @package    Zend_Tool
 * @copyright  Copyright (c) 2005-2011 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd New BSD License
 */
class Application
    extends AbstractProvider
    implements \Zend\Tool\Framework\Provider\Pretendable
{
    protected $_specialties = array('ClassNamePrefix');

    /**
     * Replaces the project's class-name prefix (`appnamespace`) with
     * $classNamePrefix, updating application.ini and the project profile.
     * A trailing backslash is appended when missing.
     *
     * @param string $classNamePrefix Prefix of classes
     * @param bool   $force           (currently disabled, see signature)
     * @throws \Zend\Tool\Project\Exception when the prefix is already set
     */
    public function changeClassNamePrefix($classNamePrefix /* , $force = false */)
    {
        $profile = $this->_loadProfile(self::NO_PROFILE_THROW_EXCEPTION);

        // Keep the user-supplied value so we can report any normalization.
        $originalClassNamePrefix = $classNamePrefix;

        if (substr($classNamePrefix, -1) != '\\') {
            $classNamePrefix .= '\\';
        }

        $configFileResource = $profile->search('ApplicationConfigFile');
        $zc = $configFileResource->getAsZendConfig('production');

        if ($zc->appnamespace == $classNamePrefix) {
            throw new \Zend\Tool\Project\Exception('The requested name ' . $classNamePrefix . ' is already the prefix.');
        }

        // remove the old
        $configFileResource->removeStringItem('appnamespace', 'production');
        $configFileResource->create();

        // add the new
        $configFileResource->addStringItem('appnamespace', $classNamePrefix, 'production', true);
        $configFileResource->create();

        // update the project profile
        $applicationDirectory = $profile->search('ApplicationDirectory');
        $applicationDirectory->setClassNamePrefix($classNamePrefix);

        $response = $this->_registry->getResponse();

        // Tell the user if we silently appended the trailing backslash.
        if ($originalClassNamePrefix !== $classNamePrefix) {
            $response->appendContent(
                'Note: the name provided "' . $originalClassNamePrefix . '" was'
                    . ' altered to "' . $classNamePrefix . '" for correctness.',
                array('color' => 'yellow')
            );
        }

        // note to the user
        $response->appendContent('Note: All existing models will need to be altered to this new namespace by hand', array('color' => 'yellow'));
        $response->appendContent('application.ini updated with new appnamespace ' . $classNamePrefix);

        // store profile
        $this->_storeProfile();
    }
}
Java
/*
 *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vp9/common/vp9_convolve.h"

#include <assert.h>

#include "./vpx_config.h"
#include "./vp9_rtcd.h"
#include "vp9/common/vp9_common.h"
#include "vpx/vpx_integer.h"
#include "vpx_ports/mem.h"

#define VP9_FILTER_WEIGHT 128
#define VP9_FILTER_SHIFT  7

/* Assume a bank of 16 filters to choose from. There are two implementations
 * for filter wrapping behavior, since we want to be able to pick which filter
 * to start with. We could either:
 *
 * 1) make filter_ a pointer to the base of the filter array, and then add an
 *    additional offset parameter, to choose the starting filter.
 * 2) use a pointer to 2 periods worth of filters, so that even if the original
 *    phase offset is at 15/16, we'll have valid data to read. The filter
 *    tables become [32][8], and the second half is duplicated.
 * 3) fix the alignment of the filter tables, so that we know the 0/16 is
 *    always 256 byte aligned.
 *
 * Implementations 2 and 3 are likely preferable, as they avoid an extra 2
 * parameters, and switching between them is trivial, with the
 * ALIGN_FILTERS_256 macro, below.
 */
#define ALIGN_FILTERS_256 1

/* Horizontal subpixel convolution. |filter_x0| points at the initial-phase
 * filter inside a 256-byte-aligned 16x|taps| bank; |x_step_q4| advances the
 * phase in 1/16-pel (Q4) steps per output pixel. */
static void convolve_horiz_c(const uint8_t *src, int src_stride,
                             uint8_t *dst, int dst_stride,
                             const int16_t *filter_x0, int x_step_q4,
                             const int16_t *filter_y, int y_step_q4,
                             int w, int h, int taps) {
  int x, y, k, sum;
  const int16_t *filter_x_base = filter_x0;

#if ALIGN_FILTERS_256
  /* Recover the bank base from the aligned pointer (see option 3 above). */
  filter_x_base = (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
#endif

  /* Adjust base pointer address for this source line */
  src -= taps / 2 - 1;

  for (y = 0; y < h; ++y) {
    /* Pointer to filter to use */
    const int16_t *filter_x = filter_x0;

    /* Initial phase offset */
    int x0_q4 = (filter_x - filter_x_base) / taps;
    int x_q4 = x0_q4;

    for (x = 0; x < w; ++x) {
      /* Per-pixel src offset */
      int src_x = (x_q4 - x0_q4) >> 4;

      for (sum = 0, k = 0; k < taps; ++k) {
        sum += src[src_x + k] * filter_x[k];
      }
      /* Round and clamp to an 8-bit pixel. */
      sum += (VP9_FILTER_WEIGHT >> 1);
      dst[x] = clip_pixel(sum >> VP9_FILTER_SHIFT);

      /* Adjust source and filter to use for the next pixel */
      x_q4 += x_step_q4;
      filter_x = filter_x_base + (x_q4 & 0xf) * taps;
    }
    src += src_stride;
    dst += dst_stride;
  }
}

/* Same as convolve_horiz_c, but averages the result into |dst|
 * (rounded (dst + pred + 1) / 2). */
static void convolve_avg_horiz_c(const uint8_t *src, int src_stride,
                                 uint8_t *dst, int dst_stride,
                                 const int16_t *filter_x0, int x_step_q4,
                                 const int16_t *filter_y, int y_step_q4,
                                 int w, int h, int taps) {
  int x, y, k, sum;
  const int16_t *filter_x_base = filter_x0;

#if ALIGN_FILTERS_256
  filter_x_base = (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
#endif

  /* Adjust base pointer address for this source line */
  src -= taps / 2 - 1;

  for (y = 0; y < h; ++y) {
    /* Pointer to filter to use */
    const int16_t *filter_x = filter_x0;

    /* Initial phase offset */
    int x0_q4 = (filter_x - filter_x_base) / taps;
    int x_q4 = x0_q4;

    for (x = 0; x < w; ++x) {
      /* Per-pixel src offset */
      int src_x = (x_q4 - x0_q4) >> 4;

      for (sum = 0, k = 0; k < taps; ++k) {
        sum += src[src_x + k] * filter_x[k];
      }
      sum += (VP9_FILTER_WEIGHT >> 1);
      dst[x] = (dst[x] + clip_pixel(sum >> VP9_FILTER_SHIFT) + 1) >> 1;

      /* Adjust source and filter to use for the next pixel */
      x_q4 += x_step_q4;
      filter_x = filter_x_base + (x_q4 & 0xf) * taps;
    }
    src += src_stride;
    dst += dst_stride;
  }
}

/* Vertical subpixel convolution; column-major traversal of |src|/|dst|. */
static void convolve_vert_c(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride,
                            const int16_t *filter_x, int x_step_q4,
                            const int16_t *filter_y0, int y_step_q4,
                            int w, int h, int taps) {
  int x, y, k, sum;
  const int16_t *filter_y_base = filter_y0;

#if ALIGN_FILTERS_256
  filter_y_base = (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
#endif

  /* Adjust base pointer address for this source column */
  src -= src_stride * (taps / 2 - 1);

  for (x = 0; x < w; ++x) {
    /* Pointer to filter to use */
    const int16_t *filter_y = filter_y0;

    /* Initial phase offset */
    int y0_q4 = (filter_y - filter_y_base) / taps;
    int y_q4 = y0_q4;

    for (y = 0; y < h; ++y) {
      /* Per-pixel src offset */
      int src_y = (y_q4 - y0_q4) >> 4;

      for (sum = 0, k = 0; k < taps; ++k) {
        sum += src[(src_y + k) * src_stride] * filter_y[k];
      }
      sum += (VP9_FILTER_WEIGHT >> 1);
      dst[y * dst_stride] = clip_pixel(sum >> VP9_FILTER_SHIFT);

      /* Adjust source and filter to use for the next pixel */
      y_q4 += y_step_q4;
      filter_y = filter_y_base + (y_q4 & 0xf) * taps;
    }
    ++src;
    ++dst;
  }
}

/* Vertical subpixel convolution with averaging into |dst|. */
static void convolve_avg_vert_c(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride,
                                const int16_t *filter_x, int x_step_q4,
                                const int16_t *filter_y0, int y_step_q4,
                                int w, int h, int taps) {
  int x, y, k, sum;
  const int16_t *filter_y_base = filter_y0;

#if ALIGN_FILTERS_256
  filter_y_base = (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
#endif

  /* Adjust base pointer address for this source column */
  src -= src_stride * (taps / 2 - 1);

  for (x = 0; x < w; ++x) {
    /* Pointer to filter to use */
    const int16_t *filter_y = filter_y0;

    /* Initial phase offset */
    int y0_q4 = (filter_y - filter_y_base) / taps;
    int y_q4 = y0_q4;

    for (y = 0; y < h; ++y) {
      /* Per-pixel src offset */
      int src_y = (y_q4 - y0_q4) >> 4;

      for (sum = 0, k = 0; k < taps; ++k) {
        sum += src[(src_y + k) * src_stride] * filter_y[k];
      }
      sum += (VP9_FILTER_WEIGHT >> 1);
      dst[y * dst_stride] =
          (dst[y * dst_stride] + clip_pixel(sum >> VP9_FILTER_SHIFT) + 1) >> 1;

      /* Adjust source and filter to use for the next pixel */
      y_q4 += y_step_q4;
      filter_y = filter_y_base + (y_q4 & 0xf) * taps;
    }
    ++src;
    ++dst;
  }
}

/* Separable 2-D convolution: horizontal pass into a stack buffer, then a
 * vertical pass into |dst|. */
static void convolve_c(const uint8_t *src, int src_stride,
                       uint8_t *dst, int dst_stride,
                       const int16_t *filter_x, int x_step_q4,
                       const int16_t *filter_y, int y_step_q4,
                       int w, int h, int taps) {
  /* Fixed size intermediate buffer places limits on parameters.
   * Maximum intermediate_height is 135, for y_step_q4 == 32,
   * h == 64, taps == 8.
   */
  uint8_t temp[64 * 135];
  int intermediate_height = ((h * y_step_q4) >> 4) + taps - 1;

  assert(w <= 64);
  assert(h <= 64);
  assert(taps <= 8);
  assert(y_step_q4 <= 32);

  if (intermediate_height < h)
    intermediate_height = h;

  convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride,
                   temp, 64,
                   filter_x, x_step_q4, filter_y, y_step_q4,
                   w, intermediate_height, taps);
  convolve_vert_c(temp + 64 * (taps / 2 - 1), 64, dst, dst_stride,
                  filter_x, x_step_q4, filter_y, y_step_q4,
                  w, h, taps);
}

/* As convolve_c, but the vertical pass averages into |dst|. */
static void convolve_avg_c(const uint8_t *src, int src_stride,
                           uint8_t *dst, int dst_stride,
                           const int16_t *filter_x, int x_step_q4,
                           const int16_t *filter_y, int y_step_q4,
                           int w, int h, int taps) {
  /* Fixed size intermediate buffer places limits on parameters.
   * Maximum intermediate_height is 135, for y_step_q4 == 32,
   * h == 64, taps == 8.
   */
  uint8_t temp[64 * 135];
  int intermediate_height = ((h * y_step_q4) >> 4) + taps - 1;

  assert(w <= 64);
  assert(h <= 64);
  assert(taps <= 8);
  assert(y_step_q4 <= 32);

  if (intermediate_height < h)
    intermediate_height = h;

  convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride,
                   temp, 64,
                   filter_x, x_step_q4, filter_y, y_step_q4,
                   w, intermediate_height, taps);
  convolve_avg_vert_c(temp + 64 * (taps / 2 - 1), 64, dst, dst_stride,
                      filter_x, x_step_q4, filter_y, y_step_q4,
                      w, h, taps);
}

/* Public 8-tap entry points; thin wrappers fixing taps == 8. */
void vp9_convolve8_horiz_c(const uint8_t *src, int src_stride,
                           uint8_t *dst, int dst_stride,
                           const int16_t *filter_x, int x_step_q4,
                           const int16_t *filter_y, int y_step_q4,
                           int w, int h) {
  convolve_horiz_c(src, src_stride, dst, dst_stride,
                   filter_x, x_step_q4, filter_y, y_step_q4,
                   w, h, 8);
}

void vp9_convolve8_avg_horiz_c(const uint8_t *src, int src_stride,
                               uint8_t *dst, int dst_stride,
                               const int16_t *filter_x, int x_step_q4,
                               const int16_t *filter_y, int y_step_q4,
                               int w, int h) {
  convolve_avg_horiz_c(src, src_stride, dst, dst_stride,
                       filter_x, x_step_q4, filter_y, y_step_q4,
                       w, h, 8);
}

void vp9_convolve8_vert_c(const uint8_t *src, int src_stride,
                          uint8_t *dst, int dst_stride,
                          const int16_t *filter_x, int x_step_q4,
                          const int16_t *filter_y, int y_step_q4,
                          int w, int h) {
  convolve_vert_c(src, src_stride, dst, dst_stride,
                  filter_x, x_step_q4, filter_y, y_step_q4,
                  w, h, 8);
}

void vp9_convolve8_avg_vert_c(const uint8_t *src, int src_stride,
                              uint8_t *dst, int dst_stride,
                              const int16_t *filter_x, int x_step_q4,
                              const int16_t *filter_y, int y_step_q4,
                              int w, int h) {
  convolve_avg_vert_c(src, src_stride, dst, dst_stride,
                      filter_x, x_step_q4, filter_y, y_step_q4,
                      w, h, 8);
}

void vp9_convolve8_c(const uint8_t *src, int src_stride,
                     uint8_t *dst, int dst_stride,
                     const int16_t *filter_x, int x_step_q4,
                     const int16_t *filter_y, int y_step_q4,
                     int w, int h) {
  convolve_c(src, src_stride, dst, dst_stride,
             filter_x, x_step_q4, filter_y, y_step_q4,
             w, h, 8);
}

/* Full 8-tap convolve into a scratch block, then average with |dst|. */
void vp9_convolve8_avg_c(const uint8_t *src, int src_stride,
                         uint8_t *dst, int dst_stride,
                         const int16_t *filter_x, int x_step_q4,
                         const int16_t *filter_y, int y_step_q4,
                         int w, int h) {
  /* Fixed size intermediate buffer places limits on parameters. */
  DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 64 * 64);
  assert(w <= 64);
  assert(h <= 64);

  vp9_convolve8(src, src_stride,
                temp, 64,
                filter_x, x_step_q4,
                filter_y, y_step_q4,
                w, h);
  vp9_convolve_avg(temp, 64,
                   dst, dst_stride,
                   NULL, 0, /* These unused parameters should be removed! */
                   NULL, 0, /* These unused parameters should be removed! */
                   w, h);
}

/* Plain block copy; filter arguments are unused (prediction API shape). */
void vp9_convolve_copy(const uint8_t *src, int src_stride,
                       uint8_t *dst, int dst_stride,
                       const int16_t *filter_x, int filter_x_stride,
                       const int16_t *filter_y, int filter_y_stride,
                       int w, int h) {
  /* Fast paths for common block sizes, generic memcpy loop otherwise. */
  if (w == 16 && h == 16) {
    vp9_copy_mem16x16(src, src_stride, dst, dst_stride);
  } else if (w == 8 && h == 8) {
    vp9_copy_mem8x8(src, src_stride, dst, dst_stride);
  } else if (w == 8 && h == 4) {
    vp9_copy_mem8x4(src, src_stride, dst, dst_stride);
  } else {
    int r;

    for (r = h; r > 0; --r) {
      memcpy(dst, src, w);
      src += src_stride;
      dst += dst_stride;
    }
  }
}

/* Rounded pixel-wise average of |src| into |dst|; filters are unused. */
void vp9_convolve_avg(const uint8_t *src, int src_stride,
                      uint8_t *dst, int dst_stride,
                      const int16_t *filter_x, int filter_x_stride,
                      const int16_t *filter_y, int filter_y_stride,
                      int w, int h) {
  int x, y;

  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      dst[x] = (dst[x] + src[x] + 1) >> 1;
    }
    src += src_stride;
    dst += dst_stride;
  }
}
Java
#!/usr/bin/env python
"""Packaging script for the reddit gold plugin.

Registers the plugin with the reddit core (``r2``) via the ``r2.plugin``
entry-point group so the main application can discover it.
"""
from setuptools import setup, find_packages

# Keyword arguments for setup(), gathered in one place for readability.
PACKAGE_INFO = dict(
    name='reddit_gold',
    description='reddit gold',
    version='0.1',
    author='Chad Birch',
    author_email='[email protected]',
    packages=find_packages(),
    # The plugin only depends on the reddit core package.
    install_requires=[
        'r2',
    ],
    # Expose the Gold plugin class to r2's plugin loader.
    entry_points={
        'r2.plugin': ['gold = reddit_gold:Gold']
    },
    include_package_data=True,
    zip_safe=False,
)

setup(**PACKAGE_INFO)
Java
<?php
/*
 *  ircPlanet Services for ircu
 *  Copyright (c) 2005 Brian Cline.
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *    1. Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *    2. Redistributions in binary form must reproduce the above copyright notice,
 *       this list of conditions and the following disclaimer in the documentation
 *       and/or other materials provided with the distribution.
 *    3. Neither the name of ircPlanet nor the names of its contributors may be
 *       used to endorse or promote products derived from this software without
 *       specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

// NOTE(review): command-handler fragment — $this, $bot, $user, $pargs,
// $chan_name and $chan_reg are presumably bound by the including scope;
// confirm against the dispatcher that includes this file.

// Bail out unless the channel exists and the service bot is on it.
if (!($chan = $this->getChannel($chan_name))) {
    $bot->noticef($user, "Nobody is on channel %s.", $chan_name);
    return false;
}

if (!$chan->isOn($bot->getNumeric())) {
    $bot->noticef($user, 'I am not on %s.', $chan->getName());
    return false;
}

// Everything after the second argument is the ban/kick reason.
$reason = assemble($pargs, 2);

// Ban, deop and kick every non-bot user except the requester.
$users = $this->getChannelUsersByMask($chan_name);
foreach ($users as $numeric => $chan_user) {
    if (!$chan_user->isBot() && $chan_user != $user) {
        $mask = $chan_user->getHostMask();
        // Record the ban persistently, attributed to the requester.
        $ban = new DB_Ban($chan_reg->getId(), $user->getAccountId(), $mask);
        $ban->setReason($reason);
        $chan_reg->addBan($ban);
        // Apply it live: deop + ban, then kick, then track it on the channel.
        $bot->mode($chan->getName(), "-o+b $numeric $mask");
        $bot->kick($chan->getName(), $numeric, $reason);
        $chan->addBan($mask);
    }
}

$chan_reg->save();
Java
# -*- coding: utf-8 -*-
"""Django admin registration for the video plugin."""
from django.contrib import admin

from ionyweb.plugin_app.plugin_video.models import Plugin_Video

# Expose Plugin_Video in the admin site with the default ModelAdmin options.
admin.site.register(Plugin_Video)
Java
/*
Copyright (C) 2009-2010 Electronic Arts, Inc.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1.  Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.
2.  Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
3.  Neither the name of Electronic Arts, Inc. ("EA") nor the names of
    its contributors may be used to endorse or promote products derived
    from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

///////////////////////////////////////////////////////////////////////////////
// FnEncode.h
//
// Copyright (c) 2007, Electronic Arts Inc. All rights reserved.
// Created by Alex Liberman and Talin.
//
// Character transcoding functions for filenames (UTF-8 <-> UTF-16), plus a
// small set of self-contained string helpers used by this module so it does
// not depend on EACRT.
///////////////////////////////////////////////////////////////////////////////

#ifndef EAIO_FNENCODE_H
#define EAIO_FNENCODE_H

#ifndef INCLUDED_eabase_H
    #include <EABase/eabase.h>
#endif
#include <EAIO/internal/Config.h>
#ifndef EAIO_EASTREAM_H
    #include <EAIO/EAStream.h>      // for kLengthNull
#endif
#ifndef EAIO_PATHSTRING_H
    #include <EAIO/PathString.h>    // for ConvertPathUTF{8,16}ToUTF{8,16}
#endif

namespace EA
{
    namespace IO
    {
        /// StrlcpyUTF16ToUTF8
        /// Copies a UTF16 string to a UTF8 string, but otherwise acts similar to strlcpy except for the
        /// return value.
        /// Returns the strlen of the destination string. If destination pointer is NULL, returns the
        /// strlen of the would-be string.
        /// Specifying a source length of kLengthNull copies from the source string up to the first NULL
        /// character.
        EAIO_API size_t StrlcpyUTF16ToUTF8(char8_t* pDest, size_t nDestLength, const char16_t* pSrc, size_t nSrcLength = kLengthNull);

        /// StrlcpyUTF8ToUTF16
        /// Copies a UTF8 string to a UTF16 string, but otherwise acts similar to strlcpy except for the
        /// return value.
        /// Returns the strlen of the destination string. If destination pointer is NULL, returns the
        /// strlen of the would-be string.
        /// Specifying a source length of kLengthNull copies from the source string up to the first NULL
        /// character.
        EAIO_API size_t StrlcpyUTF8ToUTF16(char16_t* pDest, size_t nDestLength, const char8_t* pSrc, size_t nSrcLength = kLengthNull);

        ///////////////////////////////////////////////////////////////////////////////
        /// Convenient conversion functions used by EAFileUtil and EAFileNotification
        ///////////////////////////////////////////////////////////////////////////////

        /// ConvertPathUTF8ToUTF16
        /// Expands the destination to the desired size and then performs a Strlcpy
        /// with UTF8->UTF16 conversion.
        /// Returns the number of characters written.
        EAIO_API uint32_t ConvertPathUTF8ToUTF16(Path::PathString16& dstPath16, const char8_t* pSrcPath8);

        /// ConvertPathUTF16ToUTF8
        /// Expands the destination to the desired size and then performs a Strlcpy with
        /// UTF16->UTF8 conversion.
        /// Returns the number of characters written.
        EAIO_API uint32_t ConvertPathUTF16ToUTF8(Path::PathString8& dstPath8, const char16_t* pSrcPath16);

        ///////////////////////////////////////////////////////////////////////////////
        // String comparison, strlen, and strlcpy for this module, since we don't have
        // access to EACRT.
        ///////////////////////////////////////////////////////////////////////////////

        /// StrEq16
        /// Compares two NUL-terminated UTF-16 strings for equality.
        /// NOTE(review): presumably an exact (case-sensitive) comparison — confirm
        /// against the implementation before relying on case behavior.
        EAIO_API bool   StrEq16(const char16_t* str1, const char16_t* str2);

        /// EAIOStrlen8 / EAIOStrlen16
        /// strlen equivalents for char8_t / char16_t strings (length in code units,
        /// not code points).
        EAIO_API size_t EAIOStrlen8(const char8_t* str);
        EAIO_API size_t EAIOStrlen16(const char16_t* str);

        /// EAIOStrlcpy8 / EAIOStrlcpy16
        /// strlcpy equivalents: copy up to nDestCapacity-1 code units and
        /// NUL-terminate. NOTE(review): return value is presumably the source
        /// length, per BSD strlcpy convention — confirm in the implementation.
        EAIO_API size_t EAIOStrlcpy8(char8_t* pDestination, const char8_t* pSource, size_t nDestCapacity);
        EAIO_API size_t EAIOStrlcpy16(char16_t* pDestination, const char16_t* pSource, size_t nDestCapacity);

    } // namespace IO

} // namespace EA

#endif // EAIO_FNENCODE_H
Java
/*
 * Copyright (c) 2009-2015, United States Government, as represented by the Secretary of Health and Human Services.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above
 *       copyright notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the documentation
 *       and/or other materials provided with the distribution.
 *     * Neither the name of the United States Government nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE UNITED STATES GOVERNMENT BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package gov.hhs.fha.nhinc.docquery.nhin.proxy;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.any;
import gov.hhs.fha.nhinc.aspect.NwhinInvocationEvent;
import gov.hhs.fha.nhinc.common.nhinccommon.AssertionType;
import gov.hhs.fha.nhinc.common.nhinccommon.HomeCommunityType;
import gov.hhs.fha.nhinc.common.nhinccommon.NhinTargetSystemType;
import gov.hhs.fha.nhinc.connectmgr.ConnectionManager;
import gov.hhs.fha.nhinc.connectmgr.ConnectionManagerCache;
import gov.hhs.fha.nhinc.docquery.aspect.AdhocQueryRequestDescriptionBuilder;
import gov.hhs.fha.nhinc.docquery.aspect.AdhocQueryResponseDescriptionBuilder;
import gov.hhs.fha.nhinc.messaging.client.CONNECTClient;
import gov.hhs.fha.nhinc.messaging.service.port.ServicePortDescriptor;
import gov.hhs.fha.nhinc.nhinclib.NhincConstants.UDDI_SPEC_VERSION;
import ihe.iti.xds_b._2007.RespondingGatewayQueryPortType;

import java.lang.reflect.Method;

import javax.xml.ws.Service;

import oasis.names.tc.ebxml_regrep.xsd.query._3.AdhocQueryRequest;

import org.jmock.Mockery;
import org.jmock.integration.junit4.JMock;
import org.jmock.integration.junit4.JUnit4Mockery;
import org.jmock.lib.legacy.ClassImposteriser;
import org.junit.Test;
import org.junit.runner.RunWith;

/**
 * Unit tests for {@code NhinDocQueryProxyWebServiceSecuredImpl}.
 *
 * <p>Verifies (a) that the outbound Document Query method carries the expected
 * {@link NwhinInvocationEvent} annotation metadata, and (b) interaction behavior
 * against a mocked {@link CONNECTClient} and {@link ConnectionManagerCache}:
 * MTOM must not be enabled, and endpoint lookup must go through
 * {@code getEndpointURLByServiceNameSpecVersion} (guidance-based lookup).
 *
 * <p>Note: mixes two mocking frameworks — JMock (for {@code Service} /
 * {@code RespondingGatewayQueryPortType}) and Mockito (for the client and cache).
 *
 * @author Neil Webb
 */
@RunWith(JMock.class)
public class NhinDocQueryWebServiceProxyTest {
    // JMock context; ClassImposteriser allows mocking concrete classes.
    Mockery context = new JUnit4Mockery() {
        {
            setImposteriser(ClassImposteriser.INSTANCE);
        }
    };
    final Service mockService = context.mock(Service.class);
    final RespondingGatewayQueryPortType mockPort = context.mock(RespondingGatewayQueryPortType.class);

    // Mockito mocks injected into the impl-under-test via the overrides in getImpl().
    @SuppressWarnings("unchecked")
    private CONNECTClient<RespondingGatewayQueryPortType> client = mock(CONNECTClient.class);
    private ConnectionManagerCache cache = mock(ConnectionManagerCache.class);
    // Left null on purpose: the mocked collaborators never dereference them.
    private AdhocQueryRequest request;
    private AssertionType assertion;

    /**
     * The secured Document Query method must be annotated with
     * {@link NwhinInvocationEvent} using the DQ request/response description
     * builders, service type "Document Query", and an empty version string.
     */
    @Test
    public void hasBeginOutboundProcessingEvent() throws Exception {
        Class<NhinDocQueryProxyWebServiceSecuredImpl> clazz = NhinDocQueryProxyWebServiceSecuredImpl.class;
        Method method = clazz.getMethod("respondingGatewayCrossGatewayQuery", AdhocQueryRequest.class,
                AssertionType.class, NhinTargetSystemType.class);
        NwhinInvocationEvent annotation = method.getAnnotation(NwhinInvocationEvent.class);
        assertNotNull(annotation);
        assertEquals(AdhocQueryRequestDescriptionBuilder.class, annotation.beforeBuilder());
        assertEquals(AdhocQueryResponseDescriptionBuilder.class, annotation.afterReturningBuilder());
        assertEquals("Document Query", annotation.serviceType());
        assertEquals("", annotation.version());
    }

    /** Document Query must never enable MTOM on the CONNECT client. */
    @Test
    public void testNoMtom() throws Exception {
        NhinDocQueryProxyWebServiceSecuredImpl impl = getImpl();
        NhinTargetSystemType target = getTarget("1.1");
        impl.respondingGatewayCrossGatewayQuery(request, assertion, target);
        verify(client, never()).enableMtom();
    }

    /** Endpoint resolution must use the spec-version-aware lookup on the cache. */
    @Test
    public void testUsingGuidance() throws Exception {
        NhinDocQueryProxyWebServiceSecuredImpl impl = getImpl();
        NhinTargetSystemType target = getTarget("1.1");

        impl.respondingGatewayCrossGatewayQuery(request, assertion, target);

        verify(cache).getEndpointURLByServiceNameSpecVersion(any(String.class), any(String.class),
                any(UDDI_SPEC_VERSION.class));
    }

    /**
     * Builds a target system with the given home community id and a fixed
     * spec version of "2.0".
     *
     * @param hcidValue home community id to set on the target
     * @return populated {@link NhinTargetSystemType}
     */
    private NhinTargetSystemType getTarget(String hcidValue) {
        NhinTargetSystemType target = new NhinTargetSystemType();
        HomeCommunityType hcid = new HomeCommunityType();
        hcid.setHomeCommunityId(hcidValue);
        target.setHomeCommunity(hcid);
        target.setUseSpecVersion("2.0");
        return target;
    }

    /**
     * Creates the impl under test with its collaborators overridden to return
     * the Mockito mocks, so no real web-service or connection-manager calls
     * are made.
     *
     * @return test double of {@code NhinDocQueryProxyWebServiceSecuredImpl}
     */
    private NhinDocQueryProxyWebServiceSecuredImpl getImpl() {
        return new NhinDocQueryProxyWebServiceSecuredImpl() {

            /*
             * (non-Javadoc)
             *
             * @see
             * gov.hhs.fha.nhinc.docquery.nhin.proxy.NhinDocQueryProxyWebServiceSecuredImpl#getCONNECTClientSecured(
             * gov.hhs.fha.nhinc.messaging.service.port.ServicePortDescriptor,
             * gov.hhs.fha.nhinc.common.nhinccommon.AssertionType, java.lang.String,
             * gov.hhs.fha.nhinc.common.nhinccommon.NhinTargetSystemType)
             */
            @Override
            public CONNECTClient<RespondingGatewayQueryPortType> getCONNECTClientSecured(
                    ServicePortDescriptor<RespondingGatewayQueryPortType> portDescriptor, AssertionType assertion,
                    String url, NhinTargetSystemType target) {
                return client;
            }

            /* (non-Javadoc)
             * @see gov.hhs.fha.nhinc.docquery.nhin.proxy.NhinDocQueryProxyWebServiceSecuredImpl#getCMInstance()
             */
            @Override
            protected ConnectionManager getCMInstance() {
                return cache;
            }
        };
    }
}
Java
// Copyright (c) 2009 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_VIEWS_TABS_NATIVE_VIEW_PHOTOBOOTH_GTK_H_ #define CHROME_BROWSER_VIEWS_TABS_NATIVE_VIEW_PHOTOBOOTH_GTK_H_ #include "chrome/browser/views/tabs/native_view_photobooth.h" class NativeViewPhotoboothGtk : public NativeViewPhotobooth { public: explicit NativeViewPhotoboothGtk(gfx::NativeView new_view); // Destroys the photo booth window. virtual ~NativeViewPhotoboothGtk(); // Replaces the view in the photo booth with the specified one. virtual void Replace(gfx::NativeView new_view); // Paints the current display image of the window into |canvas|, clipped to // |target_bounds|. virtual void PaintScreenshotIntoCanvas(gfx::Canvas* canvas, const gfx::Rect& target_bounds); private: DISALLOW_COPY_AND_ASSIGN(NativeViewPhotoboothGtk); }; #endif // #ifndef CHROME_BROWSER_VIEWS_TABS_NATIVE_VIEW_PHOTOBOOTH_GTK_H_
Java
<?php
/**
 * Zend Framework (http://framework.zend.com/)
 *
 * @link      http://github.com/zendframework/zf2 for the canonical source repository
 * @copyright Copyright (c) 2005-2015 Zend Technologies USA Inc. (http://www.zend.com)
 * @license   http://framework.zend.com/license/new-bsd New BSD License
 */

namespace Zend\Form\Element;

use Traversable;
use Zend\Form\Element;
use Zend\Form\ElementInterface;
use Zend\Form\Exception;
use Zend\Form\Fieldset;
use Zend\Form\FieldsetInterface;
use Zend\Form\FormInterface;
use Zend\Stdlib\ArrayUtils;

/**
 * A fieldset that repeats a single target element/fieldset N times.
 *
 * Supports dynamic add/remove of entries (typically driven by JavaScript),
 * an optional non-rendered "template" entry whose name is a placeholder
 * string that client code substitutes with a real index, and binding of an
 * array/Traversable of objects (one object per entry).
 */
class Collection extends Fieldset
{
    /**
     * Default template placeholder
     */
    const DEFAULT_TEMPLATE_PLACEHOLDER = '__index__';

    /**
     * Element used in the collection
     *
     * @var ElementInterface
     */
    protected $targetElement;

    /**
     * Initial count of target element
     *
     * @var int
     */
    protected $count = 1;

    /**
     * Are new elements allowed to be added dynamically ?
     *
     * @var bool
     */
    protected $allowAdd = true;

    /**
     * Are existing elements allowed to be removed dynamically ?
     *
     * @var bool
     */
    protected $allowRemove = true;

    /**
     * Is the template generated ?
     *
     * @var bool
     */
    protected $shouldCreateTemplate = false;

    /**
     * Placeholder used in template content for making your life easier with JavaScript
     *
     * @var string
     */
    protected $templatePlaceholder = self::DEFAULT_TEMPLATE_PLACEHOLDER;

    /**
     * Whether or not to create new objects during modify
     *
     * @var bool
     */
    protected $createNewObjects = false;

    /**
     * Element used as a template
     *
     * @var ElementInterface|FieldsetInterface
     */
    protected $templateElement;

    /**
     * The index of the last child element or fieldset
     *
     * @var int
     */
    protected $lastChildIndex = -1;

    /**
     * Should child elements must be created on self::prepareElement()?
     *
     * Set to false once populateValues() has added children itself, so that
     * prepareElement() does not duplicate them.
     *
     * @var bool
     */
    protected $shouldCreateChildrenOnPrepareElement = true;

    /**
     * Accepted options for Collection:
     * - target_element: an array or element used in the collection
     * - count: number of times the element is added initially
     * - allow_add: if set to true, elements can be added to the form dynamically (using JavaScript)
     * - allow_remove: if set to true, elements can be removed to the form
     * - should_create_template: if set to true, a template is generated (inside a <span>)
     * - template_placeholder: placeholder used in the data template
     * - create_new_objects: if set to true, bound objects are re-created instead of reused
     *
     * @param  array|Traversable $options
     * @return Collection
     */
    public function setOptions($options)
    {
        parent::setOptions($options);

        if (isset($options['target_element'])) {
            $this->setTargetElement($options['target_element']);
        }

        if (isset($options['count'])) {
            $this->setCount($options['count']);
        }

        if (isset($options['allow_add'])) {
            $this->setAllowAdd($options['allow_add']);
        }

        if (isset($options['allow_remove'])) {
            $this->setAllowRemove($options['allow_remove']);
        }

        if (isset($options['should_create_template'])) {
            $this->setShouldCreateTemplate($options['should_create_template']);
        }

        if (isset($options['template_placeholder'])) {
            $this->setTemplatePlaceholder($options['template_placeholder']);
        }

        if (isset($options['create_new_objects'])) {
            $this->setCreateNewObjects($options['create_new_objects']);
        }

        return $this;
    }

    /**
     * Checks if the object can be set in this fieldset
     *
     * Always true for a collection: the bound "object" is a set of objects,
     * validated in setObject().
     *
     * @param object $object
     * @return bool
     */
    public function allowObjectBinding($object)
    {
        return true;
    }

    /**
     * Set the object used by the hydrator
     * In this case the "object" is a collection of objects
     *
     * @param  array|Traversable $object
     * @return Fieldset|FieldsetInterface
     * @throws Exception\InvalidArgumentException
     */
    public function setObject($object)
    {
        if (!is_array($object) && !$object instanceof Traversable) {
            throw new Exception\InvalidArgumentException(sprintf(
                '%s expects an array or Traversable object argument; received "%s"',
                __METHOD__,
                (is_object($object) ? get_class($object) : gettype($object))
            ));
        }

        $this->object = $object;
        // Grow (never shrink) count so every bound object gets an entry.
        $this->count  = count($object) > $this->count ? count($object) : $this->count;

        return $this;
    }

    /**
     * Populate values
     *
     * Adds/updates one child per data entry, removing children missing from
     * the data (only when allow_remove is enabled).
     *
     * @param array|Traversable $data
     * @throws \Zend\Form\Exception\InvalidArgumentException
     * @throws \Zend\Form\Exception\DomainException
     * @return void
     */
    public function populateValues($data)
    {
        if (!is_array($data) && !$data instanceof Traversable) {
            throw new Exception\InvalidArgumentException(sprintf(
                '%s expects an array or Traversable set of data; received "%s"',
                __METHOD__,
                (is_object($data) ? get_class($data) : gettype($data))
            ));
        }

        // Can't do anything with empty data
        if (empty($data)) {
            return;
        }

        if (!$this->allowRemove && count($data) < $this->count) {
            throw new Exception\DomainException(sprintf(
                'There are fewer elements than specified in the collection (%s). Either set the allow_remove option '
                . 'to true, or re-submit the form.',
                get_class($this)
            ));
        }

        // Check to see if elements have been replaced or removed
        foreach ($this->byName as $name => $elementOrFieldset) {
            if (isset($data[$name])) {
                continue;
            }

            if (!$this->allowRemove) {
                throw new Exception\DomainException(sprintf(
                    'Elements have been removed from the collection (%s) but the allow_remove option is not true.',
                    get_class($this)
                ));
            }

            $this->remove($name);
        }

        foreach ($data as $key => $value) {
            if ($this->has($key)) {
                $elementOrFieldset = $this->get($key);
            } else {
                // New entry in the submitted data: create a child for it and
                // track the highest index seen so prepareElement() won't re-add.
                $elementOrFieldset = $this->addNewTargetElementInstance($key);

                if ($key > $this->lastChildIndex) {
                    $this->lastChildIndex = $key;
                }
            }

            if ($elementOrFieldset instanceof FieldsetInterface) {
                $elementOrFieldset->populateValues($value);
            } else {
                $elementOrFieldset->setAttribute('value', $value);
            }
        }

        if (!$this->createNewObjects()) {
            $this->replaceTemplateObjects();
        }
    }

    /**
     * Checks if this fieldset can bind data
     *
     * @return bool
     */
    public function allowValueBinding()
    {
        return true;
    }

    /**
     * Bind values to the object
     *
     * Returns a numerically re-indexed array: fieldset children delegate to
     * their own bindValues(); plain elements contribute the raw value.
     *
     * @param  array $values
     * @return array|mixed|void
     */
    public function bindValues(array $values = array())
    {
        $collection = array();
        foreach ($values as $name => $value) {
            $element = $this->get($name);

            if ($element instanceof FieldsetInterface) {
                $collection[] = $element->bindValues($value);
            } else {
                $collection[] = $value;
            }
        }

        return $collection;
    }

    /**
     * Set the initial count of target element
     *
     * Negative values are clamped to zero.
     *
     * @param  $count
     * @return Collection
     */
    public function setCount($count)
    {
        $this->count = $count > 0 ? $count : 0;
        return $this;
    }

    /**
     * Get the initial count of target element
     *
     * @return int
     */
    public function getCount()
    {
        return $this->count;
    }

    /**
     * Set the target element
     *
     * An array/Traversable spec is passed through the form factory first.
     *
     * @param  ElementInterface|array|Traversable $elementOrFieldset
     * @return Collection
     * @throws \Zend\Form\Exception\InvalidArgumentException
     */
    public function setTargetElement($elementOrFieldset)
    {
        if (is_array($elementOrFieldset)
            || ($elementOrFieldset instanceof Traversable && !$elementOrFieldset instanceof ElementInterface)
        ) {
            $factory = $this->getFormFactory();
            $elementOrFieldset = $factory->create($elementOrFieldset);
        }

        if (!$elementOrFieldset instanceof ElementInterface) {
            throw new Exception\InvalidArgumentException(sprintf(
                '%s requires that $elementOrFieldset be an object implementing %s; received "%s"',
                __METHOD__,
                __NAMESPACE__ . '\ElementInterface',
                (is_object($elementOrFieldset) ? get_class($elementOrFieldset) : gettype($elementOrFieldset))
            ));
        }

        $this->targetElement = $elementOrFieldset;

        return $this;
    }

    /**
     * Get target element
     *
     * @return ElementInterface|null
     */
    public function getTargetElement()
    {
        return $this->targetElement;
    }

    /**
     * Set allow add
     *
     * @param bool $allowAdd
     * @return Collection
     */
    public function setAllowAdd($allowAdd)
    {
        $this->allowAdd = (bool) $allowAdd;
        return $this;
    }

    /**
     * Get allow add
     *
     * @return bool
     */
    public function allowAdd()
    {
        return $this->allowAdd;
    }

    /**
     * Set allow remove
     *
     * @param bool $allowRemove
     * @return Collection
     */
    public function setAllowRemove($allowRemove)
    {
        $this->allowRemove = (bool) $allowRemove;
        return $this;
    }

    /**
     * Get allow remove
     *
     * @return bool
     */
    public function allowRemove()
    {
        return $this->allowRemove;
    }

    /**
     * If set to true, a template prototype is automatically added to the form to ease the creation of dynamic elements through JavaScript
     *
     * @param bool $shouldCreateTemplate
     * @return Collection
     */
    public function setShouldCreateTemplate($shouldCreateTemplate)
    {
        $this->shouldCreateTemplate = (bool) $shouldCreateTemplate;

        return $this;
    }

    /**
     * Get if the collection should create a template
     *
     * @return bool
     */
    public function shouldCreateTemplate()
    {
        return $this->shouldCreateTemplate;
    }

    /**
     * Set the placeholder used in the template generated to help create new elements in JavaScript
     *
     * Non-string values are silently ignored.
     *
     * @param string $templatePlaceholder
     * @return Collection
     */
    public function setTemplatePlaceholder($templatePlaceholder)
    {
        if (is_string($templatePlaceholder)) {
            $this->templatePlaceholder = $templatePlaceholder;
        }

        return $this;
    }

    /**
     * Get the template placeholder
     *
     * @return string
     */
    public function getTemplatePlaceholder()
    {
        return $this->templatePlaceholder;
    }

    /**
     * Set whether bound objects are re-created instead of reused
     *
     * @param bool $createNewObjects
     * @return Collection
     */
    public function setCreateNewObjects($createNewObjects)
    {
        $this->createNewObjects = (bool) $createNewObjects;
        return $this;
    }

    /**
     * Get whether bound objects are re-created instead of reused
     *
     * @return bool
     */
    public function createNewObjects()
    {
        return $this->createNewObjects;
    }

    /**
     * Get a template element used for rendering purposes only
     *
     * Lazily created on first access.
     *
     * @return null|ElementInterface|FieldsetInterface
     */
    public function getTemplateElement()
    {
        if ($this->templateElement === null) {
            $this->templateElement = $this->createTemplateElement();
        }

        return $this->templateElement;
    }

    /**
     * Prepare the collection by adding a dummy template element if the user want one
     *
     * @param  FormInterface $form
     * @return mixed|void
     */
    public function prepareElement(FormInterface $form)
    {
        if (true === $this->shouldCreateChildrenOnPrepareElement) {
            if ($this->targetElement !== null && $this->count > 0) {
                // Top up children until $count entries exist.
                while ($this->count > $this->lastChildIndex + 1) {
                    $this->addNewTargetElementInstance(++$this->lastChildIndex);
                }
            }
        }

        // Create a template that will also be prepared
        if ($this->shouldCreateTemplate) {
            $templateElement = $this->getTemplateElement();
            $this->add($templateElement);
        }

        parent::prepareElement($form);

        // The template element has been prepared, but we don't want it to be rendered nor validated, so remove it from the list
        if ($this->shouldCreateTemplate) {
            $this->remove($this->templatePlaceholder);
        }
    }

    /**
     * Extract values from the bound collection of objects
     *
     * Uses the hydrator when one is set; otherwise clones the target fieldset
     * per object, or passes plain values straight through.
     *
     * @return array
     * @throws \Zend\Form\Exception\InvalidArgumentException
     * @throws \Zend\Stdlib\Exception\InvalidArgumentException
     * @throws \Zend\Form\Exception\DomainException
     * @throws \Zend\Form\Exception\InvalidElementException
     */
    public function extract()
    {
        if ($this->object instanceof Traversable) {
            $this->object = ArrayUtils::iteratorToArray($this->object, false);
        }

        if (!is_array($this->object)) {
            return array();
        }

        $values = array();

        foreach ($this->object as $key => $value) {
            // If a hydrator is provided, our work here is done
            if ($this->hydrator) {
                $values[$key] = $this->hydrator->extract($value);
                continue;
            }

            // If the target element is a fieldset that can accept the provided value
            // we should clone it, inject the value and extract the data
            if ($this->targetElement instanceof FieldsetInterface) {
                if (!$this->targetElement->allowObjectBinding($value)) {
                    continue;
                }
                $targetElement = clone $this->targetElement;
                $targetElement->setObject($value);
                $values[$key] = $targetElement->extract();
                if (!$this->createNewObjects() && $this->has($key)) {
                    // Keep the existing child bound to the real object.
                    $this->get($key)->setObject($value);
                }
                continue;
            }

            // If the target element is a non-fieldset element, just use the value
            if ($this->targetElement instanceof ElementInterface) {
                $values[$key] = $value;
                if (!$this->createNewObjects() && $this->has($key)) {
                    $this->get($key)->setValue($value);
                }
                continue;
            }
        }

        return $values;
    }

    /**
     * Create a new instance of the target element
     *
     * @return ElementInterface
     */
    protected function createNewTargetElementInstance()
    {
        return clone $this->targetElement;
    }

    /**
     * Add a new instance of the target element
     *
     * @param string $name
     * @return ElementInterface
     * @throws Exception\DomainException when allow_add is false and the
     *         collection would grow past its configured count
     */
    protected function addNewTargetElementInstance($name)
    {
        // Children are now managed explicitly; prepareElement() must not add more.
        $this->shouldCreateChildrenOnPrepareElement = false;

        $elementOrFieldset = $this->createNewTargetElementInstance();
        $elementOrFieldset->setName($name);

        $this->add($elementOrFieldset);

        if (!$this->allowAdd && $this->count() > $this->count) {
            throw new Exception\DomainException(sprintf(
                'There are more elements than specified in the collection (%s). Either set the allow_add option '
                . 'to true, or re-submit the form.',
                get_class($this)
            ));
        }

        return $elementOrFieldset;
    }

    /**
     * Create a dummy template element
     *
     * @return null|ElementInterface|FieldsetInterface
     */
    protected function createTemplateElement()
    {
        if (!$this->shouldCreateTemplate) {
            return null;
        }

        if ($this->templateElement) {
            return $this->templateElement;
        }

        $elementOrFieldset = $this->createNewTargetElementInstance();
        $elementOrFieldset->setName($this->templatePlaceholder);

        return $elementOrFieldset;
    }

    /**
     * Replaces the default template object of a sub element with the corresponding
     * real entity so that all properties are preserved.
     *
     * @return void
     */
    protected function replaceTemplateObjects()
    {
        $fieldsets = $this->getFieldsets();

        if (!count($fieldsets) || !$this->object) {
            return;
        }

        foreach ($fieldsets as $fieldset) {
            $i = $fieldset->getName();
            if (isset($this->object[$i])) {
                $fieldset->setObject($this->object[$i]);
            }
        }
    }
}
Java
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE415_Double_Free__new_delete_wchar_t_83.h
Label Definition File: CWE415_Double_Free__new_delete.label.xml
Template File: sources-sinks-83.tmpl.h
*/
/*
 * @description
 * CWE: 415 Double Free
 * BadSource:  Allocate data using new and Deallocae data using delete
 * GoodSource: Allocate data using new
 * Sinks:
 *    GoodSink: do nothing
 *    BadSink : Deallocate data using delete
 * Flow Variant: 83 Data flow: data passed to class constructor and destructor by declaring the class object on the stack
 *
 * */

#include "std_testcase.h"

#include <wchar.h>

// Deliberate-flaw test-case declarations (NIST Juliet style). The constructor
// acts as the "source" and the destructor as the "sink"; the flaw (or its
// fixed variant) lives in the corresponding .cpp file, not here.
namespace CWE415_Double_Free__new_delete_wchar_t_83
{

#ifndef OMITBAD

// Bad variant: per the @description above, the source deletes `data` and the
// destructor deletes it again — the CWE-415 double free.
class CWE415_Double_Free__new_delete_wchar_t_83_bad
{
public:
    CWE415_Double_Free__new_delete_wchar_t_83_bad(wchar_t * dataCopy);
    ~CWE415_Double_Free__new_delete_wchar_t_83_bad();

private:
    wchar_t * data;
};

#endif /* OMITBAD */

#ifndef OMITGOOD

// GoodG2B: good source (no premature delete) feeding the bad sink.
class CWE415_Double_Free__new_delete_wchar_t_83_goodG2B
{
public:
    CWE415_Double_Free__new_delete_wchar_t_83_goodG2B(wchar_t * dataCopy);
    ~CWE415_Double_Free__new_delete_wchar_t_83_goodG2B();

private:
    wchar_t * data;
};

// GoodB2G: bad source feeding the good sink (destructor does nothing).
class CWE415_Double_Free__new_delete_wchar_t_83_goodB2G
{
public:
    CWE415_Double_Free__new_delete_wchar_t_83_goodB2G(wchar_t * dataCopy);
    ~CWE415_Double_Free__new_delete_wchar_t_83_goodB2G();

private:
    wchar_t * data;
};

#endif /* OMITGOOD */

}
Java
*> \brief <b> ZCPOSV computes the solution to system of linear equations A * X = B for PO matrices</b> * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download ZCPOSV + dependencies *> <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/zcposv.f"> *> [TGZ]</a> *> <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/zcposv.f"> *> [ZIP]</a> *> <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/zcposv.f"> *> [TXT]</a> *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE ZCPOSV( UPLO, N, NRHS, A, LDA, B, LDB, X, LDX, WORK, * SWORK, RWORK, ITER, INFO ) * * .. Scalar Arguments .. * CHARACTER UPLO * INTEGER INFO, ITER, LDA, LDB, LDX, N, NRHS * .. * .. Array Arguments .. * DOUBLE PRECISION RWORK( * ) * COMPLEX SWORK( * ) * COMPLEX*16 A( LDA, * ), B( LDB, * ), WORK( N, * ), * $ X( LDX, * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ZCPOSV computes the solution to a complex system of linear equations *> A * X = B, *> where A is an N-by-N Hermitian positive definite matrix and X and B *> are N-by-NRHS matrices. *> *> ZCPOSV first attempts to factorize the matrix in COMPLEX and use this *> factorization within an iterative refinement procedure to produce a *> solution with COMPLEX*16 normwise backward error quality (see below). *> If the approach fails the method switches to a COMPLEX*16 *> factorization and solve. *> *> The iterative refinement is not going to be a winning strategy if *> the ratio COMPLEX performance over COMPLEX*16 performance is too *> small. A reasonable strategy should take the number of right-hand *> sides and the size of the matrix into account. This might be done *> with a call to ILAENV in the future. Up to now, we always try *> iterative refinement. 
*> *> The iterative refinement process is stopped if *> ITER > ITERMAX *> or for all the RHS we have: *> RNRM < SQRT(N)*XNRM*ANRM*EPS*BWDMAX *> where *> o ITER is the number of the current iteration in the iterative *> refinement process *> o RNRM is the infinity-norm of the residual *> o XNRM is the infinity-norm of the solution *> o ANRM is the infinity-operator-norm of the matrix A *> o EPS is the machine epsilon returned by DLAMCH('Epsilon') *> The value ITERMAX and BWDMAX are fixed to 30 and 1.0D+00 *> respectively. *> \endverbatim * * Arguments: * ========== * *> \param[in] UPLO *> \verbatim *> UPLO is CHARACTER*1 *> = 'U': Upper triangle of A is stored; *> = 'L': Lower triangle of A is stored. *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of linear equations, i.e., the order of the *> matrix A. N >= 0. *> \endverbatim *> *> \param[in] NRHS *> \verbatim *> NRHS is INTEGER *> The number of right hand sides, i.e., the number of columns *> of the matrix B. NRHS >= 0. *> \endverbatim *> *> \param[in,out] A *> \verbatim *> A is COMPLEX*16 array, *> dimension (LDA,N) *> On entry, the Hermitian matrix A. If UPLO = 'U', the leading *> N-by-N upper triangular part of A contains the upper *> triangular part of the matrix A, and the strictly lower *> triangular part of A is not referenced. If UPLO = 'L', the *> leading N-by-N lower triangular part of A contains the lower *> triangular part of the matrix A, and the strictly upper *> triangular part of A is not referenced. *> *> Note that the imaginary parts of the diagonal *> elements need not be set and are assumed to be zero. *> *> On exit, if iterative refinement has been successfully used *> (INFO = 0 and ITER >= 0, see description below), then A is *> unchanged, if double precision factorization has been used *> (INFO = 0 and ITER < 0, see description below), then the *> array A contains the factor U or L from the Cholesky *> factorization A = U**H*U or A = L*L**H. 
*> \endverbatim *> *> \param[in] LDA *> \verbatim *> LDA is INTEGER *> The leading dimension of the array A. LDA >= max(1,N). *> \endverbatim *> *> \param[in] B *> \verbatim *> B is COMPLEX*16 array, dimension (LDB,NRHS) *> The N-by-NRHS right hand side matrix B. *> \endverbatim *> *> \param[in] LDB *> \verbatim *> LDB is INTEGER *> The leading dimension of the array B. LDB >= max(1,N). *> \endverbatim *> *> \param[out] X *> \verbatim *> X is COMPLEX*16 array, dimension (LDX,NRHS) *> If INFO = 0, the N-by-NRHS solution matrix X. *> \endverbatim *> *> \param[in] LDX *> \verbatim *> LDX is INTEGER *> The leading dimension of the array X. LDX >= max(1,N). *> \endverbatim *> *> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (N,NRHS) *> This array is used to hold the residual vectors. *> \endverbatim *> *> \param[out] SWORK *> \verbatim *> SWORK is COMPLEX array, dimension (N*(N+NRHS)) *> This array is used to use the single precision matrix and the *> right-hand sides or solutions in single precision. *> \endverbatim *> *> \param[out] RWORK *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (N) *> \endverbatim *> *> \param[out] ITER *> \verbatim *> ITER is INTEGER *> < 0: iterative refinement has failed, COMPLEX*16 *> factorization has been performed *> -1 : the routine fell back to full precision for *> implementation- or machine-specific reasons *> -2 : narrowing the precision induced an overflow, *> the routine fell back to full precision *> -3 : failure of CPOTRF *> -31: stop the iterative refinement after the 30th *> iterations *> > 0: iterative refinement has been successfully used. 
*>          Returns the number of iterations
*> \endverbatim
*>
*> \param[out] INFO
*> \verbatim
*>          INFO is INTEGER
*>          = 0:  successful exit
*>          < 0:  if INFO = -i, the i-th argument had an illegal value
*>          > 0:  if INFO = i, the leading minor of order i of
*>                (COMPLEX*16) A is not positive definite, so the
*>                factorization could not be completed, and the solution
*>                has not been computed.
*> \endverbatim
*
*  Authors:
*  ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date June 2016
*
*> \ingroup complex16POsolve
*
*  =====================================================================
      SUBROUTINE ZCPOSV( UPLO, N, NRHS, A, LDA, B, LDB, X, LDX, WORK,
     $                   SWORK, RWORK, ITER, INFO )
*
*  -- LAPACK driver routine (version 3.8.0) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
*     June 2016
*
*     .. Scalar Arguments ..
      CHARACTER          UPLO
      INTEGER            INFO, ITER, LDA, LDB, LDX, N, NRHS
*     ..
*     .. Array Arguments ..
      DOUBLE PRECISION   RWORK( * )
      COMPLEX            SWORK( * )
      COMPLEX*16         A( LDA, * ), B( LDB, * ), WORK( N, * ),
     $                   X( LDX, * )
*     ..
*
*  =====================================================================
*
*     .. Parameters ..
*
*     DOITREF toggles the mixed-precision path: when .FALSE. the routine
*     skips straight to the full double precision ZPOTRF/ZPOTRS fallback.
      LOGICAL            DOITREF
      PARAMETER          ( DOITREF = .TRUE. )
*
      INTEGER            ITERMAX
      PARAMETER          ( ITERMAX = 30 )
*
      DOUBLE PRECISION   BWDMAX
      PARAMETER          ( BWDMAX = 1.0E+00 )
*
      COMPLEX*16         NEGONE, ONE
      PARAMETER          ( NEGONE = ( -1.0D+00, 0.0D+00 ),
     $                   ONE = ( 1.0D+00, 0.0D+00 ) )
*
*     .. Local Scalars ..
      INTEGER            I, IITER, PTSA, PTSX
      DOUBLE PRECISION   ANRM, CTE, EPS, RNRM, XNRM
      COMPLEX*16         ZDUM
*
*     .. External Subroutines ..
      EXTERNAL           ZAXPY, ZHEMM, ZLACPY, ZLAT2C, ZLAG2C, CLAG2Z,
     $                   CPOTRF, CPOTRS, XERBLA, ZPOTRF, ZPOTRS
*     ..
*     .. External Functions ..
      INTEGER            IZAMAX
      DOUBLE PRECISION   DLAMCH, ZLANHE
      LOGICAL            LSAME
      EXTERNAL           IZAMAX, DLAMCH, ZLANHE, LSAME
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          ABS, DBLE, MAX, SQRT
*     ..
*     .. Statement Functions ..
      DOUBLE PRECISION   CABS1
*     ..
*     .. Statement Function definitions ..
*
*     CABS1 is the cheap complex modulus |Re(z)| + |Im(z)| used in the
*     normwise backward-error tests below.
*
      CABS1( ZDUM ) = ABS( DBLE( ZDUM ) ) + ABS( DIMAG( ZDUM ) )
*     ..
*     .. Executable Statements ..
*
      INFO = 0
      ITER = 0
*
*     Test the input parameters.
*
      IF( .NOT.LSAME( UPLO, 'U' ) .AND. .NOT.LSAME( UPLO, 'L' ) ) THEN
         INFO = -1
      ELSE IF( N.LT.0 ) THEN
         INFO = -2
      ELSE IF( NRHS.LT.0 ) THEN
         INFO = -3
      ELSE IF( LDA.LT.MAX( 1, N ) ) THEN
         INFO = -5
      ELSE IF( LDB.LT.MAX( 1, N ) ) THEN
         INFO = -7
      ELSE IF( LDX.LT.MAX( 1, N ) ) THEN
         INFO = -9
      END IF
      IF( INFO.NE.0 ) THEN
         CALL XERBLA( 'ZCPOSV', -INFO )
         RETURN
      END IF
*
*     Quick return if (N.EQ.0).
*
      IF( N.EQ.0 )
     $   RETURN
*
*     Skip single precision iterative refinement if a priori slower
*     than double precision factorization.
*
      IF( .NOT.DOITREF ) THEN
         ITER = -1
         GO TO 40
      END IF
*
*     Compute some constants.
*     CTE = ANRM * EPS * sqrt(N) * BWDMAX is the stopping threshold for
*     the normwise backward-error tests below.
*
      ANRM = ZLANHE( 'I', UPLO, N, A, LDA, RWORK )
      EPS = DLAMCH( 'Epsilon' )
      CTE = ANRM*EPS*SQRT( DBLE( N ) )*BWDMAX
*
*     Set the indices PTSA, PTSX for referencing SA and SX in SWORK.
*
      PTSA = 1
      PTSX = PTSA + N*N
*
*     Convert B from double precision to single precision and store the
*     result in SX.
*
      CALL ZLAG2C( N, NRHS, B, LDB, SWORK( PTSX ), N, INFO )
*
      IF( INFO.NE.0 ) THEN
         ITER = -2
         GO TO 40
      END IF
*
*     Convert A from double precision to single precision and store the
*     result in SA.
*
      CALL ZLAT2C( UPLO, N, A, LDA, SWORK( PTSA ), N, INFO )
*
      IF( INFO.NE.0 ) THEN
         ITER = -2
         GO TO 40
      END IF
*
*     Compute the Cholesky factorization of SA.
*
      CALL CPOTRF( UPLO, N, SWORK( PTSA ), N, INFO )
*
      IF( INFO.NE.0 ) THEN
         ITER = -3
         GO TO 40
      END IF
*
*     Solve the system SA*SX = SB.
*
      CALL CPOTRS( UPLO, N, NRHS, SWORK( PTSA ), N, SWORK( PTSX ), N,
     $             INFO )
*
*     Convert SX back to COMPLEX*16
*
      CALL CLAG2Z( N, NRHS, SWORK( PTSX ), N, X, LDX, INFO )
*
*     Compute R = B - AX (R is WORK).
*
      CALL ZLACPY( 'All', N, NRHS, B, LDB, WORK, N )
*
      CALL ZHEMM( 'Left', UPLO, N, NRHS, NEGONE, A, LDA, X, LDX, ONE,
     $            WORK, N )
*
*     Check whether the NRHS normwise backward errors satisfy the
*     stopping criterion. If yes, set ITER=0 and return.
*
      DO I = 1, NRHS
         XNRM = CABS1( X( IZAMAX( N, X( 1, I ), 1 ), I ) )
         RNRM = CABS1( WORK( IZAMAX( N, WORK( 1, I ), 1 ), I ) )
         IF( RNRM.GT.XNRM*CTE )
     $      GO TO 10
      END DO
*
*     If we are here, the NRHS normwise backward errors satisfy the
*     stopping criterion. We are good to exit.
*
      ITER = 0
      RETURN
*
   10 CONTINUE
*
      DO 30 IITER = 1, ITERMAX
*
*        Convert R (in WORK) from double precision to single precision
*        and store the result in SX.
*
         CALL ZLAG2C( N, NRHS, WORK, N, SWORK( PTSX ), N, INFO )
*
         IF( INFO.NE.0 ) THEN
            ITER = -2
            GO TO 40
         END IF
*
*        Solve the system SA*SX = SR.
*
         CALL CPOTRS( UPLO, N, NRHS, SWORK( PTSA ), N, SWORK( PTSX ), N,
     $                INFO )
*
*        Convert SX back to double precision and update the current
*        iterate.
*
         CALL CLAG2Z( N, NRHS, SWORK( PTSX ), N, WORK, N, INFO )
*
         DO I = 1, NRHS
            CALL ZAXPY( N, ONE, WORK( 1, I ), 1, X( 1, I ), 1 )
         END DO
*
*        Compute R = B - AX (R is WORK).
*
         CALL ZLACPY( 'All', N, NRHS, B, LDB, WORK, N )
*
         CALL ZHEMM( 'L', UPLO, N, NRHS, NEGONE, A, LDA, X, LDX, ONE,
     $               WORK, N )
*
*        Check whether the NRHS normwise backward errors satisfy the
*        stopping criterion. If yes, set ITER=IITER>0 and return.
*
         DO I = 1, NRHS
            XNRM = CABS1( X( IZAMAX( N, X( 1, I ), 1 ), I ) )
            RNRM = CABS1( WORK( IZAMAX( N, WORK( 1, I ), 1 ), I ) )
            IF( RNRM.GT.XNRM*CTE )
     $         GO TO 20
         END DO
*
*        If we are here, the NRHS normwise backward errors satisfy the
*        stopping criterion, we are good to exit.
*
         ITER = IITER
*
         RETURN
*
   20    CONTINUE
*
   30 CONTINUE
*
*     If we are at this place of the code, this is because we have
*     performed ITER=ITERMAX iterations and never satisfied the
*     stopping criterion, set up the ITER flag accordingly and follow
*     up on double precision routine.
*
      ITER = -ITERMAX - 1
*
   40 CONTINUE
*
*     Single-precision iterative refinement failed to converge to a
*     satisfactory solution, so we resort to double precision.
*
      CALL ZPOTRF( UPLO, N, A, LDA, INFO )
*
      IF( INFO.NE.0 )
     $   RETURN
*
      CALL ZLACPY( 'All', N, NRHS, B, LDB, X, LDX )
      CALL ZPOTRS( UPLO, N, NRHS, A, LDA, X, LDX, INFO )
*
      RETURN
*
*     End of ZCPOSV.
*
      END
Java
/* The <sys/stat.h> header defines a struct that is used in the stat() and
 * fstat functions.  The information in this struct comes from the i-node of
 * some file.  These calls are the only approved way to inspect i-nodes.
 */

#ifndef _STAT_H
#define _STAT_H

struct stat {
  dev_t st_dev;			/* major/minor device number */
  ino_t st_ino;			/* i-node number */
  mode_t st_mode;		/* file mode, protection bits, etc. */
  short int st_nlink;		/* # links; TEMPORARY HACK: should be nlink_t*/
  uid_t st_uid;			/* uid of the file's owner */
  short int st_gid;		/* gid; TEMPORARY HACK: should be gid_t */
  dev_t st_rdev;		/* device number, for special files (POSIX) */
  off_t st_size;		/* file size */
  time_t st_atime;		/* time of last access */
  time_t st_mtime;		/* time of last data modification */
  time_t st_ctime;		/* time of last file status change */
};

/* Traditional mask definitions for st_mode. */
/* The ugly casts on only some of the definitions are to avoid surprising sign
 * extensions such as S_IFREG != (mode_t) S_IFREG when ints are 32 bits.
 */
#define S_IFMT  ((mode_t) 0170000)	/* type of file */
#define S_IFREG ((mode_t) 0100000)	/* regular */
#define S_IFBLK 0060000		/* block special */
#define S_IFDIR 0040000		/* directory */
#define S_IFCHR 0020000		/* character special */
#define S_IFIFO 0010000		/* this is a FIFO */
#define S_ISUID 0004000		/* set user id on execution */
#define S_ISGID 0002000		/* set group id on execution */
		/* next is reserved for future use */
#define S_ISVTX   01000		/* save swapped text even after use */

/* POSIX masks for st_mode. */
#define S_IRWXU   00700		/* owner:  rwx------ */
#define S_IRUSR   00400		/* owner:  r-------- */
#define S_IWUSR   00200		/* owner:  -w------- */
#define S_IXUSR   00100		/* owner:  --x------ */
#define S_IRWXG   00070		/* group:  ---rwx--- */
#define S_IRGRP   00040		/* group:  ---r----- */
#define S_IWGRP   00020		/* group:  ----w---- */
#define S_IXGRP   00010		/* group:  -----x--- */
#define S_IRWXO   00007		/* others: ------rwx */
#define S_IROTH   00004		/* others: ------r-- */
#define S_IWOTH   00002		/* others: -------w- */
#define S_IXOTH   00001		/* others: --------x */

/* The following macros test st_mode (from POSIX Sec. 5.6.1.1).  Each masks
 * out the file-type bits and compares against one type.
 */
#define S_ISREG(m)	(((m) & S_IFMT) == S_IFREG)	/* is a reg file */
#define S_ISDIR(m)	(((m) & S_IFMT) == S_IFDIR)	/* is a directory */
#define S_ISCHR(m)	(((m) & S_IFMT) == S_IFCHR)	/* is a char spec */
#define S_ISBLK(m)	(((m) & S_IFMT) == S_IFBLK)	/* is a block spec */
#define S_ISFIFO(m)	(((m) & S_IFMT) == S_IFIFO)	/* is a pipe/FIFO */

/* Function Prototypes. */
#ifndef _ANSI_H
#include <ansi.h>
#endif

_PROTOTYPE( int chmod, (const char *_path, Mode_t _mode)	);
_PROTOTYPE( int fstat, (int _fildes, struct stat *_buf)		);
_PROTOTYPE( int mkdir, (const char *_path, int _mode)		);
_PROTOTYPE( int mkfifo, (const char *_path, int _mode)		);
_PROTOTYPE( int stat , (const char *_path, struct stat *_buf)	);
_PROTOTYPE( mode_t umask, (int _cmask)				);

#endif /* _STAT_H */
Java
/*
 * Copyright (c) 2011-2013, Longxiang He <[email protected]>,
 * SmeshLink Technology Co.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY.
 *
 * This file is part of the CoAP.NET, a CoAP framework in C#.
 * Please see README for more information.
 */

using System;

namespace CoAP
{
    /// <summary>
    /// Event data raised when a response arrives for an outstanding request.
    /// </summary>
    public class ResponseEventArgs : EventArgs
    {
        private readonly Response m_response;

        /// <summary>
        /// Instantiates the event data with the response that arrived.
        /// </summary>
        /// <param name="response">the incoming response</param>
        public ResponseEventArgs(Response response)
        {
            m_response = response;
        }

        /// <summary>
        /// Gets the incoming response.
        /// </summary>
        public Response Response
        {
            get { return m_response; }
        }
    }
}
Java
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MOJO_SERVICES_HTML_VIEWER_BLINK_PLATFORM_IMPL_H_
#define MOJO_SERVICES_HTML_VIEWER_BLINK_PLATFORM_IMPL_H_

#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/threading/thread_local_storage.h"
#include "base/timer/timer.h"
#include "cc/blink/web_compositor_support_impl.h"
#include "mojo/services/html_viewer/blink_resource_map.h"
#include "mojo/services/html_viewer/webmimeregistry_impl.h"
#include "mojo/services/html_viewer/webthemeengine_impl.h"
#include "third_party/WebKit/public/platform/Platform.h"
#include "third_party/WebKit/public/platform/WebScrollbarBehavior.h"

namespace html_viewer {

// blink::Platform implementation for the mojo HTML viewer.  Backs Blink's
// platform needs (shared timer, threads, MIME registry, theme engine,
// compositor support, packaged resources) with base/, cc and mojo services.
class BlinkPlatformImpl : public blink::Platform {
 public:
  explicit BlinkPlatformImpl();
  virtual ~BlinkPlatformImpl();

  // blink::Platform methods:
  virtual blink::WebMimeRegistry* mimeRegistry();
  virtual blink::WebThemeEngine* themeEngine();
  virtual blink::WebString defaultLocale();
  virtual double currentTime();
  virtual double monotonicallyIncreasingTime();
  virtual void cryptographicallyRandomValues(
      unsigned char* buffer, size_t length);
  virtual void setSharedTimerFiredFunction(void (*func)());
  virtual void setSharedTimerFireInterval(double interval_seconds);
  virtual void stopSharedTimer();
  virtual void callOnMainThread(void (*func)(void*), void* context);
  virtual bool isThreadedCompositingEnabled();
  virtual blink::WebCompositorSupport* compositorSupport();
  virtual blink::WebURLLoader* createURLLoader();
  virtual blink::WebSocketHandle* createWebSocketHandle();
  virtual blink::WebString userAgent();
  virtual blink::WebData parseDataURL(
      const blink::WebURL& url,
      blink::WebString& mime_type,
      blink::WebString& charset);
  virtual blink::WebURLError cancelledError(const blink::WebURL& url) const;
  virtual blink::WebThread* createThread(const char* name);
  virtual blink::WebThread* currentThread();
  virtual void yieldCurrentThread();
  virtual blink::WebWaitableEvent* createWaitableEvent();
  virtual blink::WebWaitableEvent* waitMultipleEvents(
      const blink::WebVector<blink::WebWaitableEvent*>& events);
  virtual blink::WebScrollbarBehavior* scrollbarBehavior();
  virtual const unsigned char* getTraceCategoryEnabledFlag(
      const char* category_name);
  virtual blink::WebData loadResource(const char* name);

 private:
  void SuspendSharedTimer();
  void ResumeSharedTimer();

  // Invokes the registered shared-timer callback, unless the timer is
  // currently suspended (|shared_timer_suspended_| > 0).
  void DoTimeout() {
    if (shared_timer_func_ && !shared_timer_suspended_)
      shared_timer_func_();
  }

  // NOTE(review): presumably the TLS destructor for |current_thread_slot_|
  // (cleans up the per-thread blink::WebThread) - confirm in the .cc file.
  static void DestroyCurrentThread(void*);

  base::MessageLoop* main_loop_;

  // Shared-timer state managed by setSharedTimerFiredFunction /
  // setSharedTimerFireInterval / stopSharedTimer and the Suspend/Resume pair.
  base::OneShotTimer<BlinkPlatformImpl> shared_timer_;
  void (*shared_timer_func_)();
  double shared_timer_fire_time_;
  bool shared_timer_fire_time_was_set_while_suspended_;
  int shared_timer_suspended_;  // counter

  // Per-thread storage, used by currentThread().
  base::ThreadLocalStorage::Slot current_thread_slot_;
  cc_blink::WebCompositorSupportImpl compositor_support_;
  WebThemeEngineImpl theme_engine_;
  WebMimeRegistryImpl mime_registry_;
  blink::WebScrollbarBehavior scrollbar_behavior_;
  BlinkResourceMap blink_resource_map_;

  DISALLOW_COPY_AND_ASSIGN(BlinkPlatformImpl);
};

}  // namespace html_viewer

#endif  // MOJO_SERVICES_HTML_VIEWER_BLINK_PLATFORM_IMPL_H_
Java
/* * Copyright 2009, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // This file contains the definition of DrawElement. 
#include "core/cross/precompile.h"
#include "core/cross/draw_element.h"
#include "core/cross/renderer.h"
#include "core/cross/error.h"

namespace o3d {

O3D_DEFN_CLASS(DrawElement, ParamObject);

// Name of the Param that holds this draw element's material.
const char* DrawElement::kMaterialParamName =
    O3D_STRING_CONSTANT("material");

// Factory: asks the Renderer service to create a renderer-specific
// DrawElement.  Returns a null Ref (after raising an O3D error) when no
// renderer is available.
ObjectBase::Ref DrawElement::Create(ServiceLocator* service_locator) {
  Renderer* renderer = service_locator->GetService<Renderer>();
  if (NULL == renderer) {
    O3D_ERROR(service_locator) << "No Render Device Available";
    return ObjectBase::Ref();
  }
  return ObjectBase::Ref(renderer->CreateDrawElement());
}

DrawElement::DrawElement(ServiceLocator* service_locator)
    : ParamObject(service_locator),
      owner_(NULL) {
  // Expose the material as a registered param so it can be bound/animated.
  RegisterParamRef(kMaterialParamName, &material_param_ref_);
}

DrawElement::~DrawElement() {
}

// Re-parents this draw element: detaches it from the current owner (if any)
// and attaches it to |new_owner|; |new_owner| may be NULL to just detach.
void DrawElement::SetOwner(Element* new_owner) {
  // Hold a ref to ourselves so we make sure we don't get deleted while
  // we remove ourself from our current owner.
  DrawElement::Ref temp(this);

  if (owner_ != NULL) {
    bool removed = owner_->RemoveDrawElement(this);
    DLOG_ASSERT(removed);
  }

  owner_ = new_owner;
  if (new_owner) {
    new_owner->AddDrawElement(this);
  }
}
}  // namespace o3d
Java
#ifndef NT2_INCLUDE_FUNCTIONS_SLIDE_HPP_INCLUDED #define NT2_INCLUDE_FUNCTIONS_SLIDE_HPP_INCLUDED #include <nt2/memory/include/functions/slide.hpp> #include <nt2/memory/include/functions/scalar/slide.hpp> #include <nt2/memory/include/functions/simd/slide.hpp> #endif
Java
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/download/mhtml_generation_manager.h"

#include <map>
#include <queue>
#include <utility>

#include "base/bind.h"
#include "base/files/file.h"
#include "base/guid.h"
#include "base/macros.h"
#include "base/scoped_observer.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "content/browser/bad_message.h"
#include "content/browser/frame_host/frame_tree_node.h"
#include "content/browser/frame_host/render_frame_host_impl.h"
#include "content/common/frame_messages.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/render_frame_host.h"
#include "content/public/browser/render_process_host.h"
#include "content/public/browser/render_process_host_observer.h"
#include "content/public/browser/web_contents.h"
#include "content/public/common/mhtml_generation_params.h"
#include "net/base/mime_util.h"

namespace content {

// Tracks the state of a single MHTML-save operation: the frames still to be
// serialized, the destination file, and dedup bookkeeping shared with the
// renderers.  Frames are serialized one at a time via IPC round-trips.
//
// The class and all of its members live on the UI thread.  Only static methods
// are executed on other threads.
class MHTMLGenerationManager::Job : public RenderProcessHostObserver {
 public:
  Job(int job_id,
      WebContents* web_contents,
      const MHTMLGenerationParams& params,
      const GenerateMHTMLCallback& callback);
  ~Job() override;

  int id() const { return job_id_; }
  void set_browser_file(base::File file) { browser_file_ = std::move(file); }

  const GenerateMHTMLCallback& callback() const { return callback_; }

  // Indicates whether we expect a message from the |sender| at this time.
  // We expect only one message per frame - therefore calling this method
  // will always clear |frame_tree_node_id_of_busy_frame_|.
  bool IsMessageFromFrameExpected(RenderFrameHostImpl* sender);

  // Handler for FrameHostMsg_SerializeAsMHTMLResponse (a notification from the
  // renderer that the MHTML generation for previous frame has finished).
  // Returns |true| upon success; |false| otherwise.
  bool OnSerializeAsMHTMLResponse(
      const std::set<std::string>& digests_of_uris_of_serialized_resources);

  // Sends IPC to the renderer, asking for MHTML generation of the next frame.
  //
  // Returns true if the message was sent successfully; false otherwise.
  bool SendToNextRenderFrame();

  // Indicates if more calls to SendToNextRenderFrame are needed.
  bool IsDone() const {
    bool waiting_for_response_from_renderer =
        frame_tree_node_id_of_busy_frame_ !=
        FrameTreeNode::kFrameTreeNodeInvalidId;
    bool no_more_requests_to_send = pending_frame_tree_node_ids_.empty();
    return !waiting_for_response_from_renderer && no_more_requests_to_send;
  }

  // Close the file on the file thread and respond back on the UI thread with
  // file size.
  void CloseFile(base::Callback<void(int64_t file_size)> callback);

  // RenderProcessHostObserver:
  void RenderProcessExited(RenderProcessHost* host,
                           base::TerminationStatus status,
                           int exit_code) override;
  void RenderProcessHostDestroyed(RenderProcessHost* host) override;

  void MarkAsFinished();

 private:
  static int64_t CloseFileOnFileThread(base::File file);
  void AddFrame(RenderFrameHost* render_frame_host);

  // Creates a new map with values (content ids) the same as in
  // |frame_tree_node_to_content_id_| map, but with the keys translated from
  // frame_tree_node_id into a |site_instance|-specific routing_id.
  std::map<int, std::string> CreateFrameRoutingIdToContentId(
      SiteInstance* site_instance);

  // Id used to map renderer responses to jobs.
  // See also MHTMLGenerationManager::id_to_job_ map.
  int job_id_;

  // User-configurable parameters. Includes the file location, binary encoding
  // choices, and whether to skip storing resources marked
  // Cache-Control: no-store.
  MHTMLGenerationParams params_;

  // The IDs of frames that still need to be processed.
  std::queue<int> pending_frame_tree_node_ids_;

  // Identifies a frame to which we've sent FrameMsg_SerializeAsMHTML but for
  // which we didn't yet process FrameHostMsg_SerializeAsMHTMLResponse via
  // OnSerializeAsMHTMLResponse.
  int frame_tree_node_id_of_busy_frame_;

  // The handle to the file the MHTML is saved to for the browser process.
  base::File browser_file_;

  // Map from frames to content ids (see WebFrameSerializer::generateMHTMLParts
  // for more details about what "content ids" are and how they are used).
  std::map<int, std::string> frame_tree_node_to_content_id_;

  // MIME multipart boundary to use in the MHTML doc.
  std::string mhtml_boundary_marker_;

  // Digests of URIs of already generated MHTML parts.
  std::set<std::string> digests_of_already_serialized_uris_;

  // Random GUID sent to renderers together with
  // |digests_of_already_serialized_uris_| (see SendToNextRenderFrame).
  std::string salt_;

  // The callback to call once generation is complete.
  const GenerateMHTMLCallback callback_;

  // Whether the job is finished (set to true only for the short duration of
  // time between MHTMLGenerationManager::JobFinished is called and the job is
  // destroyed by MHTMLGenerationManager::OnFileClosed).
  bool is_finished_;

  // RAII helper for registering this Job as a RenderProcessHost observer.
  ScopedObserver<RenderProcessHost, MHTMLGenerationManager::Job>
      observed_renderer_process_host_;

  DISALLOW_COPY_AND_ASSIGN(Job);
};

MHTMLGenerationManager::Job::Job(int job_id,
                                 WebContents* web_contents,
                                 const MHTMLGenerationParams& params,
                                 const GenerateMHTMLCallback& callback)
    : job_id_(job_id),
      params_(params),
      frame_tree_node_id_of_busy_frame_(FrameTreeNode::kFrameTreeNodeInvalidId),
      mhtml_boundary_marker_(net::GenerateMimeMultipartBoundary()),
      salt_(base::GenerateGUID()),
      callback_(callback),
      is_finished_(false),
      observed_renderer_process_host_(this) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  web_contents->ForEachFrame(base::Bind(
      &MHTMLGenerationManager::Job::AddFrame,
      base::Unretained(this)));  // Safe because ForEachFrame is synchronous.

  // Main frame needs to be processed first.
  DCHECK(!pending_frame_tree_node_ids_.empty());
  DCHECK(FrameTreeNode::GloballyFindByID(pending_frame_tree_node_ids_.front())
             ->parent() == nullptr);
}

MHTMLGenerationManager::Job::~Job() {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
}

std::map<int, std::string>
MHTMLGenerationManager::Job::CreateFrameRoutingIdToContentId(
    SiteInstance* site_instance) {
  std::map<int, std::string> result;
  for (const auto& it : frame_tree_node_to_content_id_) {
    int ftn_id = it.first;
    const std::string& content_id = it.second;

    // Frames that have gone away since AddFrame ran are silently skipped.
    FrameTreeNode* ftn = FrameTreeNode::GloballyFindByID(ftn_id);
    if (!ftn)
      continue;

    int routing_id =
        ftn->render_manager()->GetRoutingIdForSiteInstance(site_instance);
    if (routing_id == MSG_ROUTING_NONE)
      continue;

    result[routing_id] = content_id;
  }
  return result;
}

bool MHTMLGenerationManager::Job::SendToNextRenderFrame() {
  DCHECK(browser_file_.IsValid());
  DCHECK(!pending_frame_tree_node_ids_.empty());

  FrameMsg_SerializeAsMHTML_Params ipc_params;
  ipc_params.job_id = job_id_;
  ipc_params.mhtml_boundary_marker = mhtml_boundary_marker_;
  ipc_params.mhtml_binary_encoding = params_.use_binary_encoding;
  ipc_params.mhtml_cache_control_policy = params_.cache_control_policy;

  int frame_tree_node_id = pending_frame_tree_node_ids_.front();
  pending_frame_tree_node_ids_.pop();
  ipc_params.is_last_frame = pending_frame_tree_node_ids_.empty();

  FrameTreeNode* ftn = FrameTreeNode::GloballyFindByID(frame_tree_node_id);
  if (!ftn)  // The contents went away.
    return false;
  RenderFrameHost* rfh = ftn->current_frame_host();

  // Get notified if the target of the IPC message dies between responding.
  observed_renderer_process_host_.RemoveAll();
  observed_renderer_process_host_.Add(rfh->GetProcess());

  // Tell the renderer to skip (= deduplicate) already covered MHTML parts.
  ipc_params.salt = salt_;
  ipc_params.digests_of_uris_to_skip = digests_of_already_serialized_uris_;

  ipc_params.destination_file = IPC::GetPlatformFileForTransit(
      browser_file_.GetPlatformFile(), false);  // |close_source_handle|.
  ipc_params.frame_routing_id_to_content_id =
      CreateFrameRoutingIdToContentId(rfh->GetSiteInstance());

  // Send the IPC asking the renderer to serialize the frame.
  DCHECK_EQ(FrameTreeNode::kFrameTreeNodeInvalidId,
            frame_tree_node_id_of_busy_frame_);
  frame_tree_node_id_of_busy_frame_ = frame_tree_node_id;
  rfh->Send(new FrameMsg_SerializeAsMHTML(rfh->GetRoutingID(), ipc_params));
  return true;
}

void MHTMLGenerationManager::Job::RenderProcessExited(
    RenderProcessHost* host,
    base::TerminationStatus status,
    int exit_code) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  MHTMLGenerationManager::GetInstance()->RenderProcessExited(this);
}

void MHTMLGenerationManager::Job::MarkAsFinished() {
  DCHECK(!is_finished_);
  is_finished_ = true;

  // Stopping RenderProcessExited notifications is needed to avoid calling
  // JobFinished twice.  See also https://crbug.com/612098.
  observed_renderer_process_host_.RemoveAll();
}

void MHTMLGenerationManager::Job::AddFrame(RenderFrameHost* render_frame_host) {
  auto* rfhi = static_cast<RenderFrameHostImpl*>(render_frame_host);
  int frame_tree_node_id = rfhi->frame_tree_node()->frame_tree_node_id();
  pending_frame_tree_node_ids_.push(frame_tree_node_id);

  std::string guid = base::GenerateGUID();
  // NOTE(review): this format string ("%[email protected]") looks garbled,
  // likely by an email obfuscator; upstream Chromium uses
  // "<frame-%d-%s@mhtml.blink>" here.  Confirm against the renderer-side
  // content-id handling before relying on it.
  std::string content_id = base::StringPrintf("<frame-%d-%[email protected]>",
                                              frame_tree_node_id, guid.c_str());
  frame_tree_node_to_content_id_[frame_tree_node_id] = content_id;
}

void MHTMLGenerationManager::Job::RenderProcessHostDestroyed(
    RenderProcessHost* host) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  observed_renderer_process_host_.Remove(host);
}

void MHTMLGenerationManager::Job::CloseFile(
    base::Callback<void(int64_t)> callback) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  if (!browser_file_.IsValid()) {
    callback.Run(-1);
    return;
  }

  BrowserThread::PostTaskAndReplyWithResult(
      BrowserThread::FILE, FROM_HERE,
      base::Bind(&MHTMLGenerationManager::Job::CloseFileOnFileThread,
                 base::Passed(std::move(browser_file_))),
      callback);
}

bool MHTMLGenerationManager::Job::IsMessageFromFrameExpected(
    RenderFrameHostImpl* sender) {
  int sender_id = sender->frame_tree_node()->frame_tree_node_id();
  if (sender_id != frame_tree_node_id_of_busy_frame_)
    return false;

  // We only expect one message per frame - let's make sure subsequent messages
  // from the same |sender| will be rejected.
  frame_tree_node_id_of_busy_frame_ = FrameTreeNode::kFrameTreeNodeInvalidId;

  return true;
}

bool MHTMLGenerationManager::Job::OnSerializeAsMHTMLResponse(
    const std::set<std::string>& digests_of_uris_of_serialized_resources) {
  // Renderer should be deduping resources with the same uris.
  DCHECK_EQ(0u, base::STLSetIntersection<std::set<std::string>>(
                    digests_of_already_serialized_uris_,
                    digests_of_uris_of_serialized_resources).size());
  digests_of_already_serialized_uris_.insert(
      digests_of_uris_of_serialized_resources.begin(),
      digests_of_uris_of_serialized_resources.end());

  if (pending_frame_tree_node_ids_.empty())
    return true;  // Report success - all frames have been processed.

  return SendToNextRenderFrame();
}

// static
int64_t MHTMLGenerationManager::Job::CloseFileOnFileThread(base::File file) {
  DCHECK_CURRENTLY_ON(BrowserThread::FILE);
  DCHECK(file.IsValid());
  int64_t file_size = file.GetLength();
  file.Close();
  return file_size;
}

MHTMLGenerationManager* MHTMLGenerationManager::GetInstance() {
  return base::Singleton<MHTMLGenerationManager>::get();
}

MHTMLGenerationManager::MHTMLGenerationManager() : next_job_id_(0) {}

MHTMLGenerationManager::~MHTMLGenerationManager() {
  STLDeleteValues(&id_to_job_);
}

// Entry point: creates a Job, opens the destination file on the FILE thread,
// then kicks off frame-by-frame serialization from OnFileAvailable.
void MHTMLGenerationManager::SaveMHTML(WebContents* web_contents,
                                       const MHTMLGenerationParams& params,
                                       const GenerateMHTMLCallback& callback) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);

  int job_id = NewJob(web_contents, params, callback);

  BrowserThread::PostTaskAndReplyWithResult(
      BrowserThread::FILE, FROM_HERE,
      base::Bind(&MHTMLGenerationManager::CreateFile, params.file_path),
      base::Bind(&MHTMLGenerationManager::OnFileAvailable,
                 base::Unretained(this),  // Safe b/c |this| is a singleton.
                 job_id));
}

void MHTMLGenerationManager::OnSerializeAsMHTMLResponse(
    RenderFrameHostImpl* sender,
    int job_id,
    bool mhtml_generation_in_renderer_succeeded,
    const std::set<std::string>& digests_of_uris_of_serialized_resources) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);

  // An unknown job id or an unexpected sender means a misbehaving renderer.
  Job* job = FindJob(job_id);
  if (!job || !job->IsMessageFromFrameExpected(sender)) {
    NOTREACHED();
    ReceivedBadMessage(sender->GetProcess(),
                       bad_message::DWNLD_INVALID_SERIALIZE_AS_MHTML_RESPONSE);
    return;
  }

  if (!mhtml_generation_in_renderer_succeeded) {
    JobFinished(job, JobStatus::FAILURE);
    return;
  }

  if (!job->OnSerializeAsMHTMLResponse(
          digests_of_uris_of_serialized_resources)) {
    JobFinished(job, JobStatus::FAILURE);
    return;
  }

  if (job->IsDone())
    JobFinished(job, JobStatus::SUCCESS);
}

// static
base::File MHTMLGenerationManager::CreateFile(const base::FilePath& file_path) {
  DCHECK_CURRENTLY_ON(BrowserThread::FILE);

  // SECURITY NOTE: A file descriptor to the file created below will be passed
  // to multiple renderer processes which (in out-of-process iframes mode) can
  // act on behalf of separate web principals.  Therefore it is important to
  // only allow writing to the file and forbid reading from the file (as this
  // would allow reading content generated by other renderers / other web
  // principals).
  uint32_t file_flags = base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE;

  base::File browser_file(file_path, file_flags);
  if (!browser_file.IsValid()) {
    LOG(ERROR) << "Failed to create file to save MHTML at: "
               << file_path.value();
  }
  return browser_file;
}

void MHTMLGenerationManager::OnFileAvailable(int job_id,
                                             base::File browser_file) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);

  Job* job = FindJob(job_id);
  DCHECK(job);

  if (!browser_file.IsValid()) {
    LOG(ERROR) << "Failed to create file";
    JobFinished(job, JobStatus::FAILURE);
    return;
  }

  job->set_browser_file(std::move(browser_file));

  if (!job->SendToNextRenderFrame()) {
    JobFinished(job, JobStatus::FAILURE);
  }
}

void MHTMLGenerationManager::JobFinished(Job* job, JobStatus job_status) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  DCHECK(job);

  job->MarkAsFinished();
  job->CloseFile(
      base::Bind(&MHTMLGenerationManager::OnFileClosed,
                 base::Unretained(this),  // Safe b/c |this| is a singleton.
                 job->id(), job_status));
}

void MHTMLGenerationManager::OnFileClosed(int job_id,
                                          JobStatus job_status,
                                          int64_t file_size) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);

  // On success the caller receives the final file size; on failure, -1.
  Job* job = FindJob(job_id);
  job->callback().Run(job_status == JobStatus::SUCCESS ? file_size : -1);

  id_to_job_.erase(job_id);
  delete job;
}

int MHTMLGenerationManager::NewJob(WebContents* web_contents,
                                   const MHTMLGenerationParams& params,
                                   const GenerateMHTMLCallback& callback) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  int job_id = next_job_id_++;
  id_to_job_[job_id] = new Job(job_id, web_contents, params, callback);
  return job_id;
}

MHTMLGenerationManager::Job* MHTMLGenerationManager::FindJob(int job_id) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  IDToJobMap::iterator iter = id_to_job_.find(job_id);
  if (iter == id_to_job_.end()) {
    NOTREACHED();
    return nullptr;
  }
  return iter->second;
}

void MHTMLGenerationManager::RenderProcessExited(Job* job) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  DCHECK(job);
  JobFinished(job, JobStatus::FAILURE);
}

}  // namespace content
Java
package com.skcraft.plume.event.block; import com.google.common.base.Functions; import com.google.common.base.Predicate; import com.skcraft.plume.event.BulkEvent; import com.skcraft.plume.event.Cause; import com.skcraft.plume.event.DelegateEvent; import com.skcraft.plume.event.Result; import com.skcraft.plume.util.Location3i; import net.minecraft.world.World; import java.util.List; import static com.google.common.base.Preconditions.checkNotNull; abstract class BlockEvent extends DelegateEvent implements BulkEvent { private final World world; protected BlockEvent(Cause cause, World world) { super(cause); checkNotNull(world, "world"); this.world = world; } /** * Get the world. * * @return The world */ public World getWorld() { return world; } /** * Get a list of affected locations. * * @return A list of affected locations */ public abstract List<Location3i> getLocations(); /** * Filter the list of affected blocks with the given predicate. If the * predicate returns {@code false}, then the block is removed. * * @param predicate the predicate * @param cancelEventOnFalse true to cancel the event and clear the block * list once the predicate returns {@code false} * @return Whether one or more blocks were filtered out */ public boolean filterLocations(Predicate<Location3i> predicate, boolean cancelEventOnFalse) { return filter(getLocations(), Functions.<Location3i>identity(), predicate, cancelEventOnFalse); } @Override public Result getResult() { if (getLocations().isEmpty()) { return Result.DENY; } return super.getResult(); } @Override public Result getExplicitResult() { return super.getResult(); } }
Java