Dataset columns: code — string (lengths 4 to 1.01M); language — string (2 classes)
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test case for issue #2843. // proto! streamp ( open:send<T:Send> { data(T) -> open<T> } ) fn rendezvous() { let (s, c) = streamp::init(); let streams: ~[streamp::client::open<int>] = ~[c]; error!("%?", streams[0]); } pub fn main() { //os::getenv("FOO"); rendezvous(); }
Rust
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.phoenix.pherf.rules; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.pherf.configuration.Column; import org.apache.phoenix.pherf.configuration.DataSequence; import org.apache.phoenix.pherf.configuration.DataTypeMapping; import java.util.concurrent.atomic.AtomicLong; public class SequentialIntegerDataGenerator implements RuleBasedDataGenerator { private final Column columnRule; private final AtomicLong counter; private final long minValue; private final long maxValue; public SequentialIntegerDataGenerator(Column columnRule) { Preconditions.checkArgument(columnRule.getDataSequence() == DataSequence.SEQUENTIAL); Preconditions.checkArgument(isIntegerType(columnRule.getType())); this.columnRule = columnRule; minValue = columnRule.getMinValue(); maxValue = columnRule.getMaxValue(); counter = new AtomicLong(0); } /** * Note that this method rolls over for attempts to get larger than maxValue * @return new DataValue */ @Override public DataValue getDataValue() { return new DataValue(columnRule.getType(), String.valueOf((counter.getAndIncrement() % (maxValue - minValue + 1)) + minValue)); } // Probably could go into a util class in the future boolean isIntegerType(DataTypeMapping mapping) { switch (mapping) { case BIGINT: case INTEGER: case TINYINT: case UNSIGNED_LONG: return true; default: return false; } } }
Java
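The getDataValue() comment above notes that the generated value rolls over once the counter would pass maxValue. A minimal, self-contained sketch of that modulo arithmetic, using hypothetical bounds minValue=1 and maxValue=3 (the class name and literal bounds are illustrative, not part of Pherf):

import java.util.concurrent.atomic.AtomicLong;

public class RolloverSketch {
    public static void main(String[] args) {
        long minValue = 1, maxValue = 3; // hypothetical column rule bounds
        AtomicLong counter = new AtomicLong(0);
        for (int i = 0; i < 5; i++) {
            // same arithmetic as getDataValue(): wraps back to minValue after maxValue
            long value = (counter.getAndIncrement() % (maxValue - minValue + 1)) + minValue;
            System.out.print(value + " "); // prints: 1 2 3 1 2
        }
    }
}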
package org.ovirt.engine.core.bll; import org.ovirt.engine.core.common.action.VdcActionParametersBase; public abstract class ConfigCommandBase<T extends VdcActionParametersBase> extends CommandBase<T> { protected ConfigCommandBase(T parameters) { super(parameters); } }
Java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.infinispan.remote; import org.apache.camel.test.infra.infinispan.services.InfinispanService; import org.apache.camel.test.infra.infinispan.services.InfinispanServiceFactory; import org.infinispan.client.hotrod.RemoteCache; import org.infinispan.commons.api.BasicCache; import org.infinispan.configuration.cache.CacheMode; import org.jgroups.util.UUID; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.RegisterExtension; import org.testcontainers.shaded.org.apache.commons.lang.SystemUtils; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; public class InfinispanRemoteConfigurationIT { @RegisterExtension static InfinispanService service = InfinispanServiceFactory.createService(); @Test public void remoteCacheWithoutProperties() throws Exception { InfinispanRemoteConfiguration configuration = new InfinispanRemoteConfiguration(); configuration.setHosts(service.host() + ":" + service.port()); configuration.setSecure(true); configuration.setUsername(service.username()); configuration.setPassword(service.password()); configuration.setSecurityServerName("infinispan"); configuration.setSaslMechanism("DIGEST-MD5"); configuration.setSecurityRealm("default"); if (SystemUtils.IS_OS_MAC) { configuration.addConfigurationProperty( "infinispan.client.hotrod.client_intelligence", "BASIC"); } try (InfinispanRemoteManager manager = new InfinispanRemoteManager(configuration)) { manager.start(); manager.getCacheContainer().administration() .getOrCreateCache( "misc_cache", new org.infinispan.configuration.cache.ConfigurationBuilder() .clustering() .cacheMode(CacheMode.DIST_SYNC).build()); BasicCache<Object, Object> cache = manager.getCache("misc_cache"); assertNotNull(cache); assertTrue(cache instanceof RemoteCache); String key = UUID.randomUUID().toString(); assertNull(cache.put(key, "val1")); assertNull(cache.put(key, "val2")); } } @Test public void remoteCacheWithProperties() throws Exception { InfinispanRemoteConfiguration configuration = new InfinispanRemoteConfiguration(); configuration.setHosts(service.host() + ":" + service.port()); configuration.setSecure(true); configuration.setUsername(service.username()); configuration.setPassword(service.password()); configuration.setSecurityServerName("infinispan"); configuration.setSaslMechanism("DIGEST-MD5"); configuration.setSecurityRealm("default"); if (SystemUtils.IS_OS_MAC) { configuration.setConfigurationUri("infinispan/client-mac.properties"); } else { configuration.setConfigurationUri("infinispan/client.properties"); } try (InfinispanRemoteManager manager = new InfinispanRemoteManager(configuration)) { 
manager.start(); manager.getCacheContainer().administration() .getOrCreateCache( "misc_cache", new org.infinispan.configuration.cache.ConfigurationBuilder() .clustering() .cacheMode(CacheMode.DIST_SYNC).build()); BasicCache<Object, Object> cache = manager.getCache("misc_cache"); assertNotNull(cache); assertTrue(cache instanceof RemoteCache); String key = UUID.randomUUID().toString(); assertNull(cache.put(key, "val1")); assertNotNull(cache.put(key, "val2")); } } }
Java
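A note on the asymmetry between the two tests above: the first asserts that both put() calls return null, while the properties-based test expects the second put() to return the previous value. The Hot Rod client only ships previous values back to the caller when force-return-values is enabled, so the referenced client.properties files presumably turn that flag on (e.g. via infinispan.client.hotrod.force_return_values). A hedged sketch of the programmatic equivalent — the endpoint and cache name are placeholders:

import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder;

public class ForceReturnValuesSketch {
    public static void main(String[] args) {
        ConfigurationBuilder builder = new ConfigurationBuilder();
        builder.addServer().host("localhost").port(11222); // hypothetical endpoint
        builder.forceReturnValues(true); // return previous values from put()
        RemoteCacheManager manager = new RemoteCacheManager(builder.build());
        try {
            RemoteCache<String, String> cache = manager.getCache("misc_cache");
            System.out.println(cache.put("key", "val1")); // null: no previous mapping
            System.out.println(cache.put("key", "val2")); // "val1": previous value returned
        } finally {
            manager.stop();
        }
    }
}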
package animate import ( "testing" ) func TestAnimate(t *testing.T) { testData := []struct { text string }{ {"funny cat"}, } command := Animate() for _, d := range testData { rsp, err := command.Exec("animate", d.text) if err != nil { t.Fatal(err) } if rsp == nil { t.Fatal("expected result, got nil") } } }
Go
// Copyright 2015 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.profiler; import static com.google.common.truth.Truth.assertThat; import static java.nio.charset.StandardCharsets.ISO_8859_1; import static org.junit.Assert.fail; import com.google.devtools.build.lib.clock.BlazeClock; import com.google.devtools.build.lib.clock.Clock; import com.google.devtools.build.lib.profiler.Profiler.ProfiledTaskKinds; import com.google.devtools.build.lib.profiler.analysis.ProfileInfo; import com.google.devtools.build.lib.testutil.FoundationTestCase; import com.google.devtools.build.lib.testutil.ManualClock; import com.google.devtools.build.lib.testutil.Suite; import com.google.devtools.build.lib.testutil.TestSpec; import com.google.devtools.build.lib.vfs.FileSystemUtils; import com.google.devtools.build.lib.vfs.Path; import java.io.InputStream; import java.io.OutputStream; import java.util.Arrays; import java.util.concurrent.atomic.AtomicInteger; import java.util.zip.Deflater; import java.util.zip.DeflaterOutputStream; import java.util.zip.Inflater; import java.util.zip.InflaterInputStream; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; /** * Unit tests for the profiler. */ @TestSpec(size = Suite.MEDIUM_TESTS) // testConcurrentProfiling takes ~700ms, testProfiler 100ms. 
@RunWith(JUnit4.class) public class ProfilerTest extends FoundationTestCase { private Path cacheDir; private Profiler profiler = Profiler.instance(); private ManualClock clock; @Before public final void createCacheDirectory() throws Exception { cacheDir = scratch.dir("/tmp"); } @Before public final void setManualClock() { clock = new ManualClock(); BlazeClock.setClock(clock); } @Test public void testProfilerActivation() throws Exception { Path cacheFile = cacheDir.getRelative("profile1.dat"); assertThat(profiler.isActive()).isFalse(); profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "basic test", false, BlazeClock.instance(), BlazeClock.instance().nanoTime()); assertThat(profiler.isActive()).isTrue(); profiler.stop(); assertThat(profiler.isActive()).isFalse(); } @Test public void testTaskDetails() throws Exception { Path cacheFile = cacheDir.getRelative("profile1.dat"); profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "basic test", false, BlazeClock.instance(), BlazeClock.instance().nanoTime()); profiler.startTask(ProfilerTask.ACTION, "action task"); profiler.logEvent(ProfilerTask.TEST, "event"); profiler.completeTask(ProfilerTask.ACTION); profiler.stop(); ProfileInfo info = ProfileInfo.loadProfile(cacheFile); info.calculateStats(); ProfileInfo.Task task = info.allTasksById.get(0); assertThat(task.id).isEqualTo(1); assertThat(task.type).isEqualTo(ProfilerTask.ACTION); assertThat(task.getDescription()).isEqualTo("action task"); task = info.allTasksById.get(1); assertThat(task.id).isEqualTo(2); assertThat(task.type).isEqualTo(ProfilerTask.TEST); assertThat(task.getDescription()).isEqualTo("event"); } @Test public void testProfiler() throws Exception { Path cacheFile = cacheDir.getRelative("profile1.dat"); profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "basic test", false, BlazeClock.instance(), BlazeClock.instance().nanoTime()); profiler.logSimpleTask(BlazeClock.instance().nanoTime(), ProfilerTask.PHASE, "profiler start"); profiler.startTask(ProfilerTask.ACTION, "complex task"); profiler.logEvent(ProfilerTask.PHASE, "event1"); profiler.startTask(ProfilerTask.ACTION_CHECK, "complex subtask"); // next task takes less than 10 ms and should be only aggregated profiler.logSimpleTask(BlazeClock.instance().nanoTime(), ProfilerTask.VFS_STAT, "stat1"); long startTime = BlazeClock.instance().nanoTime(); clock.advanceMillis(20); // this one will take at least 20 ms and should be present profiler.logSimpleTask(startTime, ProfilerTask.VFS_STAT, "stat2"); profiler.completeTask(ProfilerTask.ACTION_CHECK); profiler.completeTask(ProfilerTask.ACTION); profiler.stop(); // all other calls to profiler should be ignored profiler.logEvent(ProfilerTask.PHASE, "should be ignored"); // normally this would cause an exception but it is ignored since profiler // is disabled profiler.completeTask(ProfilerTask.ACTION_EXECUTE); ProfileInfo info = ProfileInfo.loadProfile(cacheFile); info.calculateStats(); assertThat(info.allTasksById).hasSize(6); // only 5 tasks + finalization should be recorded ProfileInfo.Task task = info.allTasksById.get(0); assertThat(task.stats.isEmpty()).isTrue(); task = info.allTasksById.get(1); int count = 0; for (ProfileInfo.AggregateAttr attr : task.getStatAttrArray()) { if (attr != null) { count++; } } assertThat(count).isEqualTo(2); // only children are GENERIC and ACTION_CHECK assertThat(ProfilerTask.TASK_COUNT).isEqualTo(task.aggregatedStats.toArray().length); 
assertThat(task.aggregatedStats.getAttr(ProfilerTask.VFS_STAT).count).isEqualTo(2); task = info.allTasksById.get(2); assertThat(task.durationNanos).isEqualTo(0); task = info.allTasksById.get(3); assertThat(task.stats.getAttr(ProfilerTask.VFS_STAT).count).isEqualTo(2); assertThat(task.subtasks).hasLength(1); assertThat(task.subtasks[0].getDescription()).isEqualTo("stat2"); // assert that startTime grows with id long time = -1; for (ProfileInfo.Task t : info.allTasksById) { assertThat(t.startTime).isAtLeast(time); time = t.startTime; } } @Test public void testProfilerRecordingAllEvents() throws Exception { Path cacheFile = cacheDir.getRelative("profile1.dat"); profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "basic test", true, BlazeClock.instance(), BlazeClock.instance().nanoTime()); profiler.startTask(ProfilerTask.ACTION, "action task"); // Next task takes less than 10 ms but should be recorded anyway. clock.advanceMillis(1); profiler.logSimpleTask(BlazeClock.instance().nanoTime(), ProfilerTask.VFS_STAT, "stat1"); profiler.completeTask(ProfilerTask.ACTION); profiler.stop(); ProfileInfo info = ProfileInfo.loadProfile(cacheFile); info.calculateStats(); assertThat(info.allTasksById).hasSize(3); // 2 tasks + finalization should be recorded ProfileInfo.Task task = info.allTasksById.get(1); assertThat(task.type).isEqualTo(ProfilerTask.VFS_STAT); // Check that task would have been dropped if profiler was not configured to record everything. assertThat(task.durationNanos).isLessThan(ProfilerTask.VFS_STAT.minDuration); } @Test public void testProfilerRecordingOnlySlowestEvents() throws Exception { Path profileData = cacheDir.getRelative("foo"); profiler.start(ProfiledTaskKinds.SLOWEST, profileData.getOutputStream(), "test", true, BlazeClock.instance(), BlazeClock.instance().nanoTime()); profiler.logSimpleTask(10000, 20000, ProfilerTask.VFS_STAT, "stat"); profiler.logSimpleTask(20000, 30000, ProfilerTask.REMOTE_EXECUTION, "remote execution"); assertThat(profiler.isProfiling(ProfilerTask.VFS_STAT)).isTrue(); assertThat(profiler.isProfiling(ProfilerTask.REMOTE_EXECUTION)).isFalse(); profiler.stop(); ProfileInfo info = ProfileInfo.loadProfile(profileData); info.calculateStats(); assertThat(info.allTasksById).hasSize(1); // only VFS_STAT task should be recorded ProfileInfo.Task task = info.allTasksById.get(0); assertThat(task.type).isEqualTo(ProfilerTask.VFS_STAT); } @Test public void testProfilerRecordsNothing() throws Exception { Path profileData = cacheDir.getRelative("foo"); profiler.start(ProfiledTaskKinds.NONE, profileData.getOutputStream(), "test", true, BlazeClock.instance(), BlazeClock.instance().nanoTime()); profiler.logSimpleTask(10000, 20000, ProfilerTask.VFS_STAT, "stat"); assertThat(ProfilerTask.VFS_STAT.collectsSlowestInstances()).isTrue(); assertThat(profiler.isProfiling(ProfilerTask.VFS_STAT)).isFalse(); profiler.stop(); ProfileInfo info = ProfileInfo.loadProfile(profileData); info.calculateStats(); assertThat(info.allTasksById).isEmpty(); } @Test public void testInconsistentCompleteTask() throws Exception { Path cacheFile = cacheDir.getRelative("profile2.dat"); profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "task stack inconsistency test", false, BlazeClock.instance(), BlazeClock.instance().nanoTime()); profiler.startTask(ProfilerTask.PHASE, "some task"); try { profiler.completeTask(ProfilerTask.ACTION); fail(); } catch (IllegalStateException e) { // this is expected } profiler.stop(); } @Test public void testConcurrentProfiling() throws Exception 
{ Path cacheFile = cacheDir.getRelative("profile3.dat"); profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "concurrent test", false, BlazeClock.instance(), BlazeClock.instance().nanoTime()); long id = Thread.currentThread().getId(); Thread thread1 = new Thread() { @Override public void run() { for (int i = 0; i < 10000; i++) { Profiler.instance().logEvent(ProfilerTask.TEST, "thread1"); } } }; long id1 = thread1.getId(); Thread thread2 = new Thread() { @Override public void run() { for (int i = 0; i < 10000; i++) { Profiler.instance().logEvent(ProfilerTask.TEST, "thread2"); } } }; long id2 = thread2.getId(); profiler.startTask(ProfilerTask.PHASE, "main task"); profiler.logEvent(ProfilerTask.TEST, "starting threads"); thread1.start(); thread2.start(); thread2.join(); thread1.join(); profiler.logEvent(ProfilerTask.TEST, "joined"); profiler.completeTask(ProfilerTask.PHASE); profiler.stop(); ProfileInfo info = ProfileInfo.loadProfile(cacheFile); info.calculateStats(); info.analyzeRelationships(); assertThat(info.allTasksById).hasSize(4 + 10000 + 10000); // total number of tasks assertThat(info.tasksByThread).hasSize(3); // total number of threads // while main thread had 3 tasks, 2 of them were nested, so tasksByThread // would contain only one "main task" task assertThat(info.tasksByThread.get(id)).hasLength(2); ProfileInfo.Task mainTask = info.tasksByThread.get(id)[0]; assertThat(mainTask.getDescription()).isEqualTo("main task"); assertThat(mainTask.subtasks).hasLength(2); // other threads had 10000 independent recorded tasks each assertThat(info.tasksByThread.get(id1)).hasLength(10000); assertThat(info.tasksByThread.get(id2)).hasLength(10000); int startId = mainTask.subtasks[0].id; // id of "starting threads" int endId = mainTask.subtasks[1].id; // id of "joining" assertThat(startId).isLessThan(info.tasksByThread.get(id1)[0].id); assertThat(startId).isLessThan(info.tasksByThread.get(id2)[0].id); assertThat(endId).isGreaterThan(info.tasksByThread.get(id1)[9999].id); assertThat(endId).isGreaterThan(info.tasksByThread.get(id2)[9999].id); } @Test public void testPhaseTasks() throws Exception { Path cacheFile = cacheDir.getRelative("profile4.dat"); profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "phase test", false, BlazeClock.instance(), BlazeClock.instance().nanoTime()); Thread thread1 = new Thread() { @Override public void run() { for (int i = 0; i < 100; i++) { Profiler.instance().logEvent(ProfilerTask.TEST, "thread1"); } } }; profiler.markPhase(ProfilePhase.INIT); // Empty phase. 
profiler.markPhase(ProfilePhase.LOAD); thread1.start(); thread1.join(); clock.advanceMillis(1); profiler.markPhase(ProfilePhase.ANALYZE); Thread thread2 = new Thread() { @Override public void run() { profiler.startTask(ProfilerTask.TEST, "complex task"); for (int i = 0; i < 100; i++) { Profiler.instance().logEvent(ProfilerTask.TEST, "thread2a"); } profiler.completeTask(ProfilerTask.TEST); profiler.markPhase(ProfilePhase.EXECUTE); for (int i = 0; i < 100; i++) { Profiler.instance().logEvent(ProfilerTask.TEST, "thread2b"); } } }; thread2.start(); thread2.join(); profiler.logEvent(ProfilerTask.TEST, "last task"); clock.advanceMillis(1); profiler.stop(); ProfileInfo info = ProfileInfo.loadProfile(cacheFile); info.calculateStats(); info.analyzeRelationships(); // number of tasks: INIT(1) + LOAD(1) + Thread1.TEST(100) + ANALYZE(1) // + Thread2a.TEST(100) + TEST(1) + EXECUTE(1) + Thread2b.TEST(100) + TEST(1) + INFO(1) assertThat(info.allTasksById).hasSize(1 + 1 + 100 + 1 + 100 + 1 + 1 + 100 + 1 + 1); assertThat(info.tasksByThread).hasSize(3); // total number of threads // Phase0 contains only itself ProfileInfo.Task p0 = info.getPhaseTask(ProfilePhase.INIT); assertThat(info.getTasksForPhase(p0)).hasSize(1); // Phase1 contains itself and 100 TEST "thread1" tasks ProfileInfo.Task p1 = info.getPhaseTask(ProfilePhase.LOAD); assertThat(info.getTasksForPhase(p1)).hasSize(101); // Phase2 contains itself and 1 "complex task" ProfileInfo.Task p2 = info.getPhaseTask(ProfilePhase.ANALYZE); assertThat(info.getTasksForPhase(p2)).hasSize(2); // Phase3 contains itself, 100 TEST "thread2b" tasks and "last task" ProfileInfo.Task p3 = info.getPhaseTask(ProfilePhase.EXECUTE); assertThat(info.getTasksForPhase(p3)).hasSize(103); } @Test public void testCorruptedFile() throws Exception { Path cacheFile = cacheDir.getRelative("profile5.dat"); profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "phase test", false, BlazeClock.instance(), BlazeClock.instance().nanoTime()); for (int i = 0; i < 100; i++) { profiler.startTask(ProfilerTask.TEST, "outer task " + i); clock.advanceMillis(1); profiler.logEvent(ProfilerTask.TEST, "inner task " + i); profiler.completeTask(ProfilerTask.TEST); } profiler.stop(); ProfileInfo info = ProfileInfo.loadProfile(cacheFile); info.calculateStats(); assertThat(info.isCorruptedOrIncomplete()).isFalse(); Path corruptedFile = cacheDir.getRelative("profile5bad.dat"); FileSystemUtils.writeContent( corruptedFile, Arrays.copyOf(FileSystemUtils.readContent(cacheFile), 2000)); info = ProfileInfo.loadProfile(corruptedFile); info.calculateStats(); assertThat(info.isCorruptedOrIncomplete()).isTrue(); // Since root tasks will appear after nested tasks in the profile file and // we have exactly one nested task for each root task, the following will always // be true for our corrupted file: // 0 <= number_of_all_tasks - 2*number_of_root_tasks <= 1 assertThat(info.allTasksById.size() / 2).isEqualTo(info.rootTasksById.size()); } @Test public void testUnsupportedProfilerRecord() throws Exception { Path dataFile = cacheDir.getRelative("profile5.dat"); profiler.start(ProfiledTaskKinds.ALL, dataFile.getOutputStream(), "phase test", false, BlazeClock.instance(), BlazeClock.instance().nanoTime()); profiler.startTask(ProfilerTask.TEST, "outer task"); profiler.logEvent(ProfilerTask.EXCEPTION, "inner task"); profiler.completeTask(ProfilerTask.TEST); profiler.startTask(ProfilerTask.SCANNER, "outer task 2"); profiler.logSimpleTask(Profiler.nanoTimeMaybe(), ProfilerTask.TEST, "inner task 2"); 
profiler.completeTask(ProfilerTask.SCANNER); profiler.stop(); // Validate our test profile. ProfileInfo info = ProfileInfo.loadProfile(dataFile); info.calculateStats(); assertThat(info.isCorruptedOrIncomplete()).isFalse(); assertThat(info.getStatsForType(ProfilerTask.TEST, info.rootTasksById).count).isEqualTo(2); assertThat(info.getStatsForType(ProfilerTask.UNKNOWN, info.rootTasksById).count).isEqualTo(0); // Now replace "TEST" type with something unsupported - e.g. "XXXX". InputStream in = new InflaterInputStream(dataFile.getInputStream(), new Inflater(false), 65536); byte[] buffer = new byte[60000]; int len = in.read(buffer); in.close(); assertThat(len).isLessThan(buffer.length); // Validate that file was completely decoded. String content = new String(buffer, ISO_8859_1); int infoIndex = content.indexOf("TEST"); assertThat(infoIndex).isGreaterThan(0); content = content.substring(0, infoIndex) + "XXXX" + content.substring(infoIndex + 4); OutputStream out = new DeflaterOutputStream(dataFile.getOutputStream(), new Deflater(Deflater.BEST_SPEED, false), 65536); out.write(content.getBytes(ISO_8859_1)); out.close(); // Validate that XXXX records were classified as UNKNOWN. info = ProfileInfo.loadProfile(dataFile); info.calculateStats(); assertThat(info.isCorruptedOrIncomplete()).isFalse(); assertThat(info.getStatsForType(ProfilerTask.TEST, info.rootTasksById).count).isEqualTo(0); assertThat(info.getStatsForType(ProfilerTask.SCANNER, info.rootTasksById).count).isEqualTo(1); assertThat(info.getStatsForType(ProfilerTask.EXCEPTION, info.rootTasksById).count).isEqualTo(1); assertThat(info.getStatsForType(ProfilerTask.UNKNOWN, info.rootTasksById).count).isEqualTo(2); } @Test public void testResilenceToNonDecreasingNanoTimes() throws Exception { final long initialNanoTime = BlazeClock.instance().nanoTime(); final AtomicInteger numNanoTimeCalls = new AtomicInteger(0); Clock badClock = new Clock() { @Override public long currentTimeMillis() { return BlazeClock.instance().currentTimeMillis(); } @Override public long nanoTime() { return initialNanoTime - numNanoTimeCalls.addAndGet(1); } }; Path cacheFile = cacheDir.getRelative("profile1.dat"); profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "testResilenceToNonDecreasingNanoTimes", false, badClock, initialNanoTime); profiler.logSimpleTask(badClock.nanoTime(), ProfilerTask.TEST, "some task"); profiler.stop(); } }
Java
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ package org.elasticsearch.xpack.runtimefields.query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Automata; import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.elasticsearch.script.Script; import org.elasticsearch.xpack.runtimefields.mapper.StringFieldScript; import java.util.List; import java.util.Objects; public class StringScriptFieldRangeQuery extends AbstractStringScriptFieldQuery { private final String lowerValue; private final String upperValue; private final boolean includeLower; private final boolean includeUpper; public StringScriptFieldRangeQuery( Script script, StringFieldScript.LeafFactory leafFactory, String fieldName, String lowerValue, String upperValue, boolean includeLower, boolean includeUpper ) { super(script, leafFactory, fieldName); this.lowerValue = Objects.requireNonNull(lowerValue); this.upperValue = Objects.requireNonNull(upperValue); this.includeLower = includeLower; this.includeUpper = includeUpper; assert lowerValue.compareTo(upperValue) <= 0; } @Override protected boolean matches(List<String> values) { for (String value : values) { int lct = lowerValue.compareTo(value); boolean lowerOk = includeLower ? lct <= 0 : lct < 0; if (lowerOk) { int uct = upperValue.compareTo(value); boolean upperOk = includeUpper ? uct >= 0 : uct > 0; if (upperOk) { return true; } } } return false; } @Override public void visit(QueryVisitor visitor) { if (visitor.acceptField(fieldName())) { visitor.consumeTermsMatching( this, fieldName(), () -> new ByteRunAutomaton( Automata.makeBinaryInterval(new BytesRef(lowerValue), includeLower, new BytesRef(upperValue), includeUpper) ) ); } } @Override public final String toString(String field) { StringBuilder b = new StringBuilder(); if (false == fieldName().contentEquals(field)) { b.append(fieldName()).append(':'); } b.append(includeLower ? '[' : '{'); b.append(lowerValue).append(" TO ").append(upperValue); b.append(includeUpper ? ']' : '}'); return b.toString(); } @Override public int hashCode() { return Objects.hash(super.hashCode(), lowerValue, upperValue, includeLower, includeUpper); } @Override public boolean equals(Object obj) { if (false == super.equals(obj)) { return false; } StringScriptFieldRangeQuery other = (StringScriptFieldRangeQuery) obj; return lowerValue.equals(other.lowerValue) && upperValue.equals(other.upperValue) && includeLower == other.includeLower && includeUpper == other.includeUpper; } String lowerValue() { return lowerValue; } String upperValue() { return upperValue; } boolean includeLower() { return includeLower; } boolean includeUpper() { return includeUpper; } }
Java
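The matches() method above implements the bracket semantics that toString() renders with [ / { and ] / }. A standalone restatement of that compareTo logic, using a hypothetical half-open range ["b" TO "d") rather than the Elasticsearch types:

public class RangeMatchSketch {
    static boolean inRange(String value, String lower, String upper,
                           boolean includeLower, boolean includeUpper) {
        // identical comparisons to StringScriptFieldRangeQuery.matches()
        int lct = lower.compareTo(value);
        boolean lowerOk = includeLower ? lct <= 0 : lct < 0;
        if (!lowerOk) {
            return false;
        }
        int uct = upper.compareTo(value);
        return includeUpper ? uct >= 0 : uct > 0;
    }

    public static void main(String[] args) {
        System.out.println(inRange("b", "b", "d", true, false)); // true: inclusive lower bound
        System.out.println(inRange("d", "b", "d", true, false)); // false: exclusive upper bound
        System.out.println(inRange("c", "b", "d", true, false)); // true: strictly inside
    }
}

With includeLower=true and includeUpper=false this corresponds to the [b TO d} form that toString() would print.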
/* * Copyright 2014 Dominick Baier, Brock Allen * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using IdentityManager.Api.Models; using System; using System.Linq; namespace IdentityManager { static class IdentityManagerResultExtensions { public static ErrorModel ToError(this IdentityManagerResult result) { if (result == null) throw new ArgumentNullException("result"); return new ErrorModel { Errors = result.Errors.ToArray() }; } } }
C#
/* * Copyright © 2013-2018 camunda services GmbH and various authors ([email protected]) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.container.impl.jboss.deployment.marker; import java.util.List; import org.camunda.bpm.application.AbstractProcessApplication; import org.camunda.bpm.application.impl.metadata.spi.ProcessesXml; import org.camunda.bpm.container.impl.jboss.util.ProcessesXmlWrapper; import org.jboss.as.ee.component.ComponentDescription; import org.jboss.as.server.deployment.AttachmentKey; import org.jboss.as.server.deployment.AttachmentList; import org.jboss.as.server.deployment.DeploymentUnit; import org.jboss.jandex.AnnotationInstance; /** * * @author Daniel Meyer * */ public class ProcessApplicationAttachments { private static final AttachmentKey<Boolean> MARKER = AttachmentKey.create(Boolean.class); private static final AttachmentKey<Boolean> PART_OF_MARKER = AttachmentKey.create(Boolean.class); private static final AttachmentKey<AttachmentList<ProcessesXmlWrapper>> PROCESSES_XML_LIST = AttachmentKey.createList(ProcessesXmlWrapper.class); private static final AttachmentKey<ComponentDescription> PA_COMPONENT = AttachmentKey.create(ComponentDescription.class); private static final AttachmentKey<AnnotationInstance> POST_DEPLOY_METHOD = AttachmentKey.create(AnnotationInstance.class); private static final AttachmentKey<AnnotationInstance> PRE_UNDEPLOY_METHOD = AttachmentKey.create(AnnotationInstance.class); /** * Attach the parsed ProcessesXml file to a deployment unit. * */ public static void addProcessesXml(DeploymentUnit unit, ProcessesXmlWrapper processesXmlWrapper) { unit.addToAttachmentList(PROCESSES_XML_LIST, processesXmlWrapper); } /** * Returns the list of attached {@link ProcessesXml} wrappers. * */ public static List<ProcessesXmlWrapper> getProcessesXmls(DeploymentUnit deploymentUnit) { return deploymentUnit.getAttachmentList(PROCESSES_XML_LIST); } /** * Marks a {@link DeploymentUnit} as a process application */ public static void mark(DeploymentUnit unit) { unit.putAttachment(MARKER, Boolean.TRUE); } /** * Marks a {@link DeploymentUnit} as part of a process application */ public static void markPartOfProcessApplication(DeploymentUnit unit) { if(unit.getParent() != null && unit.getParent() != unit) { unit.getParent().putAttachment(PART_OF_MARKER, Boolean.TRUE); } } /** * Returns true if the deployment unit is either itself a process * application or part of a process application.
*/ public static boolean isPartOfProcessApplication(DeploymentUnit unit) { if(isProcessApplication(unit)) { return true; } if(unit.getParent() != null && unit.getParent() != unit) { return unit.getParent().hasAttachment(PART_OF_MARKER); } return false; } /** * Returns true if the {@link DeploymentUnit} itself is a process application (carries a processes.xml) * */ public static boolean isProcessApplication(DeploymentUnit deploymentUnit) { return deploymentUnit.hasAttachment(MARKER); } /** * Returns the {@link ComponentDescription} for the {@link AbstractProcessApplication} component */ public static ComponentDescription getProcessApplicationComponent(DeploymentUnit deploymentUnit) { return deploymentUnit.getAttachment(PA_COMPONENT); } /** * Attach the {@link ComponentDescription} for the {@link AbstractProcessApplication} component */ public static void attachProcessApplicationComponent(DeploymentUnit deploymentUnit, ComponentDescription componentDescription){ deploymentUnit.putAttachment(PA_COMPONENT, componentDescription); } /** * Attach the {@link AnnotationInstance}s for the PostDeploy methods */ public static void attachPostDeployDescription(DeploymentUnit deploymentUnit, AnnotationInstance annotation){ deploymentUnit.putAttachment(POST_DEPLOY_METHOD, annotation); } /** * Attach the {@link AnnotationInstance}s for the PreUndeploy methods */ public static void attachPreUndeployDescription(DeploymentUnit deploymentUnit, AnnotationInstance annotation){ deploymentUnit.putAttachment(PRE_UNDEPLOY_METHOD, annotation); } /** * @return the description of the PostDeploy method */ public static AnnotationInstance getPostDeployDescription(DeploymentUnit deploymentUnit) { return deploymentUnit.getAttachment(POST_DEPLOY_METHOD); } /** * @return the description of the PreUndeploy method */ public static AnnotationInstance getPreUndeployDescription(DeploymentUnit deploymentUnit) { return deploymentUnit.getAttachment(PRE_UNDEPLOY_METHOD); } private ProcessApplicationAttachments() { } }
Java
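ProcessApplicationAttachments is a passive holder of attachment keys; some deployment-unit processor has to call mark()/markPartOfProcessApplication() during deployment. A hypothetical sketch of such a processor, assuming the standard JBoss AS deployment SPI — the processor name and the META-INF/processes.xml probe are illustrative, not the actual Camunda subsystem code:

import org.jboss.as.server.deployment.Attachments;
import org.jboss.as.server.deployment.DeploymentPhaseContext;
import org.jboss.as.server.deployment.DeploymentUnit;
import org.jboss.as.server.deployment.DeploymentUnitProcessingException;
import org.jboss.as.server.deployment.DeploymentUnitProcessor;

public class ProcessesXmlMarkerProcessor implements DeploymentUnitProcessor {

    @Override
    public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
        DeploymentUnit unit = phaseContext.getDeploymentUnit();
        // Probe the deployment root for a descriptor; the path is illustrative.
        boolean hasProcessesXml = unit.getAttachment(Attachments.DEPLOYMENT_ROOT)
                .getRoot().getChild("META-INF/processes.xml").exists();
        if (hasProcessesXml) {
            ProcessApplicationAttachments.mark(unit);
            ProcessApplicationAttachments.markPartOfProcessApplication(unit);
        }
    }

    @Override
    public void undeploy(DeploymentUnit context) {
        // attachments are discarded with the deployment unit; nothing to undo
    }
}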
/* Copyright 2010, Object Management Group, Inc. * Copyright 2010, PrismTech, Inc. * Copyright 2010, Real-Time Innovations, Inc. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.omg.dds.type.typeobject; import java.util.List; import org.omg.dds.type.Extensibility; import org.omg.dds.type.ID; import org.omg.dds.type.Nested; @Extensibility(Extensibility.Kind.MUTABLE_EXTENSIBILITY) @Nested public interface UnionType extends Type { // ----------------------------------------------------------------------- // Properties // ----------------------------------------------------------------------- @ID(MemberId.MEMBER_UNIONTYPE_MEMBER_ID) public List<UnionMember> getMember(); // ----------------------------------------------------------------------- // Types // ----------------------------------------------------------------------- public static final class MemberId { // --- Constants: ---------------------------------------------------- public static final int MEMBER_UNIONTYPE_MEMBER_ID = 100; // --- Constructor: -------------------------------------------------- private MemberId() { // empty } } }
Java
/*------------------------------------------------------------------------- * * ipc.h * POSTGRES inter-process communication definitions. * * This file is misnamed, as it no longer has much of anything directly * to do with IPC. The functionality here is concerned with managing * exit-time cleanup for either a postmaster or a backend. * * * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * $PostgreSQL: pgsql/src/include/storage/ipc.h,v 1.81 2010/01/20 18:54:27 heikki Exp $ * *------------------------------------------------------------------------- */ #ifndef IPC_H #define IPC_H typedef void (*pg_on_exit_callback) (int code, Datum arg); typedef void (*shmem_startup_hook_type) (void); /*---------- * API for handling cleanup that must occur during either ereport(ERROR) * or ereport(FATAL) exits from a block of code. (Typical examples are * undoing transient changes to shared-memory state.) * * PG_ENSURE_ERROR_CLEANUP(cleanup_function, arg); * { * ... code that might throw ereport(ERROR) or ereport(FATAL) ... * } * PG_END_ENSURE_ERROR_CLEANUP(cleanup_function, arg); * * where the cleanup code is in a function declared per pg_on_exit_callback. * The Datum value "arg" can carry any information the cleanup function * needs. * * This construct ensures that cleanup_function() will be called during * either ERROR or FATAL exits. It will not be called on successful * exit from the controlled code. (If you want it to happen then too, * call the function yourself from just after the construct.) * * Note: the macro arguments are multiply evaluated, so avoid side-effects. *---------- */ #define PG_ENSURE_ERROR_CLEANUP(cleanup_function, arg) \ do { \ on_shmem_exit(cleanup_function, arg); \ PG_TRY() #define PG_END_ENSURE_ERROR_CLEANUP(cleanup_function, arg) \ cancel_shmem_exit(cleanup_function, arg); \ PG_CATCH(); \ { \ cancel_shmem_exit(cleanup_function, arg); \ cleanup_function (0, arg); \ PG_RE_THROW(); \ } \ PG_END_TRY(); \ } while (0) /* ipc.c */ extern bool proc_exit_inprogress; extern void proc_exit(int code); extern void shmem_exit(int code); extern void on_proc_exit(pg_on_exit_callback function, Datum arg); extern void on_shmem_exit(pg_on_exit_callback function, Datum arg); extern void cancel_shmem_exit(pg_on_exit_callback function, Datum arg); extern void on_exit_reset(void); /* ipci.c */ extern PGDLLIMPORT shmem_startup_hook_type shmem_startup_hook; extern void CreateSharedMemoryAndSemaphores(bool makePrivate, int port); #endif /* IPC_H */
C
#!/bin/bash . ./setenv C_BIND=`uname -n` C_PORT="1528" S_PATH=$S_HOME/product-gfxd/bin #echo "creating schema" $S_PATH/gfxd run -file=schema_temp.sql -client-port=$C_PORT -client-bind-address=$C_BIND
Shell
package alien4cloud.tosca.parser.postprocess; import static alien4cloud.utils.AlienUtils.safe; import javax.annotation.Resource; import org.alien4cloud.tosca.model.types.NodeType; import org.springframework.stereotype.Component; /** * Post process a node type. */ @Component public class NodeTypePostProcessor implements IPostProcessor<NodeType> { @Resource private CapabilityDefinitionPostProcessor capabilityDefinitionPostProcessor; @Resource private RequirementDefinitionPostProcessor requirementDefinitionPostProcessor; @Override public void process(NodeType instance) { safe(instance.getCapabilities()).forEach(capabilityDefinitionPostProcessor); safe(instance.getRequirements()).forEach(requirementDefinitionPostProcessor); } }
Java
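NodeTypePostProcessor leans on AlienUtils.safe() so that forEach never trips over a null capability or requirement list. A minimal sketch of what safe() presumably does — return the collection itself, or an empty one when null — not the actual alien4cloud implementation:

import java.util.Collections;
import java.util.List;
import java.util.Map;

public final class SafeSketch {
    // Wrap a possibly-null list so callers can iterate without a null check.
    public static <T> List<T> safe(List<T> list) {
        return list == null ? Collections.<T>emptyList() : list;
    }

    public static <K, V> Map<K, V> safe(Map<K, V> map) {
        return map == null ? Collections.<K, V>emptyMap() : map;
    }

    public static void main(String[] args) {
        List<String> capabilities = null;
        // forEach over a null list becomes a no-op instead of an NPE
        safe(capabilities).forEach(System.out::println);
    }
}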
/* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package protoutil import ( "bytes" "crypto/sha256" "encoding/asn1" "math/big" "github.com/golang/protobuf/proto" cb "github.com/hyperledger/fabric-protos-go/common" "github.com/pkg/errors" ) // NewBlock constructs a block with no data and no metadata. func NewBlock(seqNum uint64, previousHash []byte) *cb.Block { block := &cb.Block{} block.Header = &cb.BlockHeader{} block.Header.Number = seqNum block.Header.PreviousHash = previousHash block.Header.DataHash = []byte{} block.Data = &cb.BlockData{} var metadataContents [][]byte for i := 0; i < len(cb.BlockMetadataIndex_name); i++ { metadataContents = append(metadataContents, []byte{}) } block.Metadata = &cb.BlockMetadata{Metadata: metadataContents} return block } type asn1Header struct { Number *big.Int PreviousHash []byte DataHash []byte } func BlockHeaderBytes(b *cb.BlockHeader) []byte { asn1Header := asn1Header{ PreviousHash: b.PreviousHash, DataHash: b.DataHash, Number: new(big.Int).SetUint64(b.Number), } result, err := asn1.Marshal(asn1Header) if err != nil { // Errors should only arise for types which cannot be encoded, since the // BlockHeader type is known a-priori to contain only encodable types, an // error here is fatal and should not be propagated panic(err) } return result } func BlockHeaderHash(b *cb.BlockHeader) []byte { sum := sha256.Sum256(BlockHeaderBytes(b)) return sum[:] } func BlockDataHash(b *cb.BlockData) []byte { sum := sha256.Sum256(bytes.Join(b.Data, nil)) return sum[:] } // GetChannelIDFromBlockBytes returns channel ID given byte array which represents // the block func GetChannelIDFromBlockBytes(bytes []byte) (string, error) { block, err := UnmarshalBlock(bytes) if err != nil { return "", err } return GetChannelIDFromBlock(block) } // GetChannelIDFromBlock returns channel ID in the block func GetChannelIDFromBlock(block *cb.Block) (string, error) { if block == nil || block.Data == nil || block.Data.Data == nil || len(block.Data.Data) == 0 { return "", errors.New("failed to retrieve channel id - block is empty") } var err error envelope, err := GetEnvelopeFromBlock(block.Data.Data[0]) if err != nil { return "", err } payload, err := UnmarshalPayload(envelope.Payload) if err != nil { return "", err } if payload.Header == nil { return "", errors.New("failed to retrieve channel id - payload header is empty") } chdr, err := UnmarshalChannelHeader(payload.Header.ChannelHeader) if err != nil { return "", err } return chdr.ChannelId, nil } // GetMetadataFromBlock retrieves metadata at the specified index. func GetMetadataFromBlock(block *cb.Block, index cb.BlockMetadataIndex) (*cb.Metadata, error) { if block.Metadata == nil { return nil, errors.New("no metadata in block") } if len(block.Metadata.Metadata) <= int(index) { return nil, errors.Errorf("no metadata at index [%s]", index) } md := &cb.Metadata{} err := proto.Unmarshal(block.Metadata.Metadata[index], md) if err != nil { return nil, errors.Wrapf(err, "error unmarshalling metadata at index [%s]", index) } return md, nil } // GetMetadataFromBlockOrPanic retrieves metadata at the specified index, or // panics on error func GetMetadataFromBlockOrPanic(block *cb.Block, index cb.BlockMetadataIndex) *cb.Metadata { md, err := GetMetadataFromBlock(block, index) if err != nil { panic(err) } return md } // GetConsenterMetadataFromBlock attempts to retrieve consenter metadata from the value // stored in block metadata at index SIGNATURES (first field). 
If no consenter metadata // is found there, it falls back to index ORDERER (third field). func GetConsenterMetadataFromBlock(block *cb.Block) (*cb.Metadata, error) { m, err := GetMetadataFromBlock(block, cb.BlockMetadataIndex_SIGNATURES) if err != nil { return nil, errors.WithMessage(err, "failed to retrieve metadata") } // TODO FAB-15864 Remove this fallback when we can stop supporting upgrade from pre-1.4.1 orderer if len(m.Value) == 0 { return GetMetadataFromBlock(block, cb.BlockMetadataIndex_ORDERER) } obm := &cb.OrdererBlockMetadata{} err = proto.Unmarshal(m.Value, obm) if err != nil { return nil, errors.Wrap(err, "failed to unmarshal orderer block metadata") } res := &cb.Metadata{} err = proto.Unmarshal(obm.ConsenterMetadata, res) if err != nil { return nil, errors.Wrap(err, "failed to unmarshal consenter metadata") } return res, nil } // GetLastConfigIndexFromBlock retrieves the index of the last config block as // encoded in the block metadata func GetLastConfigIndexFromBlock(block *cb.Block) (uint64, error) { m, err := GetMetadataFromBlock(block, cb.BlockMetadataIndex_SIGNATURES) if err != nil { return 0, errors.WithMessage(err, "failed to retrieve metadata") } // TODO FAB-15864 Remove this fallback when we can stop supporting upgrade from pre-1.4.1 orderer if len(m.Value) == 0 { m, err := GetMetadataFromBlock(block, cb.BlockMetadataIndex_LAST_CONFIG) if err != nil { return 0, errors.WithMessage(err, "failed to retrieve metadata") } lc := &cb.LastConfig{} err = proto.Unmarshal(m.Value, lc) if err != nil { return 0, errors.Wrap(err, "error unmarshalling LastConfig") } return lc.Index, nil } obm := &cb.OrdererBlockMetadata{} err = proto.Unmarshal(m.Value, obm) if err != nil { return 0, errors.Wrap(err, "failed to unmarshal orderer block metadata") } return obm.LastConfig.Index, nil } // GetLastConfigIndexFromBlockOrPanic retrieves the index of the last config // block as encoded in the block metadata, or panics on error func GetLastConfigIndexFromBlockOrPanic(block *cb.Block) uint64 { index, err := GetLastConfigIndexFromBlock(block) if err != nil { panic(err) } return index } // CopyBlockMetadata copies metadata from one block into another func CopyBlockMetadata(src *cb.Block, dst *cb.Block) { dst.Metadata = src.Metadata // Once copied initialize with rest of the // required metadata positions. InitBlockMetadata(dst) } // InitBlockMetadata initializes metadata structure func InitBlockMetadata(block *cb.Block) { if block.Metadata == nil { block.Metadata = &cb.BlockMetadata{Metadata: [][]byte{{}, {}, {}, {}, {}}} } else if len(block.Metadata.Metadata) < int(cb.BlockMetadataIndex_COMMIT_HASH+1) { for i := int(len(block.Metadata.Metadata)); i <= int(cb.BlockMetadataIndex_COMMIT_HASH); i++ { block.Metadata.Metadata = append(block.Metadata.Metadata, []byte{}) } } }
Go
/* * Copyright 2021 NAVER Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.navercorp.pinpoint.profiler.plugin.config; import com.navercorp.pinpoint.bootstrap.config.DefaultProfilerConfig; import com.navercorp.pinpoint.bootstrap.config.Value; import com.navercorp.pinpoint.common.util.StringUtils; import java.util.Collections; import java.util.List; public class DefaultPluginLoadingConfig implements PluginLoadingConfig { // ArtifactIdUtils.ARTIFACT_SEPARATOR private static final String ARTIFACT_SEPARATOR = ";"; private List<String> pluginLoadOrder = Collections.emptyList(); private List<String> disabledPlugins = Collections.emptyList(); private List<String> importPluginIds = Collections.emptyList(); public DefaultPluginLoadingConfig() { } @Override public List<String> getPluginLoadOrder() { return pluginLoadOrder; } @Value("${profiler.plugin.load.order}") public void setPluginLoadOrder(String pluginLoadOrder) { this.pluginLoadOrder = StringUtils.tokenizeToStringList(pluginLoadOrder, ","); } @Override public List<String> getDisabledPlugins() { return disabledPlugins; } @Value("${profiler.plugin.disable}") public void setDisabledPlugins(String disabledPlugins) { this.disabledPlugins = StringUtils.tokenizeToStringList(disabledPlugins, ","); } @Override public List<String> getImportPluginIds() { return importPluginIds; } @Value("${" + DefaultProfilerConfig.IMPORT_PLUGIN + "}") public void setImportPluginIds(String importPluginIds) { this.importPluginIds = StringUtils.tokenizeToStringList(importPluginIds, ARTIFACT_SEPARATOR); } @Override public String toString() { return "DefaultPluginLoadingConfig{" + "pluginLoadOrder=" + pluginLoadOrder + ", disabledPlugins=" + disabledPlugins + ", importPluginIds=" + importPluginIds + '}'; } }
Java
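Each setter above funnels a raw property string through StringUtils.tokenizeToStringList() with either "," or ";" (ARTIFACT_SEPARATOR). A sketch of the presumed tokenizer behavior — split on the delimiter, trim, drop empties — using hypothetical property values; this is not the pinpoint-common implementation:

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

public class TokenizeSketch {
    static List<String> tokenizeToStringList(String value, String delimiter) {
        List<String> tokens = new ArrayList<>();
        if (value == null) {
            return tokens;
        }
        for (String token : value.split(Pattern.quote(delimiter))) {
            String trimmed = token.trim();
            if (!trimmed.isEmpty()) {
                tokens.add(trimmed); // keep only non-empty, trimmed tokens
            }
        }
        return tokens;
    }

    public static void main(String[] args) {
        // profiler.plugin.disable style: comma-separated
        System.out.println(tokenizeToStringList("pluginA, pluginB,", ","));
        // import plugin ids: semicolon-separated (ARTIFACT_SEPARATOR)
        System.out.println(tokenizeToStringList("com.foo:bar:1.0;com.foo:baz:1.0", ";"));
    }
}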
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. */ /** * @author Pavel N. Vyssotski */ // OptionParser.cpp #include <cstring> #include "AgentBase.h" #include "MemoryManager.h" #include "AgentException.h" #include "Log.h" #include "OptionParser.h" using namespace jdwp; using namespace std; OptionParser::OptionParser() throw() { m_optionCount = 0; m_optionString = 0; m_options = 0; m_help = false; m_suspend = true; m_server = false; m_timeout = 0; m_transport = 0; m_address = 0; m_log = 0; m_kindFilter = 0; m_srcFilter = 0; m_onuncaught = false; m_onthrow = 0; m_launch = 0; } bool OptionParser::AsciiToBool(const char *str) throw(IllegalArgumentException) { if (strcmp("y", str) == 0) { return true; } else if (strcmp("n", str) == 0) { return false; } else { throw IllegalArgumentException(); } } void OptionParser::Parse(const char* str) throw(AgentException) { size_t i; int k; if (str == 0) return; const size_t len = strlen(str); if (len == 0) return; for (i = 0; i < len; i++) { if (str[i] == ',') { m_optionCount++; } else if (str[i] == '"' || str[i] == '\'') { char quote = str[i]; if (i > 0 && str[i-1] != '=') { throw IllegalArgumentException(); } i++; while (i < len && str[i] != quote) { i++; } if (i+1 < len && str[i+1] != ',') { throw IllegalArgumentException(); } } } m_optionCount++; m_optionString = reinterpret_cast<char*>(AgentBase::GetMemoryManager(). Allocate(len + 1 JDWP_FILE_LINE)); strcpy(m_optionString, str); m_options = reinterpret_cast<Option*>(AgentBase::GetMemoryManager(). 
Allocate(m_optionCount * sizeof(Option) JDWP_FILE_LINE)); m_options[0].name = m_optionString; m_options[0].value = ""; k = 0; bool waitEndOfOption = false; for (i = 0; i < len && k < m_optionCount; i++) { if ((m_optionString[i] == '=') && (!waitEndOfOption)) { waitEndOfOption = true; m_optionString[i] = '\0'; m_options[k].value = &m_optionString[i+1]; } else if (m_optionString[i] == ',') { waitEndOfOption = false; m_optionString[i] = '\0'; k++; m_options[k].name = &m_optionString[i+1]; m_options[k].value = ""; } else if (m_optionString[i] == '"' || m_optionString[i] == '\'') { char quote = m_optionString[i]; m_optionString[i] = '\0'; m_options[k].value = &m_optionString[i+1]; i++; while (i < len && m_optionString[i] != quote) { i++; } if (i < len) { m_optionString[i] = '\0'; } } } for (k = 0; k < m_optionCount; k++) { if (strcmp("transport", m_options[k].name) == 0) { m_transport = m_options[k].value; } else if (strcmp("address", m_options[k].name) == 0) { m_address = m_options[k].value; } else if (strcmp("timeout", m_options[k].name) == 0) { m_timeout = atol(m_options[k].value); } else if (strcmp("suspend", m_options[k].name) == 0) { m_suspend = AsciiToBool(m_options[k].value); } else if (strcmp("server", m_options[k].name) == 0) { m_server = AsciiToBool(m_options[k].value); } else if (strcmp("launch", m_options[k].name) == 0) { m_launch = m_options[k].value; } else if (strcmp("onuncaught", m_options[k].name) == 0) { m_onuncaught = AsciiToBool(m_options[k].value); } else if (strcmp("onthrow", m_options[k].name) == 0) { m_onthrow = m_options[k].value; } else if (strcmp("help", m_options[k].name) == 0) { m_help = true; #ifndef NDEBUG } else if (strcmp("log", m_options[k].name) == 0) { m_log = m_options[k].value; } else if (strcmp("trace", m_options[k].name) == 0) { m_kindFilter = m_options[k].value; } else if (strcmp("src", m_options[k].name) == 0) { m_srcFilter = m_options[k].value; #endif // NDEBUG } } if ((m_onthrow != 0) || (m_onuncaught != 0)) { if (m_launch == 0) { JDWP_ERROR("Specify launch=<command line> when using onthrow or onuncaught option"); throw IllegalArgumentException(); } } } OptionParser::~OptionParser() throw() { if (m_optionString != 0) AgentBase::GetMemoryManager().Free(m_optionString JDWP_FILE_LINE); if (m_options != 0) AgentBase::GetMemoryManager().Free(m_options JDWP_FILE_LINE); } const char *OptionParser::FindOptionValue(const char *name) const throw() { for (int i = 0; i < m_optionCount; i++) { if (strcmp(name, m_options[i].name) == 0) { return m_options[i].value; } } return 0; }
C++
package org.jaudiotagger.audio.mp4; import org.jaudiotagger.audio.generic.GenericAudioHeader; import org.jaudiotagger.audio.mp4.atom.Mp4EsdsBox; /** * Store some additional attributes not available for all audio types */ public class Mp4AudioHeader extends GenericAudioHeader { /** * The key for the kind field<br> * * @see #content */ public final static String FIELD_KIND = "KIND"; /** * The key for the profile<br> * * @see #content */ public final static String FIELD_PROFILE = "PROFILE"; /** * The key for the ftyp brand<br> * * @see #content */ public final static String FIELD_BRAND = "BRAND"; public void setKind(Mp4EsdsBox.Kind kind) { content.put(FIELD_KIND, kind); } /** * @return kind */ public Mp4EsdsBox.Kind getKind() { return (Mp4EsdsBox.Kind) content.get(FIELD_KIND); } /** * Set the audio profile * * @param profile */ public void setProfile(Mp4EsdsBox.AudioProfile profile) { content.put(FIELD_PROFILE, profile); } /** * @return audio profile */ public Mp4EsdsBox.AudioProfile getProfile() { return (Mp4EsdsBox.AudioProfile) content.get(FIELD_PROFILE); } /** * @param brand */ public void setBrand(String brand) { content.put(FIELD_BRAND, brand); } /** * @return brand */ public String getBrand() { return (String) content.get(FIELD_BRAND); } }
Java
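A short usage sketch of the typed accessors above: each setter stores its value in the inherited content map under a FIELD_* key, and the matching getter casts it back. The brand string is a hypothetical example value, not one read from a real file:

import org.jaudiotagger.audio.mp4.Mp4AudioHeader;

public class Mp4AudioHeaderSketch {
    public static void main(String[] args) {
        Mp4AudioHeader header = new Mp4AudioHeader();
        header.setBrand("M4A "); // hypothetical ftyp brand captured during parsing
        System.out.println(header.getBrand()); // prints: M4A
    }
}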
######################################################################## # # Linux on Hyper-V and Azure Test Code, ver. 1.0.0 # Copyright (c) Microsoft Corporation # # All rights reserved. # Licensed under the Apache License, Version 2.0 (the ""License""); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS # OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION # ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR # PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT. # # See the Apache Version 2.0 License for specific language governing # permissions and limitations under the License. # ######################################################################## <# .Synopsis Run continous Ping while disabling and enabling the SR-IOV feature .Description Continuously ping a server, from a Linux client, over a SR-IOV connection. Disable SR-IOV on the Linux client and observe RTT increase. Re-enable SR-IOV and observe that RTT lowers. .Parameter vmName Name of the test VM. .Parameter hvServer Name of the Hyper-V server hosting the VM. .Parameter testParams Semicolon separated list of test parameters. This setup script does not use any setup scripts. .Example <test> <testName>Ping_DisableVF</testName> <testScript>setupscripts\SR-IOV_Ping_DisableVF.ps1</testScript> <files>remote-scripts/ica/utils.sh,remote-scripts/ica/SR-IOV_Utils.sh</files> <setupScript> <file>setupscripts\RevertSnapshot.ps1</file> <file>setupscripts\SR-IOV_enable.ps1</file> </setupScript> <noReboot>False</noReboot> <testParams> <param>NIC=NetworkAdapter,External,SRIOV,001600112200</param> <param>TC_COVERED=SRIOV-5A</param> <param>VF_IP1=10.11.12.31</param> <param>VF_IP2=10.11.12.32</param> <param>NETMASK=255.255.255.0</param> <param>REMOTE_SERVER=remoteHostname</param> </testParams> <timeout>1800</timeout> </test> #> param ([String] $vmName, [String] $hvServer, [string] $testParams) ############################################################# # # Main script body # ############################################################# $retVal = $False $leaveTrail = "no" # # Check the required input args are present # # Write out test Params $testParams if ($hvServer -eq $null) { "ERROR: hvServer is null" return $False } if ($testParams -eq $null) { "ERROR: testParams is null" return $False } #change working directory to root dir $testParams -match "RootDir=([^;]+)" if (-not $?) { "Mandatory param RootDir=Path; not found!" return $false } $rootDir = $Matches[1] if (Test-Path $rootDir) { Set-Location -Path $rootDir if (-not $?) { "ERROR: Could not change directory to $rootDir !" return $false } "Changed working directory to $rootDir" } else { "ERROR: RootDir = $rootDir is not a valid path" return $false } # Source TCUitls.ps1 for getipv4 and other functions if (Test-Path ".\setupScripts\TCUtils.ps1") { . .\setupScripts\TCUtils.ps1 } else { "ERROR: Could not find setupScripts\TCUtils.ps1" return $false } # Source NET_UTILS.ps1 for network functions if (Test-Path ".\setupScripts\NET_UTILS.ps1") { . 
.\setupScripts\NET_UTILS.ps1 } else { "ERROR: Could not find setupScripts\NET_UTILS.ps1" return $false } # Process the test params $params = $testParams.Split(';') foreach ($p in $params) { $fields = $p.Split("=") switch ($fields[0].Trim()) { "SshKey" { $sshKey = $fields[1].Trim() } "ipv4" { $ipv4 = $fields[1].Trim() } "VF_IP1" { $vmVF_IP1 = $fields[1].Trim() } "VF_IP2" { $vmVF_IP2 = $fields[1].Trim() } "NETMASK" { $netmask = $fields[1].Trim() } "VM2NAME" { $vm2Name = $fields[1].Trim() } "REMOTE_SERVER" { $remoteServer = $fields[1].Trim()} "TC_COVERED" { $TC_COVERED = $fields[1].Trim() } } } $summaryLog = "${vmName}_summary.log" del $summaryLog -ErrorAction SilentlyContinue Write-Output "This script covers test case: ${TC_COVERED}" | Tee-Object -Append -file $summaryLog # Get IPs $ipv4 = GetIPv4 $vmName $hvServer "${vmName} IPADDRESS: ${ipv4}" # # Configure VF on test VM # Start-Sleep -s 5 $retVal = ConfigureVF $ipv4 $sshKey $netmask if (-not $retVal) { "ERROR: Failed to configure eth1 on vm $vmName (IP: ${ipv4}) by setting a static IP of $vmVF_IP1, netmask $netmask" return $false } Start-Sleep -s 10 # # Run Ping with SR-IOV enabled # .\bin\plink.exe -i ssh\$sshKey root@${ipv4} "echo 'source constants.sh && ping -c 600 -I eth1 `$VF_IP2 > PingResults.log &' > runPing.sh" Start-Sleep -s 5 .\bin\plink.exe -i ssh\$sshKey root@${ipv4} "bash ~/runPing.sh > ~/Ping.log 2>&1" # Wait 30 seconds for ping samples to accumulate, then read the RTT "Get Logs" Start-Sleep -s 30 [decimal]$vfEnabledRTT = .\bin\plink.exe -i ssh\$sshKey root@${ipv4} "tail -2 PingResults.log | head -1 | awk '{print `$7}' | sed 's/=/ /' | awk '{print `$2}'" if (-not $vfEnabledRTT){ "ERROR: No result was logged! Check if Ping was executed!" | Tee-Object -Append -file $summaryLog return $false } "The RTT before disabling SR-IOV is $vfEnabledRTT ms" | Tee-Object -Append -file $summaryLog # # Disable SR-IOV on test VM # Start-Sleep -s 5 "Disabling VF on vm1" Set-VMNetworkAdapter -VMName $vmName -ComputerName $hvServer -IovWeight 0 if (-not $?) { "ERROR: Failed to disable SR-IOV on $vmName!" | Tee-Object -Append -file $summaryLog return $false } # Read the RTT with SR-IOV disabled; it should be higher Start-Sleep -s 30 [decimal]$vfDisabledRTT = .\bin\plink.exe -i ssh\$sshKey root@${ipv4} "tail -2 PingResults.log | head -1 | awk '{print `$7}' | sed 's/=/ /' | awk '{print `$2}'" if (-not $vfDisabledRTT){ "ERROR: No result was logged after SR-IOV was disabled!" | Tee-Object -Append -file $summaryLog return $false } "The RTT with SR-IOV disabled is $vfDisabledRTT ms" | Tee-Object -Append -file $summaryLog if ($vfDisabledRTT -le $vfEnabledRTT) { "ERROR: The RTT was lower with SR-IOV disabled, it should be higher" | Tee-Object -Append -file $summaryLog return $false } # # Enable SR-IOV on test VM # "Enable VF on vm1" Set-VMNetworkAdapter -VMName $vmName -ComputerName $hvServer -IovWeight 1 if (-not $?) { "ERROR: Failed to enable SR-IOV on $vmName!"
| Tee-Object -Append -file $summaryLog return $false } Start-Sleep -s 30 # Read the RTT again, it should be lower than before # We should see values close to the initial RTT measured; up to 30% above it is tolerated [decimal]$vfEnabledRTT = $vfEnabledRTT * 1.3 [decimal]$vfFinalRTT = .\bin\plink.exe -i ssh\$sshKey root@${ipv4} "tail -2 PingResults.log | head -1 | awk '{print `$7}' | sed 's/=/ /' | awk '{print `$2}'" "The RTT after re-enabling SR-IOV is $vfFinalRTT ms" | Tee-Object -Append -file $summaryLog if ($vfFinalRTT -gt $vfEnabledRTT) { "ERROR: After re-enabling SR-IOV, the RTT value has not lowered enough. Please check if the VF was successfully restarted" | Tee-Object -Append -file $summaryLog return $false } return $true
Java
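The script above recovers the RTT by slicing ping's output with tail/awk/sed, then accepts the re-enabled RTT only if it stays within 1.3x the baseline. A rough Java equivalent of that parsing and threshold step, purely illustrative; the sample ping line is made up:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PingRttParser {
    // Matches the "time=0.123 ms" field that ping prints per reply.
    private static final Pattern RTT = Pattern.compile("time=([0-9.]+)\\s*ms");

    static double parseRtt(String pingLine) {
        Matcher m = RTT.matcher(pingLine);
        if (!m.find()) {
            throw new IllegalArgumentException("No RTT field in: " + pingLine);
        }
        return Double.parseDouble(m.group(1));
    }

    public static void main(String[] args) {
        String line = "64 bytes from 10.11.12.32: icmp_seq=1 ttl=64 time=0.042 ms";
        double baseline = parseRtt(line);
        // Mirrors the script: accept the final RTT if it is at most 1.3x the baseline.
        double threshold = baseline * 1.3;
        System.out.printf("baseline=%.3f ms, accept up to %.3f ms%n", baseline, threshold);
    }
}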
/** * @license * Copyright 2020 The FOAM Authors. All Rights Reserved. * http://www.apache.org/licenses/LICENSE-2.0 */ foam.CLASS({ package: 'foam.nanos.crunch.lite', name: 'MinMaxCapabilityRefinement', refines: 'foam.nanos.crunch.MinMaxCapability', implements: [ 'foam.nanos.crunch.lite.CapableCompatibleCapability' ], javaImports: [ 'foam.nanos.crunch.CapabilityJunctionPayload', 'foam.nanos.crunch.CrunchService', 'static foam.nanos.crunch.CapabilityJunctionStatus.*' ], methods: [ { name: 'getCapableChainedStatus', documentation: ` numberGrantedOrPending counts the available CapablePayloads which are GRANTED or can eventually be turned into GRANTED from the PENDING state. If MinMaxCapability.min is greater than the number of available payloads which are GRANTED or can eventually be turned into GRANTED, then it is impossible for the number of GRANTED payloads to ever reach the min, so the minimum requirement can never be fulfilled. For example, let there be a min max capability which has 10 prerequisites and a min of 2. If the user selected only 3 of those prereqs in the wizard, then the CapablePayload.status for those 3 will each be in PENDING state with approvals generated for each one. Note, there will only be these 3 CapablePayloads out of the 10 prereqs available on the Capable object since the user only selected 3. If 1 of those 3 CapablePayloads gets rejected, then there will be 2 numberGrantedOrPending, which could still potentially satisfy the min requirement of 2 if those 2 CapablePayloads get set to GRANTED. If 2 of those 3 CapablePayloads get rejected, then there will be 1 numberGrantedOrPending, which makes it impossible to satisfy the MinMaxCapability.min requirement of 2 even if that 1 CapablePayload is GRANTED. `, javaCode: ` CrunchService crunchService = (CrunchService) x.get("crunchService"); List<String> prereqCapIds = crunchService.getPrereqs(getId()); int numberGranted = 0; int numberPending = 0; int numberRejected = 0; for ( String capId : prereqCapIds ) { CapabilityJunctionPayload prereqPayload = (CapabilityJunctionPayload) capablePayloadDAO.find(capId); if ( prereqPayload == null ) { continue; } switch ( prereqPayload.getStatus() ) { case GRANTED: numberGranted++; continue; case PENDING: case APPROVED: numberPending++; continue; case REJECTED: numberRejected++; continue; } } int numberTotal = numberGranted + numberPending + numberRejected; int numberGrantedOrPending = numberGranted + numberPending; if ( numberTotal == 0 ){ return CapabilityJunctionStatus.ACTION_REQUIRED; } if ( getMin() > numberGrantedOrPending ){ return CapabilityJunctionStatus.REJECTED; } if ( numberGranted >= getMin() ) { return CapabilityJunctionStatus.GRANTED; } if ( numberTotal >= getMin() ) { return CapabilityJunctionStatus.PENDING; } return CapabilityJunctionStatus.ACTION_REQUIRED; ` } ] });
Java
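The javaCode body above reduces to a small decision table over the prerequisite counts. A standalone Java sketch of that table; the status names are borrowed from the snippet, everything else (class and method names, the example counts) is hypothetical:

public class MinMaxStatusSketch {
    enum Status { ACTION_REQUIRED, REJECTED, PENDING, GRANTED }

    // Mirrors the ordering of checks in getCapableChainedStatus above.
    static Status chainedStatus(int granted, int pending, int rejected, int min) {
        int total = granted + pending + rejected;
        int grantedOrPending = granted + pending;
        if (total == 0) return Status.ACTION_REQUIRED;       // nothing selected yet
        if (min > grantedOrPending) return Status.REJECTED;  // min can no longer be reached
        if (granted >= min) return Status.GRANTED;           // min already satisfied
        if (total >= min) return Status.PENDING;             // still reachable, waiting
        return Status.ACTION_REQUIRED;
    }

    public static void main(String[] args) {
        // The 3-selected-of-10, min=2 example from the documentation string:
        System.out.println(chainedStatus(0, 3, 0, 2)); // PENDING
        System.out.println(chainedStatus(0, 2, 1, 2)); // PENDING: 2 could still be GRANTED
        System.out.println(chainedStatus(0, 1, 2, 2)); // REJECTED: min of 2 is unreachable
    }
}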
/* * Copyright 2016 ThoughtWorks, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.thoughtworks.go.serverhealth; import com.thoughtworks.go.config.CaseInsensitiveString; import com.thoughtworks.go.config.CruiseConfig; import com.thoughtworks.go.config.PipelineConfig; import com.thoughtworks.go.config.remote.ConfigRepoConfig; import com.thoughtworks.go.domain.materials.Material; import com.thoughtworks.go.domain.materials.MaterialConfig; import org.apache.commons.lang.StringUtils; import java.util.HashSet; import java.util.Set; public class HealthStateScope implements Comparable<HealthStateScope> { public static final HealthStateScope GLOBAL = new HealthStateScope(ScopeType.GLOBAL, "GLOBAL"); private final ScopeType type; private final String scope; private HealthStateScope(ScopeType type, String scope) { this.type = type; this.scope = scope; } public static HealthStateScope forGroup(String groupName) { return new HealthStateScope(ScopeType.GROUP, groupName); } public static HealthStateScope forPipeline(String pipelineName) { return new HealthStateScope(ScopeType.PIPELINE, pipelineName); } public static HealthStateScope forFanin(String pipelineName) { return new HealthStateScope(ScopeType.FANIN, pipelineName); } public static HealthStateScope forStage(String pipelineName, String stageName) { return new HealthStateScope(ScopeType.STAGE, pipelineName + "/" + stageName); } public static HealthStateScope forJob(String pipelineName, String stageName, String jobName) { return new HealthStateScope(ScopeType.JOB, pipelineName + "/" + stageName + "/" + jobName); } public static HealthStateScope forMaterial(Material material) { return new HealthStateScope(ScopeType.MATERIAL, material.getSqlCriteria().toString()); } public static HealthStateScope forMaterialUpdate(Material material) { return new HealthStateScope(ScopeType.MATERIAL_UPDATE, material.getFingerprint()); } public static HealthStateScope forMaterialConfig(MaterialConfig materialConfig) { return new HealthStateScope(ScopeType.MATERIAL, materialConfig.getSqlCriteria().toString()); } public static HealthStateScope forMaterialConfigUpdate(MaterialConfig materialConfig) { return new HealthStateScope(ScopeType.MATERIAL_UPDATE, materialConfig.getFingerprint()); } public static HealthStateScope forConfigRepo(String operation) { return new HealthStateScope(ScopeType.CONFIG_REPO, operation); } public static HealthStateScope forPartialConfigRepo(ConfigRepoConfig repoConfig) { return new HealthStateScope(ScopeType.CONFIG_PARTIAL, repoConfig.getMaterialConfig().getFingerprint()); } public static HealthStateScope forPartialConfigRepo(String fingerprint) { return new HealthStateScope(ScopeType.CONFIG_PARTIAL, fingerprint); } public boolean isSame(String scope) { return StringUtils.endsWithIgnoreCase(this.scope, scope); } public boolean isForPipeline() { return type == ScopeType.PIPELINE; } public boolean isForGroup() { return type == ScopeType.GROUP; } public boolean isForMaterial() { return type == ScopeType.MATERIAL; } ScopeType getType() { return 
type; } public String getScope() { return scope; } public String toString() { return String.format("LogScope[%s, scope=%s]", type, scope); } public boolean equals(Object that) { if (this == that) { return true; } if (that == null) { return false; } if (getClass() != that.getClass()) { return false; } return equals((HealthStateScope) that); } private boolean equals(HealthStateScope that) { if (type != that.type) { return false; } if (!scope.equals(that.scope)) { return false; } return true; } public int hashCode() { int result = type.hashCode(); result = 31 * result + (scope != null ? scope.hashCode() : 0); return result; } public boolean isRemovedFromConfig(CruiseConfig cruiseConfig) { return type.isRemovedFromConfig(cruiseConfig, scope); } public static HealthStateScope forAgent(String cookie) { return new HealthStateScope(ScopeType.GLOBAL, cookie); } public static HealthStateScope forInvalidConfig() { return new HealthStateScope(ScopeType.GLOBAL, "global"); } public int compareTo(HealthStateScope o) { int comparison; comparison = type.compareTo(o.type); if (comparison != 0) { return comparison; } comparison = scope.compareTo(o.scope); if (comparison != 0) { return comparison; } return 0; } public static HealthStateScope forPlugin(String symbolicName) { return new HealthStateScope(ScopeType.PLUGIN, symbolicName); } public Set<String> getPipelineNames(CruiseConfig config) { HashSet<String> pipelineNames = new HashSet<>(); switch (type) { case PIPELINE: case FANIN: pipelineNames.add(scope); break; case STAGE: case JOB: pipelineNames.add(scope.split("/")[0]); break; case MATERIAL: for (PipelineConfig pc : config.getAllPipelineConfigs()) { for (MaterialConfig mc : pc.materialConfigs()) { String scope = HealthStateScope.forMaterialConfig(mc).getScope(); if (scope.equals(this.scope)) { pipelineNames.add(pc.name().toString()); } } } break; case MATERIAL_UPDATE: for (PipelineConfig pc : config.getAllPipelineConfigs()) { for (MaterialConfig mc : pc.materialConfigs()) { String scope = HealthStateScope.forMaterialConfigUpdate(mc).getScope(); if (scope.equals(this.scope)) { pipelineNames.add(pc.name().toString()); } } } break; } return pipelineNames; } public boolean isForConfigPartial() { return type == ScopeType.CONFIG_PARTIAL; } enum ScopeType { GLOBAL, CONFIG_REPO, GROUP { public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String group) { return !cruiseConfig.hasPipelineGroup(group); } }, MATERIAL { public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String materialScope) { for (MaterialConfig materialConfig : cruiseConfig.getAllUniqueMaterials()) { if (HealthStateScope.forMaterialConfig(materialConfig).getScope().equals(materialScope)) { return false; } } return true; } }, MATERIAL_UPDATE { public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String materialScope) { for (MaterialConfig materialConfig : cruiseConfig.getAllUniqueMaterials()) { if (HealthStateScope.forMaterialConfigUpdate(materialConfig).getScope().equals(materialScope)) { return false; } } return true; } }, CONFIG_PARTIAL { public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String materialScope) { for (ConfigRepoConfig configRepoConfig : cruiseConfig.getConfigRepos()) { if (HealthStateScope.forPartialConfigRepo(configRepoConfig).getScope().equals(materialScope)) { return false; } } return true; } }, PIPELINE { public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipeline) { return !cruiseConfig.hasPipelineNamed(new CaseInsensitiveString(pipeline)); } }, FANIN { public 
boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipeline) { return !cruiseConfig.hasPipelineNamed(new CaseInsensitiveString(pipeline)); } }, STAGE { public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipelineStage) { String[] parts = pipelineStage.split("/"); return !cruiseConfig.hasStageConfigNamed(new CaseInsensitiveString(parts[0]), new CaseInsensitiveString(parts[1]), true); } }, JOB { public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipelineStageJob) { String[] parts = pipelineStageJob.split("/"); return !cruiseConfig.hasBuildPlan(new CaseInsensitiveString(parts[0]), new CaseInsensitiveString(parts[1]), parts[2], true); } }, PLUGIN; protected boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String scope) { return false; }; } }
Java
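A small sketch of how the factory methods above compose and compare, assuming the HealthStateScope class is on the classpath; the pipeline, stage, and job names are placeholders:

import com.thoughtworks.go.serverhealth.HealthStateScope;

public class ScopeSketch {
    public static void main(String[] args) {
        // Stage and job scopes embed their path segments with '/' separators,
        // which is what getPipelineNames() later splits on.
        HealthStateScope stage = HealthStateScope.forStage("build", "compile");
        HealthStateScope job = HealthStateScope.forJob("build", "compile", "jar");
        System.out.println(stage);                     // LogScope[STAGE, scope=build/compile]
        System.out.println(stage.equals(job));         // false: type and scope both differ
        System.out.println(stage.compareTo(job) != 0); // true: ordered by type, then scope
    }
}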
/* * Copyright 2019 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kie.workbench.common.stunner.bpmn.client.marshall.converters.customproperties; import org.junit.Before; import org.junit.Test; import static org.junit.Assert.assertEquals; public class AssociationListTest { private AssociationList tested; public static final String VALUE = "[din]var1->input1,[din]var2->input2,[dout]var3->output1," + "[dout]var5->output2"; public static final String VALUE_WITH_COMMA = "[din]var1->input1,[din]var2->input2,input22,input33," + "[dout]var3->output1,[dout]var5->output2,output22,output23"; @Before public void setUp() { tested = new AssociationList(); } @Test public void fromString() { AssociationList list = tested.fromString(VALUE); assertEquals(2, list.getInputs().size()); assertEquals(2, list.getOutputs().size()); } @Test public void fromStringWithComma() { AssociationList list = tested.fromString(VALUE_WITH_COMMA); assertEquals(2, list.getInputs().size()); assertEquals(2, list.getOutputs().size()); } }
Java
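The fixtures encode data associations as '[din]'/'[dout]' prefixed, '->' separated pairs, and the comma test expects stray tokens not to create extra entries. The real parsing lives in AssociationList (not shown here), so the sketch below is only a guess at one rule that would make both assertions pass: keep only well-formed arrow pairs:

import java.util.ArrayList;
import java.util.List;

public class AssociationParserSketch {
    // Hypothetical rule: an input is a comma-separated token that carries the
    // [din] prefix and an arrow; bare tokens like "input22" are ignored.
    static List<String> inputs(String value) {
        List<String> result = new ArrayList<>();
        for (String token : value.split(",")) {
            if (token.startsWith("[din]") && token.contains("->")) {
                result.add(token.substring("[din]".length()));
            }
        }
        return result;
    }

    public static void main(String[] args) {
        String fixture = "[din]var1->input1,[din]var2->input2,input22,"
                + "[dout]var3->output1,[dout]var5->output2";
        System.out.println(inputs(fixture)); // [var1->input1, var2->input2]
    }
}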
/* * Copyright 2008-2013 LinkedIn, Inc * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package voldemort.cluster; import java.io.Serializable; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; import voldemort.VoldemortException; import voldemort.annotations.concurrency.Threadsafe; import voldemort.annotations.jmx.JmxGetter; import voldemort.annotations.jmx.JmxManaged; import voldemort.utils.Utils; import com.google.common.collect.Sets; /** * A representation of the voldemort cluster * * */ @Threadsafe @JmxManaged(description = "Metadata about the physical servers on which the Voldemort cluster runs") public class Cluster implements Serializable { private static final long serialVersionUID = 1; private final String name; private final int numberOfPartitionIds; private final Map<Integer, Node> nodesById; private final Map<Integer, Zone> zonesById; private final Map<Zone, List<Integer>> nodesPerZone; private final Map<Zone, List<Integer>> partitionsPerZone; // Since partitionId space must be dense, arrays could be used instead of // maps. To do so, the partition ID range would have to be determined. This // could be done by summing up the lengths of each node's .getPartitionIds() // returned list. This could make construction and lookup faster by a // constant factor, and the memory footprint could be smaller.
private final Map<Integer, Zone> partitionIdToZone; private final Node[] partitionIdToNodeArray; private final Map<Integer, Node> partitionIdToNode; private final Map<Integer, Integer> partitionIdToNodeId; public Cluster(String name, List<Node> nodes) { this(name, nodes, new ArrayList<Zone>()); } public Cluster(String name, List<Node> nodes, List<Zone> zones) { this.name = Utils.notNull(name); this.partitionsPerZone = new LinkedHashMap<Zone, List<Integer>>(); this.nodesPerZone = new LinkedHashMap<Zone, List<Integer>>(); this.partitionIdToZone = new HashMap<Integer, Zone>(); Map<Integer, Node> partitionIdToNodeMap = new HashMap<Integer, Node>(); this.partitionIdToNode = new HashMap<Integer, Node>(); this.partitionIdToNodeId = new HashMap<Integer, Integer>(); if(zones.size() != 0) { zonesById = new LinkedHashMap<Integer, Zone>(zones.size()); for(Zone zone: zones) { if(zonesById.containsKey(zone.getId())) throw new IllegalArgumentException("Zone id " + zone.getId() + " appears twice in the zone list."); zonesById.put(zone.getId(), zone); nodesPerZone.put(zone, new ArrayList<Integer>()); partitionsPerZone.put(zone, new ArrayList<Integer>()); } } else { // Add default zone zonesById = new LinkedHashMap<Integer, Zone>(1); Zone defaultZone = new Zone(); zonesById.put(defaultZone.getId(), defaultZone); nodesPerZone.put(defaultZone, new ArrayList<Integer>()); partitionsPerZone.put(defaultZone, new ArrayList<Integer>()); } this.nodesById = new LinkedHashMap<Integer, Node>(nodes.size()); for(Node node: nodes) { if(nodesById.containsKey(node.getId())) throw new IllegalArgumentException("Node id " + node.getId() + " appears twice in the node list."); nodesById.put(node.getId(), node); Zone nodesZone = zonesById.get(node.getZoneId()); if(nodesZone == null) { throw new IllegalArgumentException("No zone associated with this node exists."); } nodesPerZone.get(nodesZone).add(node.getId()); partitionsPerZone.get(nodesZone).addAll(node.getPartitionIds()); for(Integer partitionId: node.getPartitionIds()) { if(this.partitionIdToNodeId.containsKey(partitionId)) { throw new IllegalArgumentException("Partition id " + partitionId + " found on two nodes : " + node.getId() + " and " + this.partitionIdToNodeId.get(partitionId)); } this.partitionIdToZone.put(partitionId, nodesZone); partitionIdToNodeMap.put(partitionId, node); this.partitionIdToNode.put(partitionId, node); this.partitionIdToNodeId.put(partitionId, node.getId()); } } this.numberOfPartitionIds = getNumberOfTags(nodes); this.partitionIdToNodeArray = new Node[this.numberOfPartitionIds]; for(int partitionId = 0; partitionId < this.numberOfPartitionIds; partitionId++) { this.partitionIdToNodeArray[partitionId] = partitionIdToNodeMap.get(partitionId); } } private int getNumberOfTags(List<Node> nodes) { List<Integer> tags = new ArrayList<Integer>(); for(Node node: nodes) { tags.addAll(node.getPartitionIds()); } Collections.sort(tags); // Verify the sorted partition ids are exactly 0..N-1 (dense, no gaps or duplicates). for(int i = 0; i < tags.size(); i++) { if(tags.get(i).intValue() != i) throw new IllegalArgumentException("Invalid tag assignment."); } return tags.size(); } @JmxGetter(name = "name", description = "The name of the cluster") public String getName() { return name; } public Collection<Node> getNodes() { return nodesById.values(); } /** * @return Sorted set of node Ids */ public Set<Integer> getNodeIds() { Set<Integer> nodeIds = nodesById.keySet(); return new TreeSet<Integer>(nodeIds); } /** * * @return Sorted set of Zone Ids */ public Set<Integer> getZoneIds() { Set<Integer> zoneIds = zonesById.keySet(); return new
TreeSet<Integer>(zoneIds); } public Collection<Zone> getZones() { return zonesById.values(); } public Zone getZoneById(int id) { Zone zone = zonesById.get(id); if(zone == null) { throw new VoldemortException("No such zone in cluster: " + id + " Available zones : " + displayZones()); } return zone; } private String displayZones() { String zoneIDS = "{"; for(Zone z: this.getZones()) { if(zoneIDS.length() != 1) zoneIDS += ","; zoneIDS += z.getId(); } zoneIDS += "}"; return zoneIDS; } public int getNumberOfZones() { return zonesById.size(); } public int getNumberOfPartitionsInZone(Integer zoneId) { return partitionsPerZone.get(getZoneById(zoneId)).size(); } public int getNumberOfNodesInZone(Integer zoneId) { return nodesPerZone.get(getZoneById(zoneId)).size(); } /** * @return Sorted set of node Ids for given zone */ public Set<Integer> getNodeIdsInZone(Integer zoneId) { return new TreeSet<Integer>(nodesPerZone.get(getZoneById(zoneId))); } /** * @return Sorted set of partition Ids for given zone */ public Set<Integer> getPartitionIdsInZone(Integer zoneId) { return new TreeSet<Integer>(partitionsPerZone.get(getZoneById(zoneId))); } public Zone getZoneForPartitionId(int partitionId) { return partitionIdToZone.get(partitionId); } public Node getNodeForPartitionId(int partitionId) { return this.partitionIdToNodeArray[partitionId]; } public Node[] getPartitionIdToNodeArray() { return this.partitionIdToNodeArray; } /** * * @return Map of partition id to node id. */ public Map<Integer, Integer> getPartitionIdToNodeIdMap() { return new HashMap<Integer, Integer>(partitionIdToNodeId); } public Node getNodeById(int id) { Node node = nodesById.get(id); if(node == null) throw new VoldemortException("No such node in cluster: " + id); return node; } /** * Given a cluster and a node id checks if the node exists * * @param nodeId The node id to search for * @return True if cluster contains the node id, else false */ public boolean hasNodeWithId(int nodeId) { Node node = nodesById.get(nodeId); if(node == null) { return false; } return true; } @JmxGetter(name = "numberOfNodes", description = "The number of nodes in the cluster.") public int getNumberOfNodes() { return nodesById.size(); } public int getNumberOfPartitions() { return numberOfPartitionIds; } @Override public String toString() { StringBuilder builder = new StringBuilder(); builder.append("Cluster('"); builder.append(getName()); builder.append("', ["); for(Node n: getNodes()) { builder.append(n.toString()); builder.append('\n'); } builder.append("])"); return builder.toString(); } /** * Return a detailed string representation of the current cluster * * @param isDetailed whether to include zone and node details * @return description of cluster */ public String toString(boolean isDetailed) { if(!isDetailed) { return toString(); } StringBuilder builder = new StringBuilder("Cluster [" + getName() + "] Nodes [" + getNumberOfNodes() + "] Zones [" + getNumberOfZones() + "] Partitions [" + getNumberOfPartitions() + "]"); builder.append(" Zone Info [" + getZones() + "]"); builder.append(" Node Info [" + getNodes() + "]"); return builder.toString(); } /** * Clones the cluster by constructing a new one with same name, partition * layout, and nodes. * * @param cluster * @return clone of Cluster cluster. */ public static Cluster cloneCluster(Cluster cluster) { // Could add a better .clone() implementation that clones the derived // data structures. The constructor invoked by this clone implementation // can be slow for large numbers of partitions. Probably faster to copy // all the maps and stuff.
return new Cluster(cluster.getName(), new ArrayList<Node>(cluster.getNodes()), new ArrayList<Zone>(cluster.getZones())); /*- * Historic "clone" code being kept in case this, for some reason, was the "right" way to be doing this. ClusterMapper mapper = new ClusterMapper(); return mapper.readCluster(new StringReader(mapper.writeCluster(cluster))); */ } @Override public boolean equals(Object second) { if(this == second) return true; if(second == null || second.getClass() != getClass()) return false; Cluster secondCluster = (Cluster) second; if(this.getZones().size() != secondCluster.getZones().size()) { return false; } if(this.getNodes().size() != secondCluster.getNodes().size()) { return false; } for(Zone zoneA: this.getZones()) { Zone zoneB; try { zoneB = secondCluster.getZoneById(zoneA.getId()); } catch(VoldemortException e) { return false; } if(zoneB == null || zoneB.getProximityList().size() != zoneA.getProximityList().size()) { return false; } for(int index = 0; index < zoneA.getProximityList().size(); index++) { if(zoneA.getProximityList().get(index) != zoneB.getProximityList().get(index)) { return false; } } } for(Node nodeA: this.getNodes()) { Node nodeB; try { nodeB = secondCluster.getNodeById(nodeA.getId()); } catch(VoldemortException e) { return false; } if(nodeA.getNumberOfPartitions() != nodeB.getNumberOfPartitions()) { return false; } if(nodeA.getZoneId() != nodeB.getZoneId()) { return false; } if(!Sets.newHashSet(nodeA.getPartitionIds()) .equals(Sets.newHashSet(nodeB.getPartitionIds()))) return false; } return true; } @Override public int hashCode() { int hc = getNodes().size(); for(Node node: getNodes()) { hc ^= node.getHost().hashCode(); } return hc; } }
Java
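The constructor above builds a dense partitionId-to-Node array after validating that the combined, sorted partition ids form exactly 0..N-1. The core of that validation, extracted into a standalone sketch with hypothetical names and example data:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class DensePartitionCheck {
    // Throws unless the combined partition ids are exactly 0..N-1 with no
    // gaps or duplicates, mirroring getNumberOfTags() above.
    static int checkDense(List<List<Integer>> perNodePartitions) {
        List<Integer> tags = new ArrayList<>();
        for (List<Integer> ids : perNodePartitions) {
            tags.addAll(ids);
        }
        Collections.sort(tags);
        for (int i = 0; i < tags.size(); i++) {
            if (tags.get(i) != i) {
                throw new IllegalArgumentException("Invalid tag assignment.");
            }
        }
        return tags.size();
    }

    public static void main(String[] args) {
        System.out.println(checkDense(Arrays.asList(
                Arrays.asList(0, 2), Arrays.asList(1, 3)))); // 4
        // checkDense(Arrays.asList(Arrays.asList(0, 2))) would throw: gap at id 1.
    }
}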
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <title>Uses of Class org.deidentifier.arx.gui.view.impl.define.ViewAttributeDefinition (ARX GUI Documentation)</title> <link rel="stylesheet" type="text/css" href="../../../../../../../../stylesheet.css" title="Style"> </head> <body> <script type="text/javascript"><!-- if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Uses of Class org.deidentifier.arx.gui.view.impl.define.ViewAttributeDefinition (ARX GUI Documentation)"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar_top"> <!-- --> </a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../../../../../org/deidentifier/arx/gui/view/impl/define/ViewAttributeDefinition.html" title="class in org.deidentifier.arx.gui.view.impl.define">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../../../index-all.html">Index</a></li> <li><a href="../../../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../../../../../../index.html?org/deidentifier/arx/gui/view/impl/define/class-use/ViewAttributeDefinition.html" target="_top">Frames</a></li> <li><a href="ViewAttributeDefinition.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h2 title="Uses of Class org.deidentifier.arx.gui.view.impl.define.ViewAttributeDefinition" class="title">Uses of Class<br>org.deidentifier.arx.gui.view.impl.define.ViewAttributeDefinition</h2> </div> <div class="classUseContainer">No usage of org.deidentifier.arx.gui.view.impl.define.ViewAttributeDefinition</div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar_bottom"> <!-- --> </a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../../../../../org/deidentifier/arx/gui/view/impl/define/ViewAttributeDefinition.html" title="class in org.deidentifier.arx.gui.view.impl.define">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../../../index-all.html">Index</a></li> <li><a 
href="../../../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../../../../../../index.html?org/deidentifier/arx/gui/view/impl/define/class-use/ViewAttributeDefinition.html" target="_top">Frames</a></li> <li><a href="ViewAttributeDefinition.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> </body> </html>
Java
<?php final class PhabricatorSetupCheckRepositories extends PhabricatorSetupCheck { protected function executeChecks() { $repo_path = PhabricatorEnv::getEnvConfig('repository.default-local-path'); if (!$repo_path) { $summary = pht( "The configuration option '%s' is not set.", 'repository.default-local-path'); $this->newIssue('repository.default-local-path.empty') ->setName(pht('Missing Repository Local Path')) ->setSummary($summary) ->addPhabricatorConfig('repository.default-local-path'); return; } if (!Filesystem::pathExists($repo_path)) { $summary = pht( 'The path for local repositories does not exist, or is not '. 'readable by the webserver.'); $message = pht( "The directory for local repositories (%s) does not exist, or is not ". "readable by the webserver. Phabricator uses this directory to store ". "information about repositories. If this directory does not exist, ". "create it:\n\n". "%s\n". "If this directory exists, make it readable to the webserver. You ". "can also edit the configuration below to use some other directory.", phutil_tag('tt', array(), $repo_path), phutil_tag('pre', array(), csprintf('$ mkdir -p %s', $repo_path))); $this->newIssue('repository.default-local-path.empty') ->setName(pht('Missing Repository Local Path')) ->setSummary($summary) ->setMessage($message) ->addPhabricatorConfig('repository.default-local-path'); } } }
Java
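The setup check above distinguishes two failure modes: a missing configuration value and a configured path that does not exist or is unreadable. The same probe sketched in Java for comparison; the paths and messages are placeholders, not Phabricator API:

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class RepoPathCheck {
    // Returns an error description, or null when the path is usable,
    // loosely following the two cases handled above.
    static String check(String configured) {
        if (configured == null || configured.isEmpty()) {
            return "The configuration option 'repository.default-local-path' is not set.";
        }
        Path path = Paths.get(configured);
        if (!Files.isDirectory(path) || !Files.isReadable(path)) {
            return "The path for local repositories does not exist, or is not readable.";
        }
        return null; // no issue to raise
    }

    public static void main(String[] args) {
        System.out.println(check(""));     // missing-config case
        System.out.println(check("/tmp")); // usually null on a Unix host
    }
}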
/** * Copyright 2014-2016 CyberVision, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kaaproject.kaa.server.common.nosql.cassandra.dao.model; import nl.jqno.equalsverifier.EqualsVerifier; import nl.jqno.equalsverifier.Warning; import org.junit.Test; public class CassandraEndpointUserTest { @Test public void hashCodeEqualsTest(){ EqualsVerifier.forClass(CassandraEndpointUser.class).suppress(Warning.NONFINAL_FIELDS).verify(); } }
Java
# Copyright 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fastfood Chef Cookbook manager.""" from __future__ import print_function import os from fastfood import utils class CookBook(object): """Chef Cookbook object. Understands metadata.rb, Berksfile and how to parse them. """ def __init__(self, path): """Initialize CookBook wrapper at 'path'.""" self.path = utils.normalize_path(path) self._metadata = None if not os.path.isdir(path): raise ValueError("Cookbook dir %s does not exist." % self.path) self._berksfile = None @property def name(self): """Cookbook name property.""" try: return self.metadata.to_dict()['name'] except KeyError: raise LookupError("%s is missing 'name' attribute." % self.metadata) @property def metadata(self): """Return dict representation of this cookbook's metadata.rb.""" self.metadata_path = os.path.join(self.path, 'metadata.rb') if not os.path.isfile(self.metadata_path): raise ValueError("Cookbook needs metadata.rb, %s" % self.metadata_path) if not self._metadata: self._metadata = MetadataRb(open(self.metadata_path, 'r+')) return self._metadata @property def berksfile(self): """Return this cookbook's Berksfile instance.""" self.berks_path = os.path.join(self.path, 'Berksfile') if not self._berksfile: if not os.path.isfile(self.berks_path): raise ValueError("No Berksfile found at %s" % self.berks_path) self._berksfile = Berksfile(open(self.berks_path, 'r+')) return self._berksfile class MetadataRb(utils.FileWrapper): """Wrapper for a metadata.rb file.""" @classmethod def from_dict(cls, dictionary): """Create a MetadataRb instance from a dict.""" cookbooks = set() # put these in order groups = [cookbooks] for key, val in dictionary.items(): if key == 'depends': cookbooks.update({cls.depends_statement(cbn, meta) for cbn, meta in val.items()}) body = '' for group in groups: if group: body += '\n' body += '\n'.join(group) return cls.from_string(body) @staticmethod def depends_statement(cookbook_name, metadata=None): """Return a valid Ruby 'depends' statement for the metadata.rb file.""" line = "depends '%s'" % cookbook_name if metadata: if not isinstance(metadata, dict): raise TypeError("Stencil dependency options for %s " "should be a dict of options, not %s."
% (cookbook_name, metadata)) if metadata: line = "%s '%s'" % (line, "', '".join(metadata)) return line def to_dict(self): """Return a dictionary representation of this metadata.rb file.""" return self.parse() def parse(self): """Parse the metadata.rb into a dict.""" data = utils.ruby_lines(self.readlines()) data = [tuple(j.strip() for j in line.split(None, 1)) for line in data] depends = {} for line in data: if not len(line) == 2: continue key, value = line if key == 'depends': value = value.split(',') lib = utils.ruby_strip(value[0]) detail = [utils.ruby_strip(j) for j in value[1:]] depends[lib] = detail datamap = {line[0]: utils.ruby_strip(line[1]) for line in data if len(line) == 2} if depends: datamap['depends'] = depends self.seek(0) return datamap def merge(self, other): """Add requirements from 'other' metadata.rb into this one.""" if not isinstance(other, MetadataRb): raise TypeError("MetadataRb to merge should be a 'MetadataRb' " "instance, not %s." % type(other)) current = self.to_dict() new = other.to_dict() # compare and gather cookbook dependencies meta_writelines = ['%s\n' % self.depends_statement(cbn, meta) for cbn, meta in new.get('depends', {}).items() if cbn not in current.get('depends', {})] self.write_statements(meta_writelines) return self.to_dict() class Berksfile(utils.FileWrapper): """Wrapper for a Berksfile.""" berks_options = [ 'branch', 'git', 'path', 'ref', 'revision', 'tag', ] def to_dict(self): """Return a dictionary representation of this Berksfile.""" return self.parse() def parse(self): """Parse this Berksfile into a dict.""" self.flush() self.seek(0) data = utils.ruby_lines(self.readlines()) data = [tuple(j.strip() for j in line.split(None, 1)) for line in data] datamap = {} for line in data: if len(line) == 1: datamap[line[0]] = True elif len(line) == 2: key, value = line if key == 'cookbook': datamap.setdefault('cookbook', {}) value = [utils.ruby_strip(v) for v in value.split(',')] lib, detail = value[0], value[1:] datamap['cookbook'].setdefault(lib, {}) # if there is additional dependency data but it's # not the ruby hash, it's the version constraint if detail and not any("".join(detail).startswith(o) for o in self.berks_options): constraint, detail = detail[0], detail[1:] datamap['cookbook'][lib]['constraint'] = constraint if detail: for deet in detail: opt, val = [ utils.ruby_strip(i) for i in deet.split(':', 1) ] if not any(opt == o for o in self.berks_options): raise ValueError( "Cookbook detail '%s' does not specify " "one of '%s'" % (opt, self.berks_options)) else: datamap['cookbook'][lib][opt.strip(':')] = ( utils.ruby_strip(val)) elif key == 'source': datamap.setdefault(key, []) datamap[key].append(utils.ruby_strip(value)) elif key: datamap[key] = utils.ruby_strip(value) self.seek(0) return datamap @classmethod def from_dict(cls, dictionary): """Create a Berksfile instance from a dict.""" cookbooks = set() sources = set() other = set() # put these in order groups = [sources, cookbooks, other] for key, val in dictionary.items(): if key == 'cookbook': cookbooks.update({cls.cookbook_statement(cbn, meta) for cbn, meta in val.items()}) elif key == 'source': sources.update({"source '%s'" % src for src in val}) elif key == 'metadata': other.add('metadata') body = '' for group in groups: if group: body += '\n' body += '\n'.join(group) return cls.from_string(body) @staticmethod def cookbook_statement(cookbook_name, metadata=None): """Return a valid Ruby 'cookbook' statement for the Berksfile.""" line = "cookbook '%s'" % cookbook_name if metadata: if not isinstance(metadata,
dict): raise TypeError("Berksfile dependency hash for %s " "should be a dict of options, not %s." % (cookbook_name, metadata)) # not like the others... if 'constraint' in metadata: line += ", '%s'" % metadata.pop('constraint') for opt, spec in metadata.items(): line += ", %s: '%s'" % (opt, spec) return line def merge(self, other): """Add requirements from 'other' Berksfile into this one.""" if not isinstance(other, Berksfile): raise TypeError("Berksfile to merge should be a 'Berksfile' " "instance, not %s." % type(other)) current = self.to_dict() new = other.to_dict() # compare and gather cookbook dependencies berks_writelines = ['%s\n' % self.cookbook_statement(cbn, meta) for cbn, meta in new.get('cookbook', {}).items() if cbn not in current.get('cookbook', {})] # compare and gather 'source' requirements berks_writelines.extend(["source '%s'\n" % src for src in new.get('source', []) if src not in current.get('source', [])]) self.write_statements(berks_writelines) return self.to_dict()
Java
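Both merge() methods above compute "statements present in other but absent from current" before appending. The same set-difference step in a compact Java sketch; the map-of-strings shape and the example version constraints are assumptions for illustration:

import java.util.LinkedHashMap;
import java.util.Map;

public class DependencyMergeSketch {
    // Returns the entries of 'other' whose keys are absent from 'current',
    // which is the set that merge() writes back to the file.
    static Map<String, String> newDependencies(Map<String, String> current,
                                               Map<String, String> other) {
        Map<String, String> added = new LinkedHashMap<>();
        for (Map.Entry<String, String> e : other.entrySet()) {
            if (!current.containsKey(e.getKey())) {
                added.put(e.getKey(), e.getValue());
            }
        }
        return added;
    }

    public static void main(String[] args) {
        Map<String, String> current = new LinkedHashMap<>();
        current.put("apt", "~> 2.0");
        Map<String, String> other = new LinkedHashMap<>();
        other.put("apt", "~> 2.0");
        other.put("mysql", "~> 6.0");
        System.out.println(newDependencies(current, other)); // {mysql=~> 6.0}
    }
}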
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.search.aggregations.bucket.composite; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.search.aggregations.LeafBucketCollector; import java.io.IOException; import static org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import static org.elasticsearch.search.aggregations.support.ValuesSource.Bytes; import static org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.WithOrdinals; final class CompositeValuesComparator { private final int size; private final CompositeValuesSource<?, ?>[] arrays; private boolean topValueSet = false; /** * * @param sources The list of {@link CompositeValuesSourceConfig} to build the composite buckets. * @param size The number of composite buckets to keep. */ CompositeValuesComparator(IndexReader reader, CompositeValuesSourceConfig[] sources, int size) { this.size = size; this.arrays = new CompositeValuesSource<?, ?>[sources.length]; for (int i = 0; i < sources.length; i++) { final int reverseMul = sources[i].reverseMul(); if (sources[i].valuesSource() instanceof WithOrdinals && reader instanceof DirectoryReader) { WithOrdinals vs = (WithOrdinals) sources[i].valuesSource(); arrays[i] = CompositeValuesSource.wrapGlobalOrdinals(vs, size, reverseMul); } else if (sources[i].valuesSource() instanceof Bytes) { Bytes vs = (Bytes) sources[i].valuesSource(); arrays[i] = CompositeValuesSource.wrapBinary(vs, size, reverseMul); } else if (sources[i].valuesSource() instanceof Numeric) { final Numeric vs = (Numeric) sources[i].valuesSource(); if (vs.isFloatingPoint()) { arrays[i] = CompositeValuesSource.wrapDouble(vs, size, reverseMul); } else { arrays[i] = CompositeValuesSource.wrapLong(vs, sources[i].format(), size, reverseMul); } } } } /** * Moves the values in <code>slot1</code> to <code>slot2</code>. */ void move(int slot1, int slot2) { assert slot1 < size && slot2 < size; for (int i = 0; i < arrays.length; i++) { arrays[i].move(slot1, slot2); } } /** * Compares the values in <code>slot1</code> with <code>slot2</code>. */ int compare(int slot1, int slot2) { assert slot1 < size && slot2 < size; for (int i = 0; i < arrays.length; i++) { int cmp = arrays[i].compare(slot1, slot2); if (cmp != 0) { return cmp; } } return 0; } /** * Returns true if a top value has been set for this comparator. */ boolean hasTop() { return topValueSet; } /** * Sets the top values for this comparator. */ void setTop(Comparable<?>[] values) { assert values.length == arrays.length; topValueSet = true; for (int i = 0; i < arrays.length; i++) { arrays[i].setTop(values[i]); } } /** * Compares the top values with the values in <code>slot</code>. 
*/ int compareTop(int slot) { assert slot < size; for (int i = 0; i < arrays.length; i++) { int cmp = arrays[i].compareTop(slot); if (cmp != 0) { return cmp; } } return 0; } /** * Builds the {@link CompositeKey} for <code>slot</code>. */ CompositeKey toCompositeKey(int slot) throws IOException { assert slot < size; Comparable<?>[] values = new Comparable<?>[arrays.length]; for (int i = 0; i < values.length; i++) { values[i] = arrays[i].toComparable(slot); } return new CompositeKey(values); } /** * Gets the {@link LeafBucketCollector} that will record the composite buckets of the visited documents. */ CompositeValuesSource.Collector getLeafCollector(LeafReaderContext context, CompositeValuesSource.Collector in) throws IOException { int last = arrays.length - 1; CompositeValuesSource.Collector next = arrays[last].getLeafCollector(context, in); for (int i = last - 1; i >= 0; i--) { next = arrays[i].getLeafCollector(context, next); } return next; } }
Java
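compare() and compareTop() above both short-circuit on the first source that returns a non-zero result, giving a lexicographic order over the composite sources. The same control flow with a plain java.util.Comparator, using int arrays as stand-ins for the slots:

import java.util.Arrays;
import java.util.Comparator;

public class LexicographicCompareSketch {
    public static void main(String[] args) {
        // Each array element plays the role of one CompositeValuesSource;
        // the first differing component decides the ordering.
        Comparator<int[]> bySlots = (a, b) -> {
            for (int i = 0; i < a.length; i++) {
                int cmp = Integer.compare(a[i], b[i]);
                if (cmp != 0) {
                    return cmp; // short-circuit, as in compare(slot1, slot2)
                }
            }
            return 0;
        };
        int[][] slots = { {1, 9}, {1, 2}, {0, 5} };
        Arrays.sort(slots, bySlots);
        for (int[] s : slots) {
            System.out.println(Arrays.toString(s)); // [0, 5] then [1, 2] then [1, 9]
        }
    }
}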
<?php App::uses('AppModel', 'Model'); App::uses('SimplePasswordHasher', 'Controller/Component/Auth'); class Product extends AppModel { }
Java
class UsersController < ApplicationController include BatchProcessable # special find method before load_resource before_filter :build_user_with_proper_mission, :only => [:new, :create] # authorization via CanCan load_and_authorize_resource def index # sort and eager load @users = @users.by_name # if there is a search with the '.' character in it, we can't eager load due to a bug in Rails # this should be fixed in Rails 4 unless params[:search].present? && params[:search].match(/\./) @users = @users.with_assoc end # do search if applicable if params[:search].present? begin @users = User.do_search(@users, params[:search]) rescue Search::ParseError flash.now[:error] = $!.to_s @search_error = true end end end def new # set the default pref_lang based on the mission settings prepare_and_render_form end def show prepare_and_render_form end def edit prepare_and_render_form end def create if @user.save @user.reset_password_if_requested set_success(@user) # render printable instructions if requested handle_printable_instructions # if create failed, render the form again else flash.now[:error] = I18n.t('activerecord.errors.models.user.general') prepare_and_render_form end end def update # make sure changing assignment role is permitted if attempting authorize!(:change_assignments, @user) if params[:user]['assignments_attributes'] @user.assign_attributes(params[:user]) pref_lang_changed = @user.pref_lang_changed? if @user.save if @user == current_user I18n.locale = @user.pref_lang.to_sym if pref_lang_changed flash[:success] = t("user.profile_updated") redirect_to(:action => :edit) else set_success(@user) # if the user's password was reset, do it, and show instructions if requested @user.reset_password_if_requested handle_printable_instructions end # if save failed, render the form again else flash.now[:error] = I18n.t('activerecord.errors.models.user.general') prepare_and_render_form end end def destroy destroy_and_handle_errors(@user) redirect_to(index_url_with_page_num) end # shows printable login instructions for the user def login_instructions end # exports the selected users to VCF format def export respond_to do |format| format.vcf do @users = params[:selected] ? load_selected_objects(User) : [] render(:text => @users.collect{|u| u.to_vcf}.join("\n")) end end end def regenerate_key @user = User.find(params[:id]) @user.regenerate_api_key redirect_to(:action => :edit) end private # if we need to print instructions, redirects to the instructions action. otherwise redirects to index. def handle_printable_instructions if @user.reset_password_method == "print" # save the password in the flash since we won't be able to get it once it's crypted flash[:password] = @user.password redirect_to(:action => :login_instructions, :id => @user.id) else redirect_to(index_url_with_page_num) end end # prepares objects and renders the form template def prepare_and_render_form if admin_mode? # get assignable missons and roles for this user @assignments = @user.assignments.as_json(:include => :mission, :methods => :new_record?) 
@assignment_permissions = @user.assignments.map{|a| can?(:update, a)} @assignable_missions = Mission.accessible_by(current_ability, :assign_to).sorted_by_name.as_json(:only => [:id, :name]) @assignable_roles = Ability.assignable_roles(current_user) else @current_assignment = @user.assignments_by_mission[current_mission] || @user.assignments.build(:mission => current_mission) end render(:form) end # builds a user with an appropriate mission assignment if the current_user doesn't have permission to edit a blank user def build_user_with_proper_mission @user = User.new(params[:user]) if cannot?(:create, @user) && @user.assignments.empty? @user.assignments.build(:mission => current_mission) end end end
Java
<?php final class PhabricatorProjectIcon extends Phobject { public static function getIconMap() { return array( 'fa-briefcase' => pht('Briefcase'), 'fa-tags' => pht('Tag'), 'fa-folder' => pht('Folder'), 'fa-users' => pht('Team'), 'fa-bug' => pht('Bug'), 'fa-trash-o' => pht('Garbage'), 'fa-calendar' => pht('Deadline'), 'fa-flag-checkered' => pht('Goal'), 'fa-envelope' => pht('Communication'), 'fa-truck' => pht('Release'), 'fa-lock' => pht('Policy'), 'fa-umbrella' => pht('An Umbrella'), 'fa-cloud' => pht('The Cloud'), 'fa-building' => pht('Company'), 'fa-credit-card' => pht('Accounting'), 'fa-flask' => pht('Experimental'), ); } public static function getColorMap() { $shades = PHUITagView::getShadeMap(); $shades = array_select_keys( $shades, array(PhabricatorProject::DEFAULT_COLOR)) + $shades; unset($shades[PHUITagView::COLOR_DISABLED]); return $shades; } public static function getLabel($key) { $map = self::getIconMap(); return $map[$key]; } public static function getAPIName($key) { return substr($key, 3); } public static function renderIconForChooser($icon) { $project_icons = PhabricatorProjectIcon::getIconMap(); return phutil_tag( 'span', array(), array( id(new PHUIIconView())->setIconFont($icon), ' ', idx($project_icons, $icon, pht('Unknown Icon')), )); } }
Java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.tamaya.events; import org.apache.tamaya.ConfigException; import org.apache.tamaya.ConfigOperator; import org.apache.tamaya.ConfigQuery; import org.apache.tamaya.Configuration; import org.apache.tamaya.ConfigurationProvider; import org.apache.tamaya.TypeLiteral; import org.apache.tamaya.spi.PropertyConverter; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.logging.Level; import java.util.logging.Logger; import java.util.stream.Collectors; /** * Created by Anatole on 24.03.2015. */ public class TestConfigView implements ConfigOperator{ private static final TestConfigView INSTANCE = new TestConfigView(); private TestConfigView(){} public static ConfigOperator of(){ return INSTANCE; } @Override public Configuration operate(final Configuration config) { return new Configuration() { @Override public Map<String, String> getProperties() { Map<String, String> result = new HashMap<>(); for (Map.Entry<String, String> en : config.getProperties().entrySet()) { if (en.getKey().startsWith("test")) { result.put(en.getKey(), en.getValue()); } } return result; // return config.getProperties().entrySet().stream().filter(e -> e.getKey().startsWith("test")).collect( // Collectors.toMap(en -> en.getKey(), en -> en.getValue())); } @Override public Configuration with(ConfigOperator operator) { return null; } @Override public <T> T query(ConfigQuery<T> query) { return null; } @Override public String get(String key) { return getProperties().get(key); } @Override public <T> T get(String key, Class<T> type) { return (T) get(key, TypeLiteral.of(type)); } /** * Accesses the current String value for the given key and tries to convert it * using the {@link org.apache.tamaya.spi.PropertyConverter} instances provided by the current * {@link org.apache.tamaya.spi.ConfigurationContext}. * * @param key the property's absolute, or relative path, e.g. @code * a/b/c/d.myProperty}. * @param type The target type required, not null. * @param <T> the value type * @return the converted value, never null. */ @Override public <T> T get(String key, TypeLiteral<T> type) { String value = get(key); if (value != null) { List<PropertyConverter<T>> converters = ConfigurationProvider.getConfigurationContext() .getPropertyConverters(type); for (PropertyConverter<T> converter : converters) { try { T t = converter.convert(value); if (t != null) { return t; } } catch (Exception e) { Logger.getLogger(getClass().getName()) .log(Level.FINEST, "PropertyConverter: " + converter + " failed to convert value: " + value, e); } } throw new ConfigException("Unparseable config value for type: " + type.getRawType().getName() + ": " + key); } return null; } }; } }
Java
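The documented lookup in get(String, TypeLiteral) above is a first-success converter chain: try each converter in order, treat exceptions as "cannot handle", and fail only if none produce a value. A generic standalone version of that control flow, with Function stand-ins for PropertyConverter:

import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

public class ConverterChainSketch {
    // Tries each converter in order, returning the first non-null result and
    // swallowing exceptions the way the original logs them at FINEST.
    static <T> T convert(String value, List<Function<String, T>> converters) {
        for (Function<String, T> converter : converters) {
            try {
                T result = converter.apply(value);
                if (result != null) {
                    return result;
                }
            } catch (Exception e) {
                // this converter cannot handle the value; try the next one
            }
        }
        throw new IllegalArgumentException("Unparseable config value: " + value);
    }

    public static void main(String[] args) {
        List<Function<String, Integer>> chain = Arrays.asList(
                Integer::valueOf,                      // strict integer parse
                s -> (int) Double.parseDouble(s));     // lossy fallback
        System.out.println(convert("42", chain));  // 42 via the first converter
        System.out.println(convert("3.5", chain)); // 3 via the fallback
    }
}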
<?php /*+*********************************************************************************** * The contents of this file are subject to the vtiger CRM Public License Version 1.0 * ("License"); You may not use this file except in compliance with the License * The Original Code is: vtiger CRM Open Source * The Initial Developer of the Original Code is vtiger. * Portions created by vtiger are Copyright (C) vtiger. * All Rights Reserved. *************************************************************************************/ class Products_Module_Model extends Vtiger_Module_Model { /** * Function to get list view query for popup window * @param <String> $sourceModule Parent module * @param <String> $field parent fieldname * @param <Integer> $record parent id * @param <String> $listQuery * @return <String> Listview Query */ public function getQueryByModuleField($sourceModule, $field, $record, $listQuery) { $supportedModulesList = array($this->getName(), 'Vendors', 'Leads', 'Accounts', 'Contacts', 'Potentials'); if (($sourceModule == 'PriceBooks' && $field == 'priceBookRelatedList') || in_array($sourceModule, $supportedModulesList) || in_array($sourceModule, getInventoryModules())) { $condition = " vtiger_products.discontinued = 1 "; if ($sourceModule === $this->getName()) { $condition .= " AND vtiger_products.productid NOT IN (SELECT productid FROM vtiger_seproductsrel WHERE crmid = '$record' UNION SELECT crmid FROM vtiger_seproductsrel WHERE productid = '$record') AND vtiger_products.productid <> '$record' "; } elseif ($sourceModule === 'PriceBooks') { $condition .= " AND vtiger_products.productid NOT IN (SELECT productid FROM vtiger_pricebookproductrel WHERE pricebookid = '$record') "; } elseif ($sourceModule === 'Vendors') { $condition .= " AND vtiger_products.vendor_id != '$record' "; } elseif (in_array($sourceModule, $supportedModulesList)) { $condition .= " AND vtiger_products.productid NOT IN (SELECT productid FROM vtiger_seproductsrel WHERE crmid = '$record')"; } $pos = stripos($listQuery, 'where'); if ($pos !== false) { $split = preg_split('/where/i', $listQuery, 2); $overRideQuery = $split[0] . ' WHERE ' . $split[1] . ' AND ' . $condition; } else { $overRideQuery = $listQuery. ' WHERE ' . $condition; } return $overRideQuery; } } /** * Function to get Specific Relation Query for this Module * @param <type> $relatedModule * @return <type> */ public function getSpecificRelationQuery($relatedModule) { if ($relatedModule === 'Leads') { $specificQuery = 'AND vtiger_leaddetails.converted = 0'; return $specificQuery; } return parent::getSpecificRelationQuery($relatedModule); } /** * Function to get prices for specified products with specific currency * @param <Integer> $currencyId * @param <Array> $productIdsList * @return <Array> */ public function getPricesForProducts($currencyId, $productIdsList) { return getPricesForProducts($currencyId, $productIdsList, $this->getName()); } /** * Function to check whether the module is summary view supported * @return <Boolean> - true/false */ public function isSummaryViewSupported() { return false; } /** * Function searches the records in the module, if parentId & parentModule * are given then searches only those records related to them.
* @param <String> $searchValue - Search value * @param <Integer> $parentId - parent recordId * @param <String> $parentModule - parent module name * @return <Array of Vtiger_Record_Model> */ public function searchRecord($searchValue, $parentId=false, $parentModule=false, $relatedModule=false) { if(!empty($searchValue) && empty($parentId) && empty($parentModule) && (in_array($relatedModule, getInventoryModules()))) { $matchingRecords = Products_Record_Model::getSearchResult($searchValue, $this->getName()); }else { return parent::searchRecord($searchValue); } return $matchingRecords; } /** * Function returns query for Product-PriceBooks relation * @param <Vtiger_Record_Model> $recordModel * @param <Vtiger_Record_Model> $relatedModuleModel * @return <String> */ function get_product_pricebooks($recordModel, $relatedModuleModel) { $query = 'SELECT vtiger_pricebook.pricebookid, vtiger_pricebook.bookname, vtiger_pricebook.active, vtiger_crmentity.crmid, vtiger_crmentity.smownerid, vtiger_pricebookproductrel.listprice, vtiger_products.unit_price FROM vtiger_pricebook INNER JOIN vtiger_pricebookproductrel ON vtiger_pricebook.pricebookid = vtiger_pricebookproductrel.pricebookid INNER JOIN vtiger_crmentity on vtiger_crmentity.crmid = vtiger_pricebook.pricebookid INNER JOIN vtiger_products on vtiger_products.productid = vtiger_pricebookproductrel.productid INNER JOIN vtiger_pricebookcf on vtiger_pricebookcf.pricebookid = vtiger_pricebook.pricebookid LEFT JOIN vtiger_users ON vtiger_users.id=vtiger_crmentity.smownerid LEFT JOIN vtiger_groups ON vtiger_groups.groupid = vtiger_crmentity.smownerid ' . Users_Privileges_Model::getNonAdminAccessControlQuery($relatedModuleModel->getName()) .' WHERE vtiger_products.productid = '.$recordModel->getId().' and vtiger_crmentity.deleted = 0'; return $query; } }
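/**
 * Hypothetical usage sketch (not part of the module file): building the popup
 * list view query for Products related to a PriceBook record. The record id
 * (1234) and the base $listQuery are illustrative, and getInstance() is the
 * usual vtiger module-model lookup.
 */
function products_popup_query_example($listQuery) {
	$moduleModel = Vtiger_Module_Model::getInstance('Products');
	return $moduleModel->getQueryByModuleField('PriceBooks', 'priceBookRelatedList', 1234, $listQuery);
}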
PHP
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.planner.functions.aggfunctions; import org.apache.flink.table.api.DataTypes; import org.apache.flink.table.expressions.Expression; import org.apache.flink.table.expressions.UnresolvedCallExpression; import org.apache.flink.table.expressions.UnresolvedReferenceExpression; import org.apache.flink.table.types.DataType; import org.apache.flink.table.types.logical.DecimalType; import org.apache.flink.table.types.logical.utils.LogicalTypeMerging; import java.math.BigDecimal; import static org.apache.flink.table.expressions.ApiExpressionUtils.unresolvedRef; import static org.apache.flink.table.planner.expressions.ExpressionBuilder.cast; import static org.apache.flink.table.planner.expressions.ExpressionBuilder.div; import static org.apache.flink.table.planner.expressions.ExpressionBuilder.equalTo; import static org.apache.flink.table.planner.expressions.ExpressionBuilder.ifThenElse; import static org.apache.flink.table.planner.expressions.ExpressionBuilder.isNull; import static org.apache.flink.table.planner.expressions.ExpressionBuilder.literal; import static org.apache.flink.table.planner.expressions.ExpressionBuilder.minus; import static org.apache.flink.table.planner.expressions.ExpressionBuilder.nullOf; import static org.apache.flink.table.planner.expressions.ExpressionBuilder.plus; import static org.apache.flink.table.planner.expressions.ExpressionBuilder.typeLiteral; /** built-in avg aggregate function. 
*/ public abstract class AvgAggFunction extends DeclarativeAggregateFunction { private UnresolvedReferenceExpression sum = unresolvedRef("sum"); private UnresolvedReferenceExpression count = unresolvedRef("count"); public abstract DataType getSumType(); @Override public int operandCount() { return 1; } @Override public UnresolvedReferenceExpression[] aggBufferAttributes() { return new UnresolvedReferenceExpression[] {sum, count}; } @Override public DataType[] getAggBufferTypes() { return new DataType[] {getSumType(), DataTypes.BIGINT()}; } @Override public Expression[] initialValuesExpressions() { return new Expression[] { /* sum = */ literal(0L, getSumType().notNull()), /* count = */ literal(0L) }; } @Override public Expression[] accumulateExpressions() { return new Expression[] { /* sum = */ adjustSumType(ifThenElse(isNull(operand(0)), sum, plus(sum, operand(0)))), /* count = */ ifThenElse(isNull(operand(0)), count, plus(count, literal(1L))), }; } @Override public Expression[] retractExpressions() { return new Expression[] { /* sum = */ adjustSumType(ifThenElse(isNull(operand(0)), sum, minus(sum, operand(0)))), /* count = */ ifThenElse(isNull(operand(0)), count, minus(count, literal(1L))), }; } @Override public Expression[] mergeExpressions() { return new Expression[] { /* sum = */ adjustSumType(plus(sum, mergeOperand(sum))), /* count = */ plus(count, mergeOperand(count)) }; } private UnresolvedCallExpression adjustSumType(UnresolvedCallExpression sumExpr) { return cast(sumExpr, typeLiteral(getSumType())); } /** If all input are nulls, count will be 0 and we will get null after the division. */ @Override public Expression getValueExpression() { Expression ifTrue = nullOf(getResultType()); Expression ifFalse = cast(div(sum, count), typeLiteral(getResultType())); return ifThenElse(equalTo(count, literal(0L)), ifTrue, ifFalse); } /** Built-in Byte Avg aggregate function. */ public static class ByteAvgAggFunction extends AvgAggFunction { @Override public DataType getResultType() { return DataTypes.TINYINT(); } @Override public DataType getSumType() { return DataTypes.BIGINT(); } } /** Built-in Short Avg aggregate function. */ public static class ShortAvgAggFunction extends AvgAggFunction { @Override public DataType getResultType() { return DataTypes.SMALLINT(); } @Override public DataType getSumType() { return DataTypes.BIGINT(); } } /** Built-in Integer Avg aggregate function. */ public static class IntAvgAggFunction extends AvgAggFunction { @Override public DataType getResultType() { return DataTypes.INT(); } @Override public DataType getSumType() { return DataTypes.BIGINT(); } } /** Built-in Long Avg aggregate function. */ public static class LongAvgAggFunction extends AvgAggFunction { @Override public DataType getResultType() { return DataTypes.BIGINT(); } @Override public DataType getSumType() { return DataTypes.BIGINT(); } } /** Built-in Float Avg aggregate function. */ public static class FloatAvgAggFunction extends AvgAggFunction { @Override public DataType getResultType() { return DataTypes.FLOAT(); } @Override public DataType getSumType() { return DataTypes.DOUBLE(); } @Override public Expression[] initialValuesExpressions() { return new Expression[] {literal(0D), literal(0L)}; } } /** Built-in Double Avg aggregate function. 
*/ public static class DoubleAvgAggFunction extends AvgAggFunction { @Override public DataType getResultType() { return DataTypes.DOUBLE(); } @Override public DataType getSumType() { return DataTypes.DOUBLE(); } @Override public Expression[] initialValuesExpressions() { return new Expression[] {literal(0D), literal(0L)}; } } /** Built-in Decimal Avg aggregate function. */ public static class DecimalAvgAggFunction extends AvgAggFunction { private final DecimalType type; public DecimalAvgAggFunction(DecimalType type) { this.type = type; } @Override public DataType getResultType() { DecimalType t = (DecimalType) LogicalTypeMerging.findAvgAggType(type); return DataTypes.DECIMAL(t.getPrecision(), t.getScale()); } @Override public DataType getSumType() { DecimalType t = (DecimalType) LogicalTypeMerging.findSumAggType(type); return DataTypes.DECIMAL(t.getPrecision(), t.getScale()); } @Override public Expression[] initialValuesExpressions() { return new Expression[] {literal(BigDecimal.ZERO, getSumType().notNull()), literal(0L)}; } } }
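// Hypothetical sketch (not part of the planner code): the planner picks one of
// the concrete subclasses above per input type; e.g. the DOUBLE variant keeps
// a DOUBLE sum and a BIGINT count, and getValueExpression() yields NULL when
// count == 0.
class AvgAggFunctionSketch {
    static AvgAggFunction forDouble() {
        return new AvgAggFunction.DoubleAvgAggFunction();
    }
}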
Java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to you under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Defines relational expressions and rules for converting between calling * conventions. */ package org.apache.calcite.rel.convert; // End package-info.java
Java
/*Copyright (C) 2012 Longerian (http://www.longerian.me) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.*/ package org.rubychina.android; import java.util.ArrayList; import java.util.List; import org.rubychina.android.type.Node; import org.rubychina.android.type.SiteGroup; import org.rubychina.android.type.Topic; import org.rubychina.android.type.User; public enum GlobalResource { INSTANCE; private List<Topic> curTopics = new ArrayList<Topic>(); private List<Node> nodes = new ArrayList<Node>(); private List<User> users = new ArrayList<User>(); private List<SiteGroup> sites = new ArrayList<SiteGroup>(); public synchronized List<Topic> getCurTopics() { return curTopics; } public synchronized void setCurTopics(List<Topic> curTopics) { this.curTopics = curTopics; } public synchronized List<Node> getNodes() { return nodes; } public synchronized void setNodes(List<Node> nodes) { this.nodes = nodes; } public synchronized List<User> getUsers() { return users; } public synchronized void setUsers(List<User> users) { this.users = users; } public synchronized List<SiteGroup> getSites() { return sites; } public synchronized void setSites(List<SiteGroup> sites) { this.sites = sites; } }
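// Hypothetical usage sketch (not part of the original file): the enum yields a
// process-wide singleton, and callers replace whole lists instead of mutating
// the ones handed out by the synchronized getters.
class GlobalResourceUsageSketch {
    static void refreshTopics(List<Topic> fetched) {
        GlobalResource.INSTANCE.setCurTopics(fetched);
    }
}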
Java
package set import ( "encoding/json" "errors" "fmt" "io" "os" "path" "strings" "github.com/golang/glog" "github.com/spf13/cobra" kapi "k8s.io/kubernetes/pkg/api" apierrs "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/meta" kresource "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" kclient "k8s.io/kubernetes/pkg/client/unversioned" kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "github.com/openshift/origin/pkg/cmd/templates" cmdutil "github.com/openshift/origin/pkg/cmd/util" "github.com/openshift/origin/pkg/cmd/util/clientcmd" ) const ( volumePrefix = "volume-" storageAnnClass = "volume.beta.kubernetes.io/storage-class" ) var ( volumeLong = templates.LongDesc(` Update volumes on a pod template This command can add, update or remove volumes from containers for any object that has a pod template (deployment configs, replication controllers, or pods). You can list volumes in pod or any object that has a pod template. You can specify a single object or multiple, and alter volumes on all containers or just those that match a given name. If you alter a volume setting on a deployment config, a deployment will be triggered. Changing a replication controller will not affect running pods, and you cannot change a pod's volumes once it has been created. Volume types include: * emptydir (empty directory) *default*: A directory allocated when the pod is created on a local host, is removed when the pod is deleted and is not copied across servers * hostdir (host directory): A directory with specific path on any host (requires elevated privileges) * persistentvolumeclaim or pvc (persistent volume claim): Link the volume directory in the container to a persistent volume claim you have allocated by name - a persistent volume claim is a request to allocate storage. Note that if your claim hasn't been bound, your pods will not start. * secret (mounted secret): Secret volumes mount a named secret to the provided directory. For descriptions on other volume types, see https://docs.openshift.com`) volumeExample = templates.Examples(` # List volumes defined on all deployment configs in the current project %[1]s volume dc --all # Add a new empty dir volume to deployment config (dc) 'registry' mounted under # /var/lib/registry %[1]s volume dc/registry --add --mount-path=/var/lib/registry # Use an existing persistent volume claim (pvc) to overwrite an existing volume 'v1' %[1]s volume dc/registry --add --name=v1 -t pvc --claim-name=pvc1 --overwrite # Remove volume 'v1' from deployment config 'registry' %[1]s volume dc/registry --remove --name=v1 # Create a new persistent volume claim that overwrites an existing volume 'v1' %[1]s volume dc/registry --add --name=v1 -t pvc --claim-size=1G --overwrite # Change the mount point for volume 'v1' to /data %[1]s volume dc/registry --add --name=v1 -m /data --overwrite # Modify the deployment config by removing volume mount "v1" from container "c1" # (and by removing the volume "v1" if no other containers have volume mounts that reference it) %[1]s volume dc/registry --remove --name=v1 --containers=c1 # Add new volume based on a more complex volume source (Git repo, AWS EBS, GCE PD, # Ceph, Gluster, NFS, ISCSI, ...) 
%[1]s volume dc/registry --add -m /repo --source=<json-string>`) ) type VolumeOptions struct { DefaultNamespace string ExplicitNamespace bool Out io.Writer Err io.Writer Mapper meta.RESTMapper Typer runtime.ObjectTyper RESTClientFactory func(mapping *meta.RESTMapping) (resource.RESTClient, error) UpdatePodSpecForObject func(obj runtime.Object, fn func(*kapi.PodSpec) error) (bool, error) Client kclient.PersistentVolumeClaimsNamespacer Encoder runtime.Encoder // Resource selection Selector string All bool Filenames []string // Operations Add bool Remove bool List bool // Common optional params Name string Containers string Confirm bool Output string PrintObject func([]*resource.Info) error OutputVersion unversioned.GroupVersion // Add op params AddOpts *AddVolumeOptions } type AddVolumeOptions struct { Type string MountPath string Overwrite bool Path string ConfigMapName string SecretName string Source string CreateClaim bool ClaimName string ClaimSize string ClaimMode string ClaimClass string TypeChanged bool } func NewCmdVolume(fullName string, f *clientcmd.Factory, out, errOut io.Writer) *cobra.Command { addOpts := &AddVolumeOptions{} opts := &VolumeOptions{AddOpts: addOpts} cmd := &cobra.Command{ Use: "volumes RESOURCE/NAME --add|--remove|--list", Short: "Update volumes on a pod template", Long: volumeLong, Example: fmt.Sprintf(volumeExample, fullName), Aliases: []string{"volume"}, Run: func(cmd *cobra.Command, args []string) { addOpts.TypeChanged = cmd.Flag("type").Changed err := opts.Validate(cmd, args) if err != nil { kcmdutil.CheckErr(kcmdutil.UsageError(cmd, err.Error())) } err = opts.Complete(f, cmd, out, errOut) kcmdutil.CheckErr(err) err = opts.RunVolume(args) if err == cmdutil.ErrExit { os.Exit(1) } kcmdutil.CheckErr(err) }, } cmd.Flags().StringVarP(&opts.Selector, "selector", "l", "", "Selector (label query) to filter on") cmd.Flags().BoolVar(&opts.All, "all", false, "select all resources in the namespace of the specified resource types") cmd.Flags().StringSliceVarP(&opts.Filenames, "filename", "f", opts.Filenames, "Filename, directory, or URL to file to use to edit the resource.") cmd.Flags().BoolVar(&opts.Add, "add", false, "Add volume and/or volume mounts for containers") cmd.Flags().BoolVar(&opts.Remove, "remove", false, "Remove volume and/or volume mounts for containers") cmd.Flags().BoolVar(&opts.List, "list", false, "List volumes and volume mounts for containers") cmd.Flags().StringVar(&opts.Name, "name", "", "Name of the volume. If empty, auto generated for add operation") cmd.Flags().StringVarP(&opts.Containers, "containers", "c", "*", "The names of containers in the selected pod templates to change - may use wildcards") cmd.Flags().BoolVar(&opts.Confirm, "confirm", false, "Confirm that you really want to remove multiple volumes") cmd.Flags().StringVarP(&addOpts.Type, "type", "t", "", "Type of the volume source for add operation. Supported options: emptyDir, hostPath, secret, configmap, persistentVolumeClaim") cmd.Flags().StringVarP(&addOpts.MountPath, "mount-path", "m", "", "Mount path inside the container. Optional param for --add or --remove") cmd.Flags().BoolVar(&addOpts.Overwrite, "overwrite", false, "If true, replace existing volume source and/or volume mount for the given resource") cmd.Flags().StringVar(&addOpts.Path, "path", "", "Host path. Must be provided for hostPath volume type") cmd.Flags().StringVar(&addOpts.ConfigMapName, "configmap-name", "", "Name of the persisted config map. 
Must be provided for configmap volume type") cmd.Flags().StringVar(&addOpts.SecretName, "secret-name", "", "Name of the persisted secret. Must be provided for secret volume type") cmd.Flags().StringVar(&addOpts.ClaimName, "claim-name", "", "Persistent volume claim name. Must be provided for persistentVolumeClaim volume type") cmd.Flags().StringVar(&addOpts.ClaimClass, "claim-class", "", "StorageClass to use for the persistent volume claim") cmd.Flags().StringVar(&addOpts.ClaimSize, "claim-size", "", "If specified along with a persistent volume type, create a new claim with the given size in bytes. Accepts SI notation: 10, 10G, 10Gi") cmd.Flags().StringVar(&addOpts.ClaimMode, "claim-mode", "ReadWriteOnce", "Set the access mode of the claim to be created. Valid values are ReadWriteOnce (rwo), ReadWriteMany (rwm), or ReadOnlyMany (rom)") cmd.Flags().StringVar(&addOpts.Source, "source", "", "Details of volume source as json string. This can be used if the required volume type is not supported by --type option. (e.g.: '{\"gitRepo\": {\"repository\": <git-url>, \"revision\": <commit-hash>}}')") kcmdutil.AddPrinterFlags(cmd) cmd.MarkFlagFilename("filename", "yaml", "yml", "json") // deprecate --list option cmd.Flags().MarkDeprecated("list", "Volumes and volume mounts can be listed by providing a resource with no additional options.") return cmd } func (v *VolumeOptions) Validate(cmd *cobra.Command, args []string) error { if len(v.Selector) > 0 { if _, err := labels.Parse(v.Selector); err != nil { return errors.New("--selector=<selector> must be a valid label selector") } if v.All { return errors.New("you may specify either --selector or --all but not both") } } if len(v.Filenames) == 0 && len(args) < 1 { return errors.New("provide one or more resources to add, list, or delete volumes on as TYPE/NAME") } numOps := 0 if v.Add { numOps++ } if v.Remove { numOps++ } if v.List { numOps++ } switch { case numOps == 0: v.List = true case numOps > 1: return errors.New("you may only specify one operation at a time") } output := kcmdutil.GetFlagString(cmd, "output") if v.List && len(output) > 0 { return errors.New("--list and --output may not be specified together") } err := v.AddOpts.Validate(v.Add) if err != nil { return err } // Removing all volumes for the resource type needs confirmation if v.Remove && len(v.Name) == 0 && !v.Confirm { return errors.New("must provide --confirm for removing more than one volume") } return nil } func (a *AddVolumeOptions) Validate(isAddOp bool) error { if isAddOp { if len(a.Type) == 0 && (len(a.ClaimName) > 0 || len(a.ClaimSize) > 0) { a.Type = "persistentvolumeclaim" a.TypeChanged = true } if len(a.Type) == 0 && (len(a.SecretName) > 0) { a.Type = "secret" a.TypeChanged = true } if len(a.Type) == 0 && (len(a.ConfigMapName) > 0) { a.Type = "configmap" a.TypeChanged = true } if len(a.Type) == 0 && (len(a.Path) > 0) { a.Type = "hostpath" a.TypeChanged = true } if len(a.Type) == 0 { a.Type = "emptydir" } if len(a.Type) == 0 && len(a.Source) == 0 { return errors.New("must provide --type or --source for --add operation") } else if a.TypeChanged && len(a.Source) > 0 { return errors.New("either specify --type or --source but not both for --add operation") } if len(a.Type) > 0 { switch strings.ToLower(a.Type) { case "emptydir": case "hostpath": if len(a.Path) == 0 { return errors.New("must provide --path for --type=hostPath") } case "secret": if len(a.SecretName) == 0 { return errors.New("must provide --secret-name for --type=secret") } case "configmap": if len(a.ConfigMapName) == 
0 { return errors.New("must provide --configmap-name for --type=configmap") } case "persistentvolumeclaim", "pvc": if len(a.ClaimName) == 0 && len(a.ClaimSize) == 0 { return errors.New("must provide --claim-name or --claim-size (to create a new claim) for --type=pvc") } default: return errors.New("invalid volume type. Supported types: emptyDir, hostPath, secret, persistentVolumeClaim") } } else if len(a.Path) > 0 || len(a.SecretName) > 0 || len(a.ClaimName) > 0 { return errors.New("--path|--secret-name|--claim-name are only valid for --type option") } if len(a.Source) > 0 { var source map[string]interface{} err := json.Unmarshal([]byte(a.Source), &source) if err != nil { return err } if len(source) > 1 { return errors.New("must provide only one volume for --source") } var vs kapi.VolumeSource err = json.Unmarshal([]byte(a.Source), &vs) if err != nil { return err } } if len(a.ClaimClass) > 0 { selectedLowerType := strings.ToLower(a.Type) if selectedLowerType != "persistentvolumeclaim" && selectedLowerType != "pvc" { return errors.New("must provide --type as persistentVolumeClaim") } if len(a.ClaimSize) == 0 { return errors.New("must provide --claim-size to create new pvc with claim-class") } } } else if len(a.Source) > 0 || len(a.Path) > 0 || len(a.SecretName) > 0 || len(a.ConfigMapName) > 0 || len(a.ClaimName) > 0 || a.Overwrite { return errors.New("--type|--path|--configmap-name|--secret-name|--claim-name|--source|--overwrite are only valid for --add operation") } return nil } func (v *VolumeOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, out, errOut io.Writer) error { clientConfig, err := f.ClientConfig() if err != nil { return err } v.OutputVersion, err = kcmdutil.OutputVersion(cmd, clientConfig.GroupVersion) if err != nil { return err } _, kc, err := f.Clients() if err != nil { return err } v.Client = kc cmdNamespace, explicit, err := f.DefaultNamespace() if err != nil { return err } mapper, typer := f.Object(false) v.Output = kcmdutil.GetFlagString(cmd, "output") if len(v.Output) > 0 { v.PrintObject = func(infos []*resource.Info) error { return f.PrintResourceInfos(cmd, infos, v.Out) } } v.DefaultNamespace = cmdNamespace v.ExplicitNamespace = explicit v.Out = out v.Err = errOut v.Mapper = mapper v.Typer = typer v.RESTClientFactory = f.Factory.ClientForMapping v.UpdatePodSpecForObject = f.UpdatePodSpecForObject v.Encoder = f.JSONEncoder() // In case of volume source ignore the default volume type if len(v.AddOpts.Source) > 0 { v.AddOpts.Type = "" } if len(v.AddOpts.ClaimSize) > 0 { v.AddOpts.CreateClaim = true if len(v.AddOpts.ClaimName) == 0 { v.AddOpts.ClaimName = kapi.SimpleNameGenerator.GenerateName("pvc-") } q, err := kresource.ParseQuantity(v.AddOpts.ClaimSize) if err != nil { return fmt.Errorf("--claim-size is not valid: %v", err) } v.AddOpts.ClaimSize = q.String() } switch strings.ToLower(v.AddOpts.ClaimMode) { case strings.ToLower(string(kapi.ReadOnlyMany)), "rom": v.AddOpts.ClaimMode = string(kapi.ReadOnlyMany) case strings.ToLower(string(kapi.ReadWriteOnce)), "rwo": v.AddOpts.ClaimMode = string(kapi.ReadWriteOnce) case strings.ToLower(string(kapi.ReadWriteMany)), "rwm": v.AddOpts.ClaimMode = string(kapi.ReadWriteMany) case "": default: return errors.New("--claim-mode must be one of ReadWriteOnce (rwo), ReadWriteMany (rwm), or ReadOnlyMany (rom)") } return nil } func (v *VolumeOptions) RunVolume(args []string) error { mapper := resource.ClientMapperFunc(v.RESTClientFactory) b := resource.NewBuilder(v.Mapper, v.Typer, mapper, kapi.Codecs.UniversalDecoder()). 
ContinueOnError(). NamespaceParam(v.DefaultNamespace).DefaultNamespace(). FilenameParam(v.ExplicitNamespace, false, v.Filenames...). SelectorParam(v.Selector). ResourceTypeOrNameArgs(v.All, args...). Flatten() singular := false infos, err := b.Do().IntoSingular(&singular).Infos() if err != nil { return err } if v.List { listingErrors := v.printVolumes(infos) if len(listingErrors) > 0 { return cmdutil.ErrExit } return nil } updateInfos := []*resource.Info{} // if a claim should be created, generate the info we'll add to the flow if v.Add && v.AddOpts.CreateClaim { claim := v.AddOpts.createClaim() m, err := v.Mapper.RESTMapping(kapi.Kind("PersistentVolumeClaim")) if err != nil { return err } client, err := mapper.ClientForMapping(m) if err != nil { return err } info := &resource.Info{ Mapping: m, Client: client, Namespace: v.DefaultNamespace, Object: claim, } infos = append(infos, info) updateInfos = append(updateInfos, info) } patches, patchError := v.getVolumeUpdatePatches(infos, singular) if patchError != nil { return patchError } if v.PrintObject != nil { return v.PrintObject(infos) } failed := false for _, info := range updateInfos { var obj runtime.Object if len(info.ResourceVersion) == 0 { obj, err = resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, false, info.Object) } else { obj, err = resource.NewHelper(info.Client, info.Mapping).Replace(info.Namespace, info.Name, true, info.Object) } if err != nil { handlePodUpdateError(v.Err, err, "volume") failed = true continue } info.Refresh(obj, true) fmt.Fprintf(v.Out, "%s/%s\n", info.Mapping.Resource, info.Name) } for _, patch := range patches { info := patch.Info if patch.Err != nil { failed = true fmt.Fprintf(v.Err, "error: %s/%s %v\n", info.Mapping.Resource, info.Name, patch.Err) continue } if string(patch.Patch) == "{}" || len(patch.Patch) == 0 { fmt.Fprintf(v.Err, "info: %s %q was not changed\n", info.Mapping.Resource, info.Name) continue } glog.V(4).Infof("Calculated patch %s", patch.Patch) obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, kapi.StrategicMergePatchType, patch.Patch) if err != nil { handlePodUpdateError(v.Err, err, "volume") failed = true continue } info.Refresh(obj, true) kcmdutil.PrintSuccess(v.Mapper, false, v.Out, info.Mapping.Resource, info.Name, false, "updated") } if failed { return cmdutil.ErrExit } return nil } func (v *VolumeOptions) getVolumeUpdatePatches(infos []*resource.Info, singular bool) ([]*Patch, error) { skipped := 0 patches := CalculatePatches(infos, v.Encoder, func(info *resource.Info) (bool, error) { transformed := false ok, err := v.UpdatePodSpecForObject(info.Object, func(spec *kapi.PodSpec) error { var e error switch { case v.Add: e = v.addVolumeToSpec(spec, info, singular) transformed = true case v.Remove: e = v.removeVolumeFromSpec(spec, info) transformed = true } return e }) if !ok { skipped++ } return transformed, err }) if singular && skipped == len(infos) { patchError := fmt.Errorf("the %s %s is not a pod or does not have a pod template", infos[0].Mapping.Resource, infos[0].Name) return patches, patchError } return patches, nil } func setVolumeSourceByType(kv *kapi.Volume, opts *AddVolumeOptions) error { switch strings.ToLower(opts.Type) { case "emptydir": kv.EmptyDir = &kapi.EmptyDirVolumeSource{} case "hostpath": kv.HostPath = &kapi.HostPathVolumeSource{ Path: opts.Path, } case "secret": kv.Secret = &kapi.SecretVolumeSource{ SecretName: opts.SecretName, } case "configmap": kv.ConfigMap = &kapi.ConfigMapVolumeSource{ 
LocalObjectReference: kapi.LocalObjectReference{ Name: opts.ConfigMapName, }, } case "persistentvolumeclaim", "pvc": kv.PersistentVolumeClaim = &kapi.PersistentVolumeClaimVolumeSource{ ClaimName: opts.ClaimName, } default: return fmt.Errorf("invalid volume type: %s", opts.Type) } return nil } func (v *VolumeOptions) printVolumes(infos []*resource.Info) []error { listingErrors := []error{} for _, info := range infos { _, err := v.UpdatePodSpecForObject(info.Object, func(spec *kapi.PodSpec) error { return v.listVolumeForSpec(spec, info) }) if err != nil { listingErrors = append(listingErrors, err) fmt.Fprintf(v.Err, "error: %s/%s %v\n", info.Mapping.Resource, info.Name, err) } } return listingErrors } func (v *AddVolumeOptions) createClaim() *kapi.PersistentVolumeClaim { pvc := &kapi.PersistentVolumeClaim{ ObjectMeta: kapi.ObjectMeta{ Name: v.ClaimName, }, Spec: kapi.PersistentVolumeClaimSpec{ AccessModes: []kapi.PersistentVolumeAccessMode{kapi.PersistentVolumeAccessMode(v.ClaimMode)}, Resources: kapi.ResourceRequirements{ Requests: kapi.ResourceList{ kapi.ResourceName(kapi.ResourceStorage): kresource.MustParse(v.ClaimSize), }, }, }, } if len(v.ClaimClass) > 0 { pvc.Annotations = map[string]string{ storageAnnClass: v.ClaimClass, } } return pvc } func (v *VolumeOptions) setVolumeSource(kv *kapi.Volume) error { var err error opts := v.AddOpts if len(opts.Type) > 0 { err = setVolumeSourceByType(kv, opts) } else if len(opts.Source) > 0 { err = json.Unmarshal([]byte(opts.Source), &kv.VolumeSource) } return err } func (v *VolumeOptions) setVolumeMount(spec *kapi.PodSpec, info *resource.Info) error { opts := v.AddOpts containers, _ := selectContainers(spec.Containers, v.Containers) if len(containers) == 0 && v.Containers != "*" { fmt.Fprintf(v.Err, "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, v.Containers) return nil } for _, c := range containers { for _, m := range c.VolumeMounts { if path.Clean(m.MountPath) == path.Clean(opts.MountPath) && m.Name != v.Name { return fmt.Errorf("volume mount '%s' already exists for container '%s'", opts.MountPath, c.Name) } } for i, m := range c.VolumeMounts { if m.Name == v.Name { c.VolumeMounts = append(c.VolumeMounts[:i], c.VolumeMounts[i+1:]...) 
break } } volumeMount := &kapi.VolumeMount{ Name: v.Name, MountPath: path.Clean(opts.MountPath), } c.VolumeMounts = append(c.VolumeMounts, *volumeMount) } return nil } func (v *VolumeOptions) getVolumeName(spec *kapi.PodSpec, singleResource bool) (string, error) { opts := v.AddOpts if opts.Overwrite { // Multiple resources can have same mount-path for different volumes, // so restrict it for single resource to uniquely find the volume if !singleResource { return "", fmt.Errorf("you must specify --name for the volume name when dealing with multiple resources") } if len(opts.MountPath) > 0 { containers, _ := selectContainers(spec.Containers, v.Containers) var name string matchCount := 0 for _, c := range containers { for _, m := range c.VolumeMounts { if path.Clean(m.MountPath) == path.Clean(opts.MountPath) { name = m.Name matchCount += 1 break } } } switch matchCount { case 0: return "", fmt.Errorf("unable to find the volume for mount-path: %s", opts.MountPath) case 1: return name, nil default: return "", fmt.Errorf("found multiple volumes with same mount-path: %s", opts.MountPath) } } else { return "", fmt.Errorf("ambiguous --overwrite, specify --name or --mount-path") } } else { // Generate volume name name := kapi.SimpleNameGenerator.GenerateName(volumePrefix) if len(v.Output) == 0 { fmt.Fprintf(v.Err, "info: Generated volume name: %s\n", name) } return name, nil } } func (v *VolumeOptions) addVolumeToSpec(spec *kapi.PodSpec, info *resource.Info, singleResource bool) error { opts := v.AddOpts if len(v.Name) == 0 { var err error v.Name, err = v.getVolumeName(spec, singleResource) if err != nil { return err } } newVolume := &kapi.Volume{ Name: v.Name, } setSource := true for i, vol := range spec.Volumes { if v.Name == vol.Name { if !opts.Overwrite { return fmt.Errorf("volume '%s' already exists. Use --overwrite to replace", v.Name) } if !opts.TypeChanged && len(opts.Source) == 0 { newVolume.VolumeSource = vol.VolumeSource setSource = false } spec.Volumes = append(spec.Volumes[:i], spec.Volumes[i+1:]...) break } } if setSource { err := v.setVolumeSource(newVolume) if err != nil { return err } } spec.Volumes = append(spec.Volumes, *newVolume) if len(opts.MountPath) > 0 { err := v.setVolumeMount(spec, info) if err != nil { return err } } return nil } func (v *VolumeOptions) removeSpecificVolume(spec *kapi.PodSpec, containers, skippedContainers []*kapi.Container) error { for _, c := range containers { for i, m := range c.VolumeMounts { if v.Name == m.Name { c.VolumeMounts = append(c.VolumeMounts[:i], c.VolumeMounts[i+1:]...) break } } } // Remove volume if no container is using it found := false for _, c := range skippedContainers { for _, m := range c.VolumeMounts { if v.Name == m.Name { found = true break } } if found { break } } if !found { foundVolume := false for i, vol := range spec.Volumes { if v.Name == vol.Name { spec.Volumes = append(spec.Volumes[:i], spec.Volumes[i+1:]...) 
foundVolume = true break } } if !foundVolume { return fmt.Errorf("volume '%s' not found", v.Name) } } return nil } func (v *VolumeOptions) removeVolumeFromSpec(spec *kapi.PodSpec, info *resource.Info) error { containers, skippedContainers := selectContainers(spec.Containers, v.Containers) if len(containers) == 0 && v.Containers != "*" { fmt.Fprintf(v.Err, "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, v.Containers) return nil } if len(v.Name) == 0 { for _, c := range containers { c.VolumeMounts = []kapi.VolumeMount{} } spec.Volumes = []kapi.Volume{} } else { err := v.removeSpecificVolume(spec, containers, skippedContainers) if err != nil { return err } } return nil } func sourceAccessMode(readOnly bool) string { if readOnly { return " read-only" } return "" } func describePersistentVolumeClaim(claim *kapi.PersistentVolumeClaim) string { if len(claim.Spec.VolumeName) == 0 { // TODO: check for other dimensions of request - IOPs, etc if val, ok := claim.Spec.Resources.Requests[kapi.ResourceStorage]; ok { return fmt.Sprintf("waiting for %sB allocation", val.String()) } return "waiting to allocate" } // TODO: check for other dimensions of capacity? if val, ok := claim.Status.Capacity[kapi.ResourceStorage]; ok { return fmt.Sprintf("allocated %sB", val.String()) } return "allocated unknown size" } func describeVolumeSource(source *kapi.VolumeSource) string { switch { case source.AWSElasticBlockStore != nil: return fmt.Sprintf("AWS EBS %s type=%s partition=%d%s", source.AWSElasticBlockStore.VolumeID, source.AWSElasticBlockStore.FSType, source.AWSElasticBlockStore.Partition, sourceAccessMode(source.AWSElasticBlockStore.ReadOnly)) case source.EmptyDir != nil: return "empty directory" case source.GCEPersistentDisk != nil: return fmt.Sprintf("GCE PD %s type=%s partition=%d%s", source.GCEPersistentDisk.PDName, source.GCEPersistentDisk.FSType, source.GCEPersistentDisk.Partition, sourceAccessMode(source.GCEPersistentDisk.ReadOnly)) case source.GitRepo != nil: if len(source.GitRepo.Revision) == 0 { return fmt.Sprintf("Git repository %s", source.GitRepo.Repository) } return fmt.Sprintf("Git repository %s @ %s", source.GitRepo.Repository, source.GitRepo.Revision) case source.Glusterfs != nil: return fmt.Sprintf("GlusterFS %s:%s%s", source.Glusterfs.EndpointsName, source.Glusterfs.Path, sourceAccessMode(source.Glusterfs.ReadOnly)) case source.HostPath != nil: return fmt.Sprintf("host path %s", source.HostPath.Path) case source.ISCSI != nil: return fmt.Sprintf("ISCSI %s target-portal=%s type=%s lun=%d%s", source.ISCSI.IQN, source.ISCSI.TargetPortal, source.ISCSI.FSType, source.ISCSI.Lun, sourceAccessMode(source.ISCSI.ReadOnly)) case source.NFS != nil: return fmt.Sprintf("NFS %s:%s%s", source.NFS.Server, source.NFS.Path, sourceAccessMode(source.NFS.ReadOnly)) case source.PersistentVolumeClaim != nil: return fmt.Sprintf("pvc/%s%s", source.PersistentVolumeClaim.ClaimName, sourceAccessMode(source.PersistentVolumeClaim.ReadOnly)) case source.RBD != nil: return fmt.Sprintf("Ceph RBD %v type=%s image=%s pool=%s%s", source.RBD.CephMonitors, source.RBD.FSType, source.RBD.RBDImage, source.RBD.RBDPool, sourceAccessMode(source.RBD.ReadOnly)) case source.Secret != nil: return fmt.Sprintf("secret/%s", source.Secret.SecretName) default: return "unknown" } } func (v *VolumeOptions) listVolumeForSpec(spec *kapi.PodSpec, info *resource.Info) error { containers, _ := selectContainers(spec.Containers, v.Containers) if len(containers) == 0 && v.Containers != "*" { fmt.Fprintf(v.Err, 
"warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, v.Containers) return nil } fmt.Fprintf(v.Out, "%s/%s\n", info.Mapping.Resource, info.Name) checkName := (len(v.Name) > 0) found := false for _, vol := range spec.Volumes { if checkName && v.Name != vol.Name { continue } found = true refInfo := "" if vol.VolumeSource.PersistentVolumeClaim != nil { claimName := vol.VolumeSource.PersistentVolumeClaim.ClaimName claim, err := v.Client.PersistentVolumeClaims(info.Namespace).Get(claimName) switch { case err == nil: refInfo = fmt.Sprintf("(%s)", describePersistentVolumeClaim(claim)) case apierrs.IsNotFound(err): refInfo = "(does not exist)" default: fmt.Fprintf(v.Err, "error: unable to retrieve persistent volume claim %s referenced in %s/%s: %v", claimName, info.Mapping.Resource, info.Name, err) } } if len(refInfo) > 0 { refInfo = " " + refInfo } fmt.Fprintf(v.Out, " %s%s as %s\n", describeVolumeSource(&vol.VolumeSource), refInfo, vol.Name) for _, c := range containers { for _, m := range c.VolumeMounts { if vol.Name != m.Name { continue } if len(spec.Containers) == 1 { fmt.Fprintf(v.Out, " mounted at %s\n", m.MountPath) } else { fmt.Fprintf(v.Out, " mounted at %s in container %s\n", m.MountPath, c.Name) } } } } if checkName && !found { return fmt.Errorf("volume %q not found", v.Name) } return nil }
Go
/*
 * Copyright 2011 JBoss Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.kie.workbench.common.widgets.client.widget;

public class PercentageCalculator {

    /**
     * Returns the truncated percentage of {@code denominator - numerator}
     * relative to {@code denominator}, or 0 when the denominator is 0 so the
     * division-by-zero case is guarded.
     */
    public static int calculatePercent(int numerator, int denominator) {
        int percent = 0;
        if (denominator != 0) {
            percent = (int) ((((float) denominator - (float) numerator) / (float) denominator) * 100);
        }
        return percent;
    }
}
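// A minimal usage sketch (hypothetical, not part of the original widget code):
// with 25 of 100 units outstanding the helper reports 75.
class PercentageCalculatorSketch {
    public static void main(String[] args) {
        System.out.println(PercentageCalculator.calculatePercent(25, 100)); // 75
        System.out.println(PercentageCalculator.calculatePercent(0, 0));    // 0, zero-denominator guard
    }
}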
Java
/* * Copyright 2015 DuraSpace, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.fcrepo.http.api; import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Component; /** * @author cabeer * @since 10/17/14 */ @Component public class FedoraHttpConfiguration { @Value("${fcrepo.http.ldp.putRequiresIfMatch:false}") private boolean putRequiresIfMatch; /** * Should PUT requests require an If-Match header? * @return put request if match */ public boolean putRequiresIfMatch() { return putRequiresIfMatch; } }
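// Hypothetical configuration sketch (not part of the class): the flag binds
// from any Spring property source, e.g. the system property
//   -Dfcrepo.http.ldp.putRequiresIfMatch=true
// and falls back to false when the property is absent, per the @Value default above.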
Java
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from nova.api.openstack.compute import flavors as flavors_api from nova.api.openstack.compute.views import flavors as flavors_view from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.compute import flavors from nova import exception from nova.openstack.common.gettextutils import _ authorize = extensions.extension_authorizer('compute', 'flavormanage') class FlavorManageController(wsgi.Controller): """ The Flavor Lifecycle API controller for the OpenStack API. """ _view_builder_class = flavors_view.ViewBuilder def __init__(self): super(FlavorManageController, self).__init__() @wsgi.action("delete") def _delete(self, req, id): context = req.environ['nova.context'] authorize(context) try: flavor = flavors.get_flavor_by_flavor_id( id, ctxt=context, read_deleted="no") except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) flavors.destroy(flavor['name']) return webob.Response(status_int=202) @wsgi.action("create") @wsgi.serializers(xml=flavors_api.FlavorTemplate) def _create(self, req, body): context = req.environ['nova.context'] authorize(context) if not self.is_valid_body(body, 'flavor'): msg = _("Invalid request body") raise webob.exc.HTTPBadRequest(explanation=msg) vals = body['flavor'] name = vals.get('name') flavorid = vals.get('id') memory = vals.get('ram') vcpus = vals.get('vcpus') root_gb = vals.get('disk') ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0) swap = vals.get('swap', 0) rxtx_factor = vals.get('rxtx_factor', 1.0) is_public = vals.get('os-flavor-access:is_public', True) try: flavor = flavors.create(name, memory, vcpus, root_gb, ephemeral_gb=ephemeral_gb, flavorid=flavorid, swap=swap, rxtx_factor=rxtx_factor, is_public=is_public) if not flavor['is_public']: flavors.add_flavor_access(flavor['flavorid'], context.project_id, context) req.cache_db_flavor(flavor) except (exception.InstanceTypeExists, exception.InstanceTypeIdExists) as err: raise webob.exc.HTTPConflict(explanation=err.format_message()) except exception.InvalidInput as exc: raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) return self._view_builder.show(req, flavor) class Flavormanage(extensions.ExtensionDescriptor): """ Flavor create/delete API support """ name = "FlavorManage" alias = "os-flavor-manage" namespace = ("http://docs.openstack.org/compute/ext/" "flavor_manage/api/v1.1") updated = "2012-01-19T00:00:00+00:00" def get_controller_extensions(self): controller = FlavorManageController() extension = extensions.ControllerExtension(self, 'flavors', controller) return [extension]
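# Hypothetical request sketch (not part of the extension; values illustrative):
# the body that _create() expects, matching the keys it reads above.
EXAMPLE_FLAVOR_BODY = {
    "flavor": {
        "name": "m1.example",
        "id": "10",
        "ram": 512,
        "vcpus": 1,
        "disk": 1,
        "OS-FLV-EXT-DATA:ephemeral": 0,
        "swap": 0,
        "rxtx_factor": 1.0,
        "os-flavor-access:is_public": True,
    },
}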
Python
<?php # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/pubsub/v1/pubsub.proto namespace Google\Cloud\PubSub\V1; use Google\Protobuf\Internal\GPBType; use Google\Protobuf\Internal\RepeatedField; use Google\Protobuf\Internal\GPBUtil; /** * Request for the ModifyAckDeadline method. * * Generated from protobuf message <code>google.pubsub.v1.ModifyAckDeadlineRequest</code> */ class ModifyAckDeadlineRequest extends \Google\Protobuf\Internal\Message { /** * Required. The name of the subscription. * Format is `projects/{project}/subscriptions/{sub}`. * * Generated from protobuf field <code>string subscription = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {</code> */ private $subscription = ''; /** * Required. List of acknowledgment IDs. * * Generated from protobuf field <code>repeated string ack_ids = 4 [(.google.api.field_behavior) = REQUIRED];</code> */ private $ack_ids; /** * Required. The new ack deadline with respect to the time this request was * sent to the Pub/Sub system. For example, if the value is 10, the new ack * deadline will expire 10 seconds after the `ModifyAckDeadline` call was * made. Specifying zero might immediately make the message available for * delivery to another subscriber client. This typically results in an * increase in the rate of message redeliveries (that is, duplicates). * The minimum deadline you can specify is 0 seconds. * The maximum deadline you can specify is 600 seconds (10 minutes). * * Generated from protobuf field <code>int32 ack_deadline_seconds = 3 [(.google.api.field_behavior) = REQUIRED];</code> */ private $ack_deadline_seconds = 0; /** * Constructor. * * @param array $data { * Optional. Data for populating the Message object. * * @type string $subscription * Required. The name of the subscription. * Format is `projects/{project}/subscriptions/{sub}`. * @type string[]|\Google\Protobuf\Internal\RepeatedField $ack_ids * Required. List of acknowledgment IDs. * @type int $ack_deadline_seconds * Required. The new ack deadline with respect to the time this request was * sent to the Pub/Sub system. For example, if the value is 10, the new ack * deadline will expire 10 seconds after the `ModifyAckDeadline` call was * made. Specifying zero might immediately make the message available for * delivery to another subscriber client. This typically results in an * increase in the rate of message redeliveries (that is, duplicates). * The minimum deadline you can specify is 0 seconds. * The maximum deadline you can specify is 600 seconds (10 minutes). * } */ public function __construct($data = NULL) { \GPBMetadata\Google\Pubsub\V1\Pubsub::initOnce(); parent::__construct($data); } /** * Required. The name of the subscription. * Format is `projects/{project}/subscriptions/{sub}`. * * Generated from protobuf field <code>string subscription = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {</code> * @return string */ public function getSubscription() { return $this->subscription; } /** * Required. The name of the subscription. * Format is `projects/{project}/subscriptions/{sub}`. * * Generated from protobuf field <code>string subscription = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {</code> * @param string $var * @return $this */ public function setSubscription($var) { GPBUtil::checkString($var, True); $this->subscription = $var; return $this; } /** * Required. List of acknowledgment IDs. 
* * Generated from protobuf field <code>repeated string ack_ids = 4 [(.google.api.field_behavior) = REQUIRED];</code> * @return \Google\Protobuf\Internal\RepeatedField */ public function getAckIds() { return $this->ack_ids; } /** * Required. List of acknowledgment IDs. * * Generated from protobuf field <code>repeated string ack_ids = 4 [(.google.api.field_behavior) = REQUIRED];</code> * @param string[]|\Google\Protobuf\Internal\RepeatedField $var * @return $this */ public function setAckIds($var) { $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::STRING); $this->ack_ids = $arr; return $this; } /** * Required. The new ack deadline with respect to the time this request was * sent to the Pub/Sub system. For example, if the value is 10, the new ack * deadline will expire 10 seconds after the `ModifyAckDeadline` call was * made. Specifying zero might immediately make the message available for * delivery to another subscriber client. This typically results in an * increase in the rate of message redeliveries (that is, duplicates). * The minimum deadline you can specify is 0 seconds. * The maximum deadline you can specify is 600 seconds (10 minutes). * * Generated from protobuf field <code>int32 ack_deadline_seconds = 3 [(.google.api.field_behavior) = REQUIRED];</code> * @return int */ public function getAckDeadlineSeconds() { return $this->ack_deadline_seconds; } /** * Required. The new ack deadline with respect to the time this request was * sent to the Pub/Sub system. For example, if the value is 10, the new ack * deadline will expire 10 seconds after the `ModifyAckDeadline` call was * made. Specifying zero might immediately make the message available for * delivery to another subscriber client. This typically results in an * increase in the rate of message redeliveries (that is, duplicates). * The minimum deadline you can specify is 0 seconds. * The maximum deadline you can specify is 600 seconds (10 minutes). * * Generated from protobuf field <code>int32 ack_deadline_seconds = 3 [(.google.api.field_behavior) = REQUIRED];</code> * @param int $var * @return $this */ public function setAckDeadlineSeconds($var) { GPBUtil::checkInt32($var); $this->ack_deadline_seconds = $var; return $this; } }
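/**
 * Hypothetical usage sketch (not part of the generated file): populating the
 * request via the setters defined above. The subscription path and ack id are
 * illustrative.
 */
function modify_ack_deadline_request_example()
{
    $request = new ModifyAckDeadlineRequest();
    $request->setSubscription('projects/my-project/subscriptions/my-sub');
    $request->setAckIds(['ack-id-1']);
    $request->setAckDeadlineSeconds(60); // 0..600 seconds per the field docs
    return $request;
}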
PHP
/* * Copyright 2000-2009 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.util.xml.highlighting; import com.intellij.codeInspection.LocalQuickFix; import com.intellij.codeInspection.ProblemHighlightType; import com.intellij.lang.annotation.Annotation; import com.intellij.lang.annotation.HighlightSeverity; import com.intellij.openapi.util.TextRange; import com.intellij.psi.PsiReference; import com.intellij.util.xml.DomElement; import com.intellij.util.xml.DomFileElement; import com.intellij.util.xml.GenericDomValue; import com.intellij.util.xml.reflect.DomCollectionChildDescription; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; public interface DomElementAnnotationHolder extends Iterable<DomElementProblemDescriptor>{ boolean isOnTheFly(); @NotNull DomFileElement<?> getFileElement(); @NotNull DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, @Nullable String message, LocalQuickFix... fixes); @NotNull DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, DomCollectionChildDescription childDescription, @Nullable String message); @NotNull DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, HighlightSeverity highlightType, String message); DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, HighlightSeverity highlightType, String message, LocalQuickFix... fixes); DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, HighlightSeverity highlightType, String message, TextRange textRange, LocalQuickFix... fixes); DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, ProblemHighlightType highlightType, String message, @Nullable TextRange textRange, LocalQuickFix... fixes); @NotNull DomElementResolveProblemDescriptor createResolveProblem(@NotNull GenericDomValue element, @NotNull PsiReference reference); /** * Is useful only if called from {@link com.intellij.util.xml.highlighting.DomElementsAnnotator} instance * @param element element * @param severity highlight severity * @param message description * @return annotation */ @NotNull Annotation createAnnotation(@NotNull DomElement element, HighlightSeverity severity, @Nullable String message); int getSize(); }
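// Hypothetical usage sketch (not part of the interface file): a
// DomElementsAnnotator implementation would report problems through one of the
// overloads above, e.g.
//   holder.createProblem(element, HighlightSeverity.ERROR, "Value must not be empty");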
Java
# megam_rustyprint

Display data in table format on console
Markdown
package com.mapswithme.maps.purchase; import android.app.Activity; import android.content.Intent; import android.os.Bundle; import android.text.TextUtils; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageView; import android.widget.TextView; import android.widget.Toast; import androidx.annotation.NonNull; import androidx.annotation.Nullable; import com.android.billingclient.api.SkuDetails; import com.bumptech.glide.Glide; import com.mapswithme.maps.Framework; import com.mapswithme.maps.PrivateVariables; import com.mapswithme.maps.PurchaseOperationObservable; import com.mapswithme.maps.R; import com.mapswithme.maps.base.BaseMwmFragment; import com.mapswithme.maps.bookmarks.data.PaymentData; import com.mapswithme.maps.dialog.AlertDialogCallback; import com.mapswithme.util.Utils; import com.mapswithme.util.log.Logger; import com.mapswithme.util.log.LoggerFactory; import com.mapswithme.util.statistics.Statistics; import java.util.Collections; import java.util.List; public class BookmarkPaymentFragment extends BaseMwmFragment implements AlertDialogCallback, PurchaseStateActivator<BookmarkPaymentState> { static final String ARG_PAYMENT_DATA = "arg_payment_data"; private static final Logger LOGGER = LoggerFactory.INSTANCE.getLogger(LoggerFactory.Type.BILLING); private static final String TAG = BookmarkPaymentFragment.class.getSimpleName(); private static final String EXTRA_CURRENT_STATE = "extra_current_state"; private static final String EXTRA_PRODUCT_DETAILS = "extra_product_details"; private static final String EXTRA_SUBS_PRODUCT_DETAILS = "extra_subs_product_details"; private static final String EXTRA_VALIDATION_RESULT = "extra_validation_result"; @SuppressWarnings("NullableProblems") @NonNull private PurchaseController<PurchaseCallback> mPurchaseController; @SuppressWarnings("NullableProblems") @NonNull private BookmarkPurchaseCallback mPurchaseCallback; @SuppressWarnings("NullableProblems") @NonNull private PaymentData mPaymentData; @Nullable private ProductDetails mProductDetails; @Nullable private ProductDetails mSubsProductDetails; private boolean mValidationResult; @NonNull private BookmarkPaymentState mState = BookmarkPaymentState.NONE; @SuppressWarnings("NullableProblems") @NonNull private BillingManager<PlayStoreBillingCallback> mSubsProductDetailsLoadingManager; @NonNull private final SubsProductDetailsCallback mSubsProductDetailsCallback = new SubsProductDetailsCallback(); @Override public void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); Bundle args = getArguments(); if (args == null) throw new IllegalStateException("Args must be provided for payment fragment!"); PaymentData paymentData = args.getParcelable(ARG_PAYMENT_DATA); if (paymentData == null) throw new IllegalStateException("Payment data must be provided for payment fragment!"); mPaymentData = paymentData; mPurchaseCallback = new BookmarkPurchaseCallback(mPaymentData.getServerId()); } @Nullable @Override public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) { mPurchaseController = PurchaseFactory.createBookmarkPurchaseController(requireContext(), mPaymentData.getProductId(), mPaymentData.getServerId()); if (savedInstanceState != null) mPurchaseController.onRestore(savedInstanceState); mPurchaseController.initialize(requireActivity()); mSubsProductDetailsLoadingManager = PurchaseFactory.createSubscriptionBillingManager(); 
mSubsProductDetailsLoadingManager.initialize(requireActivity()); mSubsProductDetailsLoadingManager.addCallback(mSubsProductDetailsCallback); mSubsProductDetailsCallback.attach(this); View root = inflater.inflate(R.layout.fragment_bookmark_payment, container, false); View subscriptionButton = root.findViewById(R.id.buy_subs_btn); subscriptionButton.setOnClickListener(v -> onBuySubscriptionClicked()); TextView buyInappBtn = root.findViewById(R.id.buy_inapp_btn); buyInappBtn.setOnClickListener(v -> onBuyInappClicked()); return root; } private void onBuySubscriptionClicked() { SubscriptionType type = SubscriptionType.getTypeByBookmarksGroup(mPaymentData.getGroup()); if (type.equals(SubscriptionType.BOOKMARKS_SIGHTS)) { BookmarksSightsSubscriptionActivity.startForResult (this, PurchaseUtils.REQ_CODE_PAY_SUBSCRIPTION, Statistics.ParamValue.CARD); return; } BookmarksAllSubscriptionActivity.startForResult (this, PurchaseUtils.REQ_CODE_PAY_SUBSCRIPTION, Statistics.ParamValue.CARD); } @Override public void onActivityResult(int requestCode, int resultCode, Intent data) { super.onActivityResult(requestCode, resultCode, data); if (resultCode != Activity.RESULT_OK) return; if (requestCode == PurchaseUtils.REQ_CODE_PAY_SUBSCRIPTION) { Intent intent = new Intent(); intent.putExtra(PurchaseUtils.EXTRA_IS_SUBSCRIPTION, true); requireActivity().setResult(Activity.RESULT_OK, intent); requireActivity().finish(); } } private void onBuyInappClicked() { Statistics.INSTANCE.trackPurchasePreviewSelect(mPaymentData.getServerId(), mPaymentData.getProductId()); Statistics.INSTANCE.trackPurchaseEvent(Statistics.EventName.INAPP_PURCHASE_PREVIEW_PAY, mPaymentData.getServerId(), Statistics.STATISTICS_CHANNEL_REALTIME); startPurchaseTransaction(); } @Override public boolean onBackPressed() { if (mState == BookmarkPaymentState.VALIDATION) { Toast.makeText(requireContext(), R.string.purchase_please_wait_toast, Toast.LENGTH_SHORT) .show(); return true; } Statistics.INSTANCE.trackPurchaseEvent(Statistics.EventName.INAPP_PURCHASE_PREVIEW_CANCEL, mPaymentData.getServerId()); return super.onBackPressed(); } @Override public void onViewCreated(View view, @Nullable Bundle savedInstanceState) { super.onViewCreated(view, savedInstanceState); if (savedInstanceState == null) Statistics.INSTANCE.trackPurchasePreviewShow(mPaymentData.getServerId(), PrivateVariables.bookmarksVendor(), mPaymentData.getProductId()); LOGGER.d(TAG, "onViewCreated savedInstanceState = " + savedInstanceState); setInitialPaymentData(); loadImage(); if (savedInstanceState != null) { mProductDetails = savedInstanceState.getParcelable(EXTRA_PRODUCT_DETAILS); if (mProductDetails != null) updateProductDetails(); mSubsProductDetails = savedInstanceState.getParcelable(EXTRA_SUBS_PRODUCT_DETAILS); if (mSubsProductDetails != null) updateSubsProductDetails(); mValidationResult = savedInstanceState.getBoolean(EXTRA_VALIDATION_RESULT); BookmarkPaymentState savedState = BookmarkPaymentState.values()[savedInstanceState.getInt(EXTRA_CURRENT_STATE)]; activateState(savedState); return; } activateState(BookmarkPaymentState.PRODUCT_DETAILS_LOADING); mPurchaseController.queryProductDetails(); SubscriptionType type = SubscriptionType.getTypeByBookmarksGroup(mPaymentData.getGroup()); List<String> subsProductIds = Collections.singletonList(type.getMonthlyProductId()); mSubsProductDetailsLoadingManager.queryProductDetails(subsProductIds); } @Override public void onDestroyView() { super.onDestroyView(); mPurchaseController.destroy(); 
mSubsProductDetailsLoadingManager.removeCallback(mSubsProductDetailsCallback); mSubsProductDetailsCallback.detach(); mSubsProductDetailsLoadingManager.destroy(); } private void startPurchaseTransaction() { activateState(BookmarkPaymentState.TRANSACTION_STARTING); Framework.nativeStartPurchaseTransaction(mPaymentData.getServerId(), PrivateVariables.bookmarksVendor()); } void launchBillingFlow() { mPurchaseController.launchPurchaseFlow(mPaymentData.getProductId()); activateState(BookmarkPaymentState.PAYMENT_IN_PROGRESS); } @Override public void onStart() { super.onStart(); PurchaseOperationObservable observable = PurchaseOperationObservable.from(requireContext()); observable.addTransactionObserver(mPurchaseCallback); mPurchaseController.addCallback(mPurchaseCallback); mPurchaseCallback.attach(this); } @Override public void onStop() { super.onStop(); PurchaseOperationObservable observable = PurchaseOperationObservable.from(requireContext()); observable.removeTransactionObserver(mPurchaseCallback); mPurchaseController.removeCallback(); mPurchaseCallback.detach(); } @Override public void onSaveInstanceState(Bundle outState) { super.onSaveInstanceState(outState); LOGGER.d(TAG, "onSaveInstanceState"); outState.putInt(EXTRA_CURRENT_STATE, mState.ordinal()); outState.putParcelable(EXTRA_PRODUCT_DETAILS, mProductDetails); outState.putParcelable(EXTRA_SUBS_PRODUCT_DETAILS, mSubsProductDetails); /* Persist the validation flag; onViewCreated restores it via EXTRA_VALIDATION_RESULT, so it must be saved here as well. */ outState.putBoolean(EXTRA_VALIDATION_RESULT, mValidationResult); mPurchaseController.onSave(outState); } @Override public void activateState(@NonNull BookmarkPaymentState state) { if (state == mState) return; LOGGER.i(TAG, "Activate state: " + state); mState = state; mState.activate(this); } private void loadImage() { if (TextUtils.isEmpty(mPaymentData.getImgUrl())) return; ImageView imageView = getViewOrThrow().findViewById(R.id.image); Glide.with(imageView.getContext()) .load(mPaymentData.getImgUrl()) .centerCrop() .into(imageView); } private void setInitialPaymentData() { TextView name = getViewOrThrow().findViewById(R.id.product_catalog_name); name.setText(mPaymentData.getName()); TextView author = getViewOrThrow().findViewById(R.id.author_name); author.setText(mPaymentData.getAuthorName()); } void handleProductDetails(@NonNull List<SkuDetails> details) { if (details.isEmpty()) return; SkuDetails skuDetails = details.get(0); mProductDetails = PurchaseUtils.toProductDetails(skuDetails); } void handleSubsProductDetails(@NonNull List<SkuDetails> details) { if (details.isEmpty()) return; SkuDetails skuDetails = details.get(0); mSubsProductDetails = PurchaseUtils.toProductDetails(skuDetails); } void handleValidationResult(boolean validationResult) { mValidationResult = validationResult; } @Override public void onAlertDialogPositiveClick(int requestCode, int which) { handleErrorDialogEvent(requestCode); } @Override public void onAlertDialogNegativeClick(int requestCode, int which) { // Do nothing by default.
} @Override public void onAlertDialogCancel(int requestCode) { handleErrorDialogEvent(requestCode); } private void handleErrorDialogEvent(int requestCode) { switch (requestCode) { case PurchaseUtils.REQ_CODE_PRODUCT_DETAILS_FAILURE: requireActivity().finish(); break; case PurchaseUtils.REQ_CODE_START_TRANSACTION_FAILURE: case PurchaseUtils.REQ_CODE_PAYMENT_FAILURE: activateState(BookmarkPaymentState.PRODUCT_DETAILS_LOADED); break; } } void updateProductDetails() { if (mProductDetails == null) throw new AssertionError("Product details must be obtained at this moment!"); TextView buyButton = getViewOrThrow().findViewById(R.id.buy_inapp_btn); String price = Utils.formatCurrencyString(mProductDetails.getPrice(), mProductDetails.getCurrencyCode()); buyButton.setText(getString(R.string.buy_btn, price)); TextView storeName = getViewOrThrow().findViewById(R.id.product_store_name); storeName.setText(mProductDetails.getTitle()); } void updateSubsProductDetails() { if (mSubsProductDetails == null) throw new AssertionError("Subs product details must be obtained at this moment!"); String formattedPrice = Utils.formatCurrencyString(mSubsProductDetails.getPrice(), mSubsProductDetails.getCurrencyCode()); TextView subsButton = getViewOrThrow().findViewById(R.id.buy_subs_btn); subsButton.setText(getString(R.string.buy_btn_for_subscription_version_2, formattedPrice)); } void finishValidation() { if (mValidationResult) requireActivity().setResult(Activity.RESULT_OK); requireActivity().finish(); } }
Java
/* * Copyright 2010-2013 Coda Hale and Yammer, Inc., 2014-2017 Dropwizard Team * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.dropwizard.metrics; import org.junit.Test; import io.dropwizard.metrics.SlidingWindowReservoir; import static org.assertj.core.api.Assertions.assertThat; public class SlidingWindowReservoirTest { private final SlidingWindowReservoir reservoir = new SlidingWindowReservoir(3); @Test public void handlesSmallDataStreams() throws Exception { reservoir.update(1); reservoir.update(2); assertThat(reservoir.getSnapshot().getValues()) .containsOnly(1, 2); } @Test public void onlyKeepsTheMostRecentFromBigDataStreams() throws Exception { reservoir.update(1); reservoir.update(2); reservoir.update(3); reservoir.update(4); assertThat(reservoir.getSnapshot().getValues()) .containsOnly(2, 3, 4); } }
Java
/* * Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hazelcast.concurrent.atomicreference; import com.hazelcast.core.HazelcastInstance; import com.hazelcast.core.IAtomicReference; import com.hazelcast.test.HazelcastParallelClassRunner; import com.hazelcast.test.HazelcastTestSupport; import com.hazelcast.test.annotation.ParallelTest; import com.hazelcast.test.annotation.QuickTest; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import java.io.Serializable; import java.util.concurrent.ExecutionException; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; @RunWith(HazelcastParallelClassRunner.class) @Category({QuickTest.class, ParallelTest.class}) public class AtomicReferenceInstanceSharingTest extends HazelcastTestSupport { private HazelcastInstance[] instances; private HazelcastInstance local; private HazelcastInstance remote; @Before public void setUp() { instances = createHazelcastInstanceFactory(2).newInstances(); warmUpPartitions(instances); local = instances[0]; remote = instances[1]; } @Test public void invocationToLocalMember() throws ExecutionException, InterruptedException { String localKey = generateKeyOwnedBy(local); IAtomicReference<DummyObject> ref = local.getAtomicReference(localKey); DummyObject inserted = new DummyObject(); ref.set(inserted); DummyObject get1 = ref.get(); DummyObject get2 = ref.get(); assertNotNull(get1); assertNotNull(get2); assertNotSame(get1, get2); assertNotSame(get1, inserted); assertNotSame(get2, inserted); } public static class DummyObject implements Serializable { } @Test public void invocationToRemoteMember() throws ExecutionException, InterruptedException { String localKey = generateKeyOwnedBy(remote); IAtomicReference<DummyObject> ref = local.getAtomicReference(localKey); DummyObject inserted = new DummyObject(); ref.set(inserted); DummyObject get1 = ref.get(); DummyObject get2 = ref.get(); assertNotNull(get1); assertNotNull(get2); assertNotSame(get1, get2); assertNotSame(get1, inserted); assertNotSame(get2, inserted); } }
Java
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include <aws/codedeploy/model/TrafficRoutingType.h> #include <aws/core/utils/HashingUtils.h> #include <aws/core/Globals.h> #include <aws/core/utils/EnumParseOverflowContainer.h> using namespace Aws::Utils; namespace Aws { namespace CodeDeploy { namespace Model { namespace TrafficRoutingTypeMapper { static const int TimeBasedCanary_HASH = HashingUtils::HashString("TimeBasedCanary"); static const int TimeBasedLinear_HASH = HashingUtils::HashString("TimeBasedLinear"); static const int AllAtOnce_HASH = HashingUtils::HashString("AllAtOnce"); TrafficRoutingType GetTrafficRoutingTypeForName(const Aws::String& name) { int hashCode = HashingUtils::HashString(name.c_str()); if (hashCode == TimeBasedCanary_HASH) { return TrafficRoutingType::TimeBasedCanary; } else if (hashCode == TimeBasedLinear_HASH) { return TrafficRoutingType::TimeBasedLinear; } else if (hashCode == AllAtOnce_HASH) { return TrafficRoutingType::AllAtOnce; } EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) { overflowContainer->StoreOverflow(hashCode, name); return static_cast<TrafficRoutingType>(hashCode); } return TrafficRoutingType::NOT_SET; } Aws::String GetNameForTrafficRoutingType(TrafficRoutingType enumValue) { switch(enumValue) { case TrafficRoutingType::TimeBasedCanary: return "TimeBasedCanary"; case TrafficRoutingType::TimeBasedLinear: return "TimeBasedLinear"; case TrafficRoutingType::AllAtOnce: return "AllAtOnce"; default: EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) { return overflowContainer->RetrieveOverflow(static_cast<int>(enumValue)); } return {}; } } } // namespace TrafficRoutingTypeMapper } // namespace Model } // namespace CodeDeploy } // namespace Aws
C++
/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "fmt" "testing" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" ) var _ balancer.V2Balancer = &funcBalancer{} type funcBalancer struct { updateClientConnState func(s balancer.ClientConnState) error } func (*funcBalancer) HandleSubConnStateChange(balancer.SubConn, connectivity.State) { panic("unimplemented") // v1 API } func (*funcBalancer) HandleResolvedAddrs([]resolver.Address, error) { panic("unimplemented") // v1 API } func (b *funcBalancer) UpdateClientConnState(s balancer.ClientConnState) error { return b.updateClientConnState(s) } func (*funcBalancer) ResolverError(error) {} func (*funcBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) { panic("unimplemented") // we never have sub-conns } func (*funcBalancer) Close() {} type funcBalancerBuilder struct { name string instance *funcBalancer } func (b *funcBalancerBuilder) Build(balancer.ClientConn, balancer.BuildOptions) balancer.Balancer { return b.instance } func (b *funcBalancerBuilder) Name() string { return b.name } // TestBalancerErrorResolverPolling injects balancer errors and verifies // ResolveNow is called on the resolver with the appropriate backoff strategy // being consulted between ResolveNow calls. func (s) TestBalancerErrorResolverPolling(t *testing.T) { // The test balancer will return ErrBadResolverState iff the // ClientConnState contains no addresses. fb := &funcBalancer{ updateClientConnState: func(s balancer.ClientConnState) error { if len(s.ResolverState.Addresses) == 0 { return balancer.ErrBadResolverState } return nil }, } const balName = "BalancerErrorResolverPolling" balancer.Register(&funcBalancerBuilder{name: balName, instance: fb}) testResolverErrorPolling(t, func(r *manual.Resolver) { // No addresses so the balancer will fail. r.CC.UpdateState(resolver.State{}) }, func(r *manual.Resolver) { // UpdateState will block if ResolveNow is being called (which blocks on // rn), so call it in a goroutine. Include some address so the balancer // will be happy. go r.CC.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "x"}}}) }, WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, balName))) }
Go
/* * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights * Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.directory.model.transform; import java.util.Map; import java.util.Map.Entry; import java.math.*; import java.nio.ByteBuffer; import com.amazonaws.services.directory.model.*; import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*; import com.amazonaws.transform.*; import com.fasterxml.jackson.core.JsonToken; import static com.fasterxml.jackson.core.JsonToken.*; /** * CreateConditionalForwarderResult JSON Unmarshaller */ public class CreateConditionalForwarderResultJsonUnmarshaller implements Unmarshaller<CreateConditionalForwarderResult, JsonUnmarshallerContext> { public CreateConditionalForwarderResult unmarshall( JsonUnmarshallerContext context) throws Exception { CreateConditionalForwarderResult createConditionalForwarderResult = new CreateConditionalForwarderResult(); return createConditionalForwarderResult; } private static CreateConditionalForwarderResultJsonUnmarshaller instance; public static CreateConditionalForwarderResultJsonUnmarshaller getInstance() { if (instance == null) instance = new CreateConditionalForwarderResultJsonUnmarshaller(); return instance; } }
Java
--- id: version-2.8.2-io-rabbitmq-source title: RabbitMQ source connector sidebar_label: RabbitMQ source connector original_id: io-rabbitmq-source --- The RabbitMQ source connector receives messages from RabbitMQ clusters and writes messages to Pulsar topics. ## Configuration The configuration of the RabbitMQ source connector has the following properties. ### Property | Name | Type|Required | Default | Description |------|----------|----------|---------|-------------| | `connectionName` |String| true | " " (empty string) | The connection name. | | `host` | String| true | " " (empty string) | The RabbitMQ host. | | `port` | int |true | 5672 | The RabbitMQ port. | | `virtualHost` |String|true | / | The virtual host used to connect to RabbitMQ. | | `username` | String|false | guest | The username used to authenticate to RabbitMQ. | | `password` | String|false | guest | The password used to authenticate to RabbitMQ. | | `queueName` | String|true | " " (empty string) | The RabbitMQ queue name that messages should be read from or written to. | | `requestedChannelMax` | int|false | 0 | The initially requested maximum channel number. <br><br>0 means unlimited. | | `requestedFrameMax` | int|false |0 | The initially requested maximum frame size in octets. <br><br>0 means unlimited. | | `connectionTimeout` | int|false | 60000 | The timeout of TCP connection establishment in milliseconds. <br><br>0 means infinite. | | `handshakeTimeout` | int|false | 10000 | The timeout of AMQP0-9-1 protocol handshake in milliseconds. | | `requestedHeartbeat` | int|false | 60 | The requested heartbeat timeout in seconds. | | `prefetchCount` | int|false | 0 | The maximum number of messages that the server delivers.<br><br> 0 means unlimited. | | `prefetchGlobal` | boolean|false | false |Whether the setting should be applied to the entire channel rather than each consumer. | | `passive` | boolean|false | false | Whether the rabbitmq consumer should create its own queue or bind to an existing one. | ### Example Before using the RabbitMQ source connector, you need to create a configuration file through one of the following methods. * JSON ```json { "host": "localhost", "port": "5672", "virtualHost": "/", "username": "guest", "password": "guest", "queueName": "test-queue", "connectionName": "test-connection", "requestedChannelMax": "0", "requestedFrameMax": "0", "connectionTimeout": "60000", "handshakeTimeout": "10000", "requestedHeartbeat": "60", "prefetchCount": "0", "prefetchGlobal": "false", "passive": "false" } ``` * YAML ```yaml configs: host: "localhost" port: 5672 virtualHost: "/" username: "guest" password: "guest" queueName: "test-queue" connectionName: "test-connection" requestedChannelMax: 0 requestedFrameMax: 0 connectionTimeout: 60000 handshakeTimeout: 10000 requestedHeartbeat: 60 prefetchCount: 0 prefetchGlobal: "false" passive: "false" ```
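After the configuration file is ready, you can deploy the connector with the `pulsar-admin` CLI. The following command is a hedged sketch rather than a verbatim recipe: the NAR file path (`connectors/pulsar-io-rabbitmq-2.8.2.nar`), the source name, and the destination topic name are placeholder assumptions, so substitute the values for your own deployment.

```bash
# Create the RabbitMQ source using the YAML configuration file above.
# Paths and names below are examples, not values mandated by this guide.
bin/pulsar-admin sources create \
  --archive connectors/pulsar-io-rabbitmq-2.8.2.nar \
  --name rabbitmq-source \
  --destination-topic-name rabbitmq-topic \
  --source-config-file rabbitmq-source-config.yaml
```

Once the source is running, messages consumed from the configured queue (`test-queue` in the example configuration) are published to the destination Pulsar topic.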
Markdown
/* * Copyright 2020 ThoughtWorks, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.thoughtworks.go.server.controller.actions; import com.thoughtworks.go.config.validation.GoConfigValidity; import com.thoughtworks.go.server.web.JsonView; import com.thoughtworks.go.server.web.SimpleJsonView; import com.thoughtworks.go.serverhealth.ServerHealthState; import com.thoughtworks.go.util.GoConstants; import org.springframework.web.servlet.ModelAndView; import javax.servlet.http.HttpServletResponse; import java.util.LinkedHashMap; import java.util.Map; import static com.thoughtworks.go.util.GoConstants.ERROR_FOR_JSON; import static com.thoughtworks.go.util.GoConstants.RESPONSE_CHARSET_JSON; import static javax.servlet.http.HttpServletResponse.*; public class JsonAction implements RestfulAction { private final int status; private final Object json; public static JsonAction from(ServerHealthState serverHealthState) { if (serverHealthState.isSuccess()) { return jsonCreated(new LinkedHashMap()); } Map<String, Object> jsonLog = new LinkedHashMap<>(); jsonLog.put(ERROR_FOR_JSON, serverHealthState.getDescription()); return new JsonAction(serverHealthState.getType().getHttpCode(), jsonLog); } public static JsonAction jsonCreated(Object json) { return new JsonAction(SC_CREATED, json); } public static JsonAction jsonFound(Object json) { return new JsonAction(SC_OK, json); } public static JsonAction jsonOK() { return jsonOK(new LinkedHashMap()); } public static JsonAction jsonNotAcceptable(Object json) { return new JsonAction(SC_NOT_ACCEPTABLE, json); } public static JsonAction jsonForbidden() { return new JsonAction(SC_FORBIDDEN, new LinkedHashMap()); } public static JsonAction jsonForbidden(String message) { Map<String, Object> map = new LinkedHashMap<>(); map.put(ERROR_FOR_JSON, message); return new JsonAction(SC_FORBIDDEN, map); } public static JsonAction jsonForbidden(Exception e) { return jsonForbidden(e.getMessage()); } public static JsonAction jsonBadRequest(Object json) { return new JsonAction(SC_BAD_REQUEST, json); } public static JsonAction jsonNotFound(Object json) { return new JsonAction(SC_NOT_FOUND, json); } public static JsonAction jsonConflict(Object json) { return new JsonAction(SC_CONFLICT, json); } public static JsonAction jsonByValidity(Object json, GoConfigValidity.InvalidGoConfig configValidity) { return (configValidity.isType(GoConfigValidity.VT_CONFLICT) || configValidity.isType(GoConfigValidity.VT_MERGE_OPERATION_ERROR) || configValidity.isType(GoConfigValidity.VT_MERGE_POST_VALIDATION_ERROR) || configValidity.isType(GoConfigValidity.VT_MERGE_PRE_VALIDATION_ERROR)) ? 
jsonConflict(json) : jsonNotFound(json); } /** * @deprecated replace with createView */ @Override public ModelAndView respond(HttpServletResponse response) { return new JsonModelAndView(response, json, status); } private JsonAction(int status, Object json) { this.status = status; this.json = json; } public ModelAndView createView() { SimpleJsonView view = new SimpleJsonView(status, json); return new ModelAndView(view, JsonView.asMap(json)); } public static JsonAction jsonOK(Map jsonMap) { return new JsonAction(SC_OK, jsonMap); } private class JsonModelAndView extends ModelAndView { @Override public String getViewName() { return "jsonView"; } public JsonModelAndView(HttpServletResponse response, Object json, int status) { super(new JsonView(), JsonView.asMap(json)); // In IE, there's a problem with caching. We want to cache if we can. // This will force the browser to clear the cache only for this page. // If any other pages need to clear the cache, we might want to move this // logic to an interceptor. response.addHeader("Cache-Control", GoConstants.CACHE_CONTROL); response.setStatus(status); response.setContentType(RESPONSE_CHARSET_JSON); } } }
Java
/* * Copyright 2000-2017 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.util.xml.impl; import com.intellij.ide.highlighter.DomSupportEnabled; import com.intellij.openapi.Disposable; import com.intellij.openapi.components.ServiceManager; import com.intellij.openapi.fileTypes.StdFileTypes; import com.intellij.openapi.module.Module; import com.intellij.openapi.project.Project; import com.intellij.openapi.roots.ProjectFileIndex; import com.intellij.openapi.util.Condition; import com.intellij.openapi.util.Disposer; import com.intellij.openapi.util.Factory; import com.intellij.openapi.util.Key; import com.intellij.openapi.vfs.*; import com.intellij.openapi.vfs.newvfs.NewVirtualFile; import com.intellij.pom.PomManager; import com.intellij.pom.PomModel; import com.intellij.pom.PomModelAspect; import com.intellij.pom.event.PomModelEvent; import com.intellij.pom.event.PomModelListener; import com.intellij.pom.xml.XmlAspect; import com.intellij.pom.xml.XmlChangeSet; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiFile; import com.intellij.psi.PsiFileFactory; import com.intellij.psi.PsiManager; import com.intellij.psi.impl.PsiManagerEx; import com.intellij.psi.xml.XmlAttribute; import com.intellij.psi.xml.XmlElement; import com.intellij.psi.xml.XmlFile; import com.intellij.psi.xml.XmlTag; import com.intellij.reference.SoftReference; import com.intellij.semantic.SemKey; import com.intellij.semantic.SemService; import com.intellij.util.EventDispatcher; import com.intellij.util.SmartList; import com.intellij.util.containers.ContainerUtil; import com.intellij.util.xml.*; import com.intellij.util.xml.events.DomEvent; import com.intellij.util.xml.reflect.AbstractDomChildrenDescription; import com.intellij.util.xml.reflect.DomGenericInfo; import net.sf.cglib.proxy.AdvancedProxy; import net.sf.cglib.proxy.InvocationHandler; import org.jetbrains.annotations.NonNls; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.lang.ref.WeakReference; import java.lang.reflect.Type; import java.util.*; /** * @author peter */ public final class DomManagerImpl extends DomManager { private static final Key<Object> MOCK = Key.create("MockElement"); static final Key<WeakReference<DomFileElementImpl>> CACHED_FILE_ELEMENT = Key.create("CACHED_FILE_ELEMENT"); static final Key<DomFileDescription> MOCK_DESCRIPTION = Key.create("MockDescription"); static final SemKey<FileDescriptionCachedValueProvider> FILE_DESCRIPTION_KEY = SemKey.createKey("FILE_DESCRIPTION_KEY"); static final SemKey<DomInvocationHandler> DOM_HANDLER_KEY = SemKey.createKey("DOM_HANDLER_KEY"); static final SemKey<IndexedElementInvocationHandler> DOM_INDEXED_HANDLER_KEY = DOM_HANDLER_KEY.subKey("DOM_INDEXED_HANDLER_KEY"); static final SemKey<CollectionElementInvocationHandler> DOM_COLLECTION_HANDLER_KEY = DOM_HANDLER_KEY.subKey("DOM_COLLECTION_HANDLER_KEY"); static final SemKey<CollectionElementInvocationHandler> DOM_CUSTOM_HANDLER_KEY = 
DOM_HANDLER_KEY.subKey("DOM_CUSTOM_HANDLER_KEY"); static final SemKey<AttributeChildInvocationHandler> DOM_ATTRIBUTE_HANDLER_KEY = DOM_HANDLER_KEY.subKey("DOM_ATTRIBUTE_HANDLER_KEY"); private final EventDispatcher<DomEventListener> myListeners = EventDispatcher.create(DomEventListener.class); private final Project myProject; private final SemService mySemService; private final DomApplicationComponent myApplicationComponent; private boolean myChanging; public DomManagerImpl(Project project) { super(project); myProject = project; mySemService = SemService.getSemService(project); myApplicationComponent = DomApplicationComponent.getInstance(); final PomModel pomModel = PomManager.getModel(project); pomModel.addModelListener(new PomModelListener() { @Override public void modelChanged(PomModelEvent event) { if (myChanging) return; final XmlChangeSet changeSet = (XmlChangeSet)event.getChangeSet(pomModel.getModelAspect(XmlAspect.class)); if (changeSet != null) { for (XmlFile file : changeSet.getChangedFiles()) { DomFileElementImpl<DomElement> element = getCachedFileElement(file); if (element != null) { fireEvent(new DomEvent(element, false)); } } } } @Override public boolean isAspectChangeInteresting(PomModelAspect aspect) { return aspect instanceof XmlAspect; } }, project); VirtualFileManager.getInstance().addVirtualFileListener(new VirtualFileListener() { private final List<DomEvent> myDeletionEvents = new SmartList<>(); @Override public void contentsChanged(@NotNull VirtualFileEvent event) { if (!event.isFromSave()) { fireEvents(calcDomChangeEvents(event.getFile())); } } @Override public void fileMoved(@NotNull VirtualFileMoveEvent event) { fireEvents(calcDomChangeEvents(event.getFile())); } @Override public void beforeFileDeletion(@NotNull final VirtualFileEvent event) { myDeletionEvents.addAll(calcDomChangeEvents(event.getFile())); } @Override public void fileDeleted(@NotNull VirtualFileEvent event) { if (!myDeletionEvents.isEmpty()) { fireEvents(myDeletionEvents); myDeletionEvents.clear(); } } @Override public void propertyChanged(@NotNull VirtualFilePropertyEvent event) { final VirtualFile file = event.getFile(); if (!file.isDirectory() && VirtualFile.PROP_NAME.equals(event.getPropertyName())) { fireEvents(calcDomChangeEvents(file)); } } }, myProject); } public long getPsiModificationCount() { return PsiManager.getInstance(getProject()).getModificationTracker().getModificationCount(); } public <T extends DomInvocationHandler> void cacheHandler(SemKey<T> key, XmlElement element, T handler) { mySemService.setCachedSemElement(key, element, handler); } private PsiFile getCachedPsiFile(VirtualFile file) { return PsiManagerEx.getInstanceEx(myProject).getFileManager().getCachedPsiFile(file); } private List<DomEvent> calcDomChangeEvents(final VirtualFile file) { if (!(file instanceof NewVirtualFile) || myProject.isDisposed()) { return Collections.emptyList(); } final List<DomEvent> events = ContainerUtil.newArrayList(); VfsUtilCore.visitChildrenRecursively(file, new VirtualFileVisitor() { @Override public boolean visitFile(@NotNull VirtualFile file) { if (myProject.isDisposed() || !ProjectFileIndex.SERVICE.getInstance(myProject).isInContent(file)) { return false; } if (!file.isDirectory() && StdFileTypes.XML == file.getFileType()) { final PsiFile psiFile = getCachedPsiFile(file); if (psiFile != null && StdFileTypes.XML.equals(psiFile.getFileType()) && psiFile instanceof XmlFile) { final DomFileElementImpl domElement = getCachedFileElement((XmlFile)psiFile); if (domElement != null) { events.add(new 
DomEvent(domElement, false)); } } } return true; } @Nullable @Override public Iterable<VirtualFile> getChildrenIterable(@NotNull VirtualFile file) { return ((NewVirtualFile)file).getCachedChildren(); } }); return events; } @SuppressWarnings({"MethodOverridesStaticMethodOfSuperclass"}) public static DomManagerImpl getDomManager(Project project) { return (DomManagerImpl)DomManager.getDomManager(project); } @Override public void addDomEventListener(DomEventListener listener, Disposable parentDisposable) { myListeners.addListener(listener, parentDisposable); } @Override public final ConverterManager getConverterManager() { return ServiceManager.getService(ConverterManager.class); } @Override public final ModelMerger createModelMerger() { return new ModelMergerImpl(); } final void fireEvent(DomEvent event) { if (mySemService.isInsideAtomicChange()) return; incModificationCount(); myListeners.getMulticaster().eventOccured(event); } private void fireEvents(Collection<DomEvent> events) { for (DomEvent event : events) { fireEvent(event); } } @Override public final DomGenericInfo getGenericInfo(final Type type) { return myApplicationComponent.getStaticGenericInfo(type); } @Nullable public static DomInvocationHandler getDomInvocationHandler(DomElement proxy) { if (proxy instanceof DomFileElement) { return null; } if (proxy instanceof DomInvocationHandler) { return (DomInvocationHandler)proxy; } final InvocationHandler handler = AdvancedProxy.getInvocationHandler(proxy); if (handler instanceof StableInvocationHandler) { //noinspection unchecked final DomElement element = ((StableInvocationHandler<DomElement>)handler).getWrappedElement(); return element == null ? null : getDomInvocationHandler(element); } if (handler instanceof DomInvocationHandler) { return (DomInvocationHandler)handler; } return null; } @NotNull public static DomInvocationHandler getNotNullHandler(DomElement proxy) { DomInvocationHandler handler = getDomInvocationHandler(proxy); if (handler == null) { throw new AssertionError("null handler for " + proxy); } return handler; } public static StableInvocationHandler getStableInvocationHandler(Object proxy) { return (StableInvocationHandler)AdvancedProxy.getInvocationHandler(proxy); } public DomApplicationComponent getApplicationComponent() { return myApplicationComponent; } @Override public final Project getProject() { return myProject; } @Override @NotNull public final <T extends DomElement> DomFileElementImpl<T> getFileElement(final XmlFile file, final Class<T> aClass, String rootTagName) { //noinspection unchecked if (file.getUserData(MOCK_DESCRIPTION) == null) { file.putUserData(MOCK_DESCRIPTION, new MockDomFileDescription<>(aClass, rootTagName, file.getViewProvider().getVirtualFile())); mySemService.clearCache(); } final DomFileElementImpl<T> fileElement = getFileElement(file); assert fileElement != null; return fileElement; } @SuppressWarnings({"unchecked"}) @NotNull final <T extends DomElement> FileDescriptionCachedValueProvider<T> getOrCreateCachedValueProvider(final XmlFile xmlFile) { //noinspection ConstantConditions return mySemService.getSemElement(FILE_DESCRIPTION_KEY, xmlFile); } public final Set<DomFileDescription> getFileDescriptions(String rootTagName) { return myApplicationComponent.getFileDescriptions(rootTagName); } public final Set<DomFileDescription> getAcceptingOtherRootTagNameDescriptions() { return myApplicationComponent.getAcceptingOtherRootTagNameDescriptions(); } @NotNull @NonNls public final String getComponentName() { return getClass().getName(); } final 
void runChange(Runnable change) { final boolean b = setChanging(true); try { change.run(); } finally { setChanging(b); } } final boolean setChanging(final boolean changing) { boolean oldChanging = myChanging; if (changing) { assert !oldChanging; } myChanging = changing; return oldChanging; } @Override @Nullable public final <T extends DomElement> DomFileElementImpl<T> getFileElement(XmlFile file) { if (file == null) return null; if (!(file.getFileType() instanceof DomSupportEnabled)) return null; final VirtualFile virtualFile = file.getVirtualFile(); if (virtualFile != null && virtualFile.isDirectory()) return null; return this.<T>getOrCreateCachedValueProvider(file).getFileElement(); } @Nullable static <T extends DomElement> DomFileElementImpl<T> getCachedFileElement(@NotNull XmlFile file) { //noinspection unchecked return SoftReference.dereference(file.getUserData(CACHED_FILE_ELEMENT)); } @Override @Nullable public final <T extends DomElement> DomFileElementImpl<T> getFileElement(XmlFile file, Class<T> domClass) { final DomFileDescription description = getDomFileDescription(file); if (description != null && myApplicationComponent.assignabilityCache.isAssignable(domClass, description.getRootElementClass())) { return getFileElement(file); } return null; } @Override @Nullable public final DomElement getDomElement(final XmlTag element) { if (myChanging) return null; final DomInvocationHandler handler = getDomHandler(element); return handler != null ? handler.getProxy() : null; } @Override @Nullable public GenericAttributeValue getDomElement(final XmlAttribute attribute) { if (myChanging) return null; final AttributeChildInvocationHandler handler = mySemService.getSemElement(DOM_ATTRIBUTE_HANDLER_KEY, attribute); return handler == null ? null : (GenericAttributeValue)handler.getProxy(); } @Nullable public DomInvocationHandler getDomHandler(final XmlElement tag) { if (tag == null) return null; List<DomInvocationHandler> cached = mySemService.getCachedSemElements(DOM_HANDLER_KEY, tag); if (cached != null && !cached.isEmpty()) { return cached.get(0); } return mySemService.getSemElement(DOM_HANDLER_KEY, tag); } @Override @Nullable public AbstractDomChildrenDescription findChildrenDescription(@NotNull final XmlTag tag, @NotNull final DomElement parent) { return findChildrenDescription(tag, getDomInvocationHandler(parent)); } static AbstractDomChildrenDescription findChildrenDescription(final XmlTag tag, final DomInvocationHandler parent) { final DomGenericInfoEx info = parent.getGenericInfo(); return info.findChildrenDescription(parent, tag.getLocalName(), tag.getNamespace(), false, tag.getName()); } public final boolean isDomFile(@Nullable PsiFile file) { return file instanceof XmlFile && getFileElement((XmlFile)file) != null; } @Nullable public final DomFileDescription<?> getDomFileDescription(PsiElement element) { if (element instanceof XmlElement) { final PsiFile psiFile = element.getContainingFile(); if (psiFile instanceof XmlFile) { return getDomFileDescription((XmlFile)psiFile); } } return null; } @Override public final <T extends DomElement> T createMockElement(final Class<T> aClass, final Module module, final boolean physical) { final XmlFile file = (XmlFile)PsiFileFactory.getInstance(myProject).createFileFromText("a.xml", StdFileTypes.XML, "", (long)0, physical); file.putUserData(MOCK_ELEMENT_MODULE, module); file.putUserData(MOCK, new Object()); return getFileElement(file, aClass, "I_sincerely_hope_that_nobody_will_have_such_a_root_tag_name").getRootElement(); } @Override public final 
boolean isMockElement(DomElement element) { return DomUtil.getFile(element).getUserData(MOCK) != null; } @Override public final <T extends DomElement> T createStableValue(final Factory<T> provider) { return createStableValue(provider, t -> t.isValid()); } @Override public final <T> T createStableValue(final Factory<T> provider, final Condition<T> validator) { final T initial = provider.create(); assert initial != null; final StableInvocationHandler handler = new StableInvocationHandler<>(initial, provider, validator); final Set<Class> intf = new HashSet<>(); ContainerUtil.addAll(intf, initial.getClass().getInterfaces()); intf.add(StableElement.class); //noinspection unchecked return (T)AdvancedProxy.createProxy(initial.getClass().getSuperclass(), intf.toArray(new Class[intf.size()]), handler); } public final <T extends DomElement> void registerFileDescription(final DomFileDescription<T> description, Disposable parentDisposable) { registerFileDescription(description); Disposer.register(parentDisposable, new Disposable() { @Override public void dispose() { getFileDescriptions(description.getRootTagName()).remove(description); getAcceptingOtherRootTagNameDescriptions().remove(description); } }); } @Override public final void registerFileDescription(final DomFileDescription description) { mySemService.clearCache(); myApplicationComponent.registerFileDescription(description); } @Override @NotNull public final DomElement getResolvingScope(GenericDomValue element) { final DomFileDescription<?> description = DomUtil.getFileElement(element).getFileDescription(); return description.getResolveScope(element); } @Override @Nullable public final DomElement getIdentityScope(DomElement element) { final DomFileDescription description = DomUtil.getFileElement(element).getFileDescription(); return description.getIdentityScope(element); } @Override public TypeChooserManager getTypeChooserManager() { return myApplicationComponent.getTypeChooserManager(); } public void performAtomicChange(@NotNull Runnable change) { mySemService.performAtomicChange(change); if (!mySemService.isInsideAtomicChange()) { incModificationCount(); } } public SemService getSemService() { return mySemService; } }
Java
# Copyright (c) 2013-2016 Cinchapi Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nose.tools import * import os import time from subprocess import * import signal from . import test_data from concourse import Concourse, Tag, Link, Diff, Operator, constants from concourse.thriftapi.shared.ttypes import Type from concourse.utils import python_to_thrift import ujson from tests import ignore import socket class IntegrationBaseTest(object): """ Base class for unit tests that use Mockcourse. """ port = None process = None client = None expected_network_latency = 0.05 @classmethod def setup_class(cls): """ Fixture method to start Mockcourse and connect before the tests start to run. """ port = IntegrationBaseTest.get_open_port() dir = os.path.dirname(os.path.realpath(__file__)) + '/../../mockcourse' script = dir + '/mockcourse '+str(port) cls.process = Popen(script, shell=True, preexec_fn=os.setsid) cls.client = None tries = 5 while tries > 0 and cls.client is None: tries -= 1 time.sleep(1) # Wait for Mockcourse to start try: cls.client = Concourse.connect(port=port) except RuntimeError as e: if tries == 0: raise e else: continue @classmethod def teardown_class(cls): """ Fixture method to kill Mockcourse after all the tests have run. """ os.killpg(cls.process.pid, signal.SIGTERM) def tearDown(self): """ "Logout" and clear all the data that the client stored in Mockcourse after each test. This ensures that the environment for each test is clean and predictable.
""" self.client.logout() # Mockcourse logout simply clears the content of the datastore def get_time_anchor(self): """ Return a time anchor and sleep for long enough to account for network latency """ anchor = test_data.current_time_millis() time.sleep(self.expected_network_latency) return anchor @staticmethod def get_open_port(): """Return an open port that is chosen by the OS """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(("localhost", 0)) port = sock.getsockname()[1] sock.close() return port class TestPythonClientDriver(IntegrationBaseTest): """ Implementations for standard unit tests that verify the Python client driver conforms to the Concourse standard """ def __do_test_value_round_trip(self, value, ttype): """ Do the round_trip test logic for the specified value of the specified type :param value: """ key = test_data.random_string() record = self.client.add(key=key, value=value) stored = self.client.get(key=key, record=record) assert_equal(value, stored) assert_equal(python_to_thrift(stored).type, ttype) def test_string_round_trip(self): self.__do_test_value_round_trip(test_data.random_string(), Type.STRING) def test_bool_round_trip(self): self.__do_test_value_round_trip(test_data.random_bool(), Type.BOOLEAN) def test_tag_round_trip(self): self.__do_test_value_round_trip(Tag.create(test_data.random_string()), Type.TAG) def test_link_round_trip(self): self.__do_test_value_round_trip(Link.to(test_data.random_int()), Type.LINK) def test_int_round_trip(self): self.__do_test_value_round_trip(test_data.random_int(), Type.INTEGER) self.__do_test_value_round_trip(2147483647, Type.INTEGER) self.__do_test_value_round_trip(-2147483648, Type.INTEGER) def test_long_round_trip(self): self.__do_test_value_round_trip(2147483648, Type.LONG) self.__do_test_value_round_trip(-2147483649, Type.LONG) self.__do_test_value_round_trip(test_data.random_long(), Type.LONG) def test_float_round_trip(self): self.__do_test_value_round_trip(3.4028235E38, Type.DOUBLE) self.__do_test_value_round_trip(-1.4E-45, Type.DOUBLE) def test_abort(self): self.client.stage() key = test_data.random_string() value = "some value" record = 1 self.client.add(key=key, value=value, record=record) self.client.abort() assert_is_none(self.client.get(key=key, record=record)) def test_add_key_value(self): key = test_data.random_string() value = "static value" record = self.client.add(key=key, value=value) assert_is_not_none(record) stored = self.client.get(key=key, record=record) assert_equal(stored, value) def test_add_key_value_record(self): key = test_data.random_string() value = "static value" record = 17 assert_true(self.client.add(key=key, value=value, record=record)) stored = self.client.get(key=key, record=record) assert_equal(stored, value) def test_add_key_value_records(self): key = test_data.random_string() value = "static value" records = [1, 2, 3] result = self.client.add(key=key, value=value, records=records) assert_true(isinstance(result, dict)) assert_true(result.get(1)) assert_true(result.get(2)) assert_true(result.get(3)) def test_audit_key_record(self): key = test_data.random_string() values = ["one", "two", "three"] record = 1000 for value in values: self.client.set(key, value, record) audit = self.client.audit(key, record) assert_equal(5, len(audit)) expected = 'ADD' for k, v in audit.items(): assert_true(v.startswith(expected)) expected = 'REMOVE' if expected == 'ADD' else 'ADD' def test_audit_key_record_start(self): key = test_data.random_string() values = ["one", "two", "three"] record = 
1001 for value in values: self.client.set(key, value, record) start = self.client.time() values = [4, 5, 6] for value in values: self.client.set(key, value, record) audit = self.client.audit(key, record, start=start) assert_equal(6, len(audit)) def test_audit_key_record_start_end(self): key = test_data.random_string() values = ["one", "two", "three"] record = 1002 for value in values: self.client.set(key, value, record) start = self.client.time() values = [4, 5, 6] for value in values: self.client.set(key, value, record) end = self.client.time() values = [True, False] for value in values: self.client.set(key, value, record) audit = self.client.audit(key, record, start=start, end=end) assert_equal(6, len(audit)) def test_audit_key_record_startstr(self): key = test_data.random_string() values = ["one", "two", "three"] record = 1001 for value in values: self.client.set(key, value, record) anchor = self.get_time_anchor() values = [4, 5, 6] for value in values: self.client.set(key, value, record) start = test_data.get_elapsed_millis_string(anchor) audit = self.client.audit(key, record, start=start) assert_equal(6, len(audit)) def test_audit_key_record_startstr_endstr(self): key = test_data.random_string() values = ["one", "two", "three"] record = 1002 for value in values: self.client.set(key, value, record) start_anchor = self.get_time_anchor() values = [4, 5, 6] for value in values: self.client.set(key, value, record) end_anchor = self.get_time_anchor() values = [True, False] for value in values: self.client.set(key, value, record) start = test_data.get_elapsed_millis_string(start_anchor) end = test_data.get_elapsed_millis_string(end_anchor) audit = self.client.audit(key, record, start=start, end=end) assert_equal(6, len(audit)) def test_audit_record(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value = "foo" record = 1002 self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) audit = self.client.audit(record) assert_equal(3, len(audit)) def test_audit_record_start(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value = "bar" record = 344 self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) start = self.client.time() self.client.remove(key1, value, record) self.client.remove(key2, value, record) self.client.remove(key3, value, record) audit = self.client.audit(record, start=start) assert_equal(3, len(audit)) def test_audit_record_start_end(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value = "bar" record = 344 self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) start = self.client.time() self.client.remove(key1, value, record) self.client.remove(key2, value, record) self.client.remove(key3, value, record) end = self.client.time() self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) audit = self.client.audit(record, start=start, end=end) assert_equal(3, len(audit)) def test_audit_record_startstr(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value = "bar" record = 344 self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) anchor = self.get_time_anchor() self.client.remove(key1, 
value, record) self.client.remove(key2, value, record) self.client.remove(key3, value, record) start = test_data.get_elapsed_millis_string(anchor) audit = self.client.audit(record, start=start) assert_equal(3, len(audit)) def test_audit_record_startstr_endstr(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value = "bar" record = 344 self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) start_anchor = self.get_time_anchor() self.client.remove(key1, value, record) self.client.remove(key2, value, record) self.client.remove(key3, value, record) end_anchor = self.get_time_anchor() self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) start = test_data.get_elapsed_millis_string(start_anchor) end = test_data.get_elapsed_millis_string(end_anchor) audit = self.client.audit(record, start=start, end=end) assert_equal(3, len(audit)) def test_browse_key(self): key = test_data.random_string() value = 10 self.client.add(key, value, [1, 2, 3]) value = test_data.random_string() self.client.add(key, value, [10, 20, 30]) data = self.client.browse(key) assert_equal([1, 2, 3], data.get(10)) assert_equal([20, 10, 30], data.get(value)) def test_browse_keys(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value1 = "A" value2 = "B" value3 = "C" record1 = 1 record2 = 2 record3 = 3 self.client.add(key1, value1, record1) self.client.add(key2, value2, record2) self.client.add(key3, value3, record3) data = self.client.browse([key1, key2, key3]) assert_equal({value1: [record1]}, data.get(key1)) assert_equal({value2: [record2]}, data.get(key2)) assert_equal({value3: [record3]}, data.get(key3)) def test_browse_keys_time(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value1 = "A" value2 = "B" value3 = "C" record1 = 1 record2 = 2 record3 = 3 self.client.add(key1, value1, record1) self.client.add(key2, value2, record2) self.client.add(key3, value3, record3) time = self.client.time() self.client.add(key1, "Foo") self.client.add(key2, "Foo") self.client.add(key3, "Foo") data = self.client.browse([key1, key2, key3], time=time) assert_equal({value1: [record1]}, data.get(key1)) assert_equal({value2: [record2]}, data.get(key2)) assert_equal({value3: [record3]}, data.get(key3)) def test_browse_key_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value1 = "A" value2 = "B" value3 = "C" record1 = 1 record2 = 2 record3 = 3 self.client.add(key1, value1, record1) self.client.add(key2, value2, record2) self.client.add(key3, value3, record3) ts = test_data.get_elapsed_millis_string(self.get_time_anchor()) data = self.client.browse([key1, key2, key3], time=ts) assert_equal({value1: [record1]}, data.get(key1)) assert_equal({value2: [record2]}, data.get(key2)) assert_equal({value3: [record3]}, data.get(key3)) @ignore def test_browse_keys_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value1 = "A" value2 = "B" value3 = "C" record1 = 1 record2 = 2 record3 = 3 self.client.add(key1, value1, record1) self.client.add(key2, value2, record2) self.client.add(key3, value3, record3) anchor = self.get_time_anchor() self.client.add(key1, "D", record1) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.browse([key1, key2, 
key3], time=ts) assert_equal({value1: [record1]}, data.get(key1)) assert_equal({value2: [record2]}, data.get(key2)) assert_equal({value3: [record3]}, data.get(key3)) def test_browse_key_time(self): key = test_data.random_string() value = 10 self.client.add(key, value, [1, 2, 3]) value = test_data.random_string() self.client.add(key, value, [10, 20, 30]) timestamp = self.client.time() self.client.add(key=key, value=True) data = self.client.browse(key, timestamp) assert_equal([1, 2, 3], data.get(10)) assert_equal([20, 10, 30], data.get(value)) def test_chronologize_key_record(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) self.client.remove(key, 1, record) self.client.remove(key, 2, record) self.client.remove(key, 3, record) data = self.client.chronologize(key, record) assert_equal([[1], [1, 2], [1, 2, 3], [2, 3], [3]], list(data.values())) def test_chronologize_key_record_start(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) start = self.client.time() self.client.remove(key, 1, record) self.client.remove(key, 2, record) self.client.remove(key, 3, record) data = self.client.chronologize(key, record, time=start) assert_equal([[2, 3], [3]], list(data.values())) def test_chronologize_key_record_start_end(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) start = self.client.time() self.client.remove(key, 1, record) end = self.client.time() self.client.remove(key, 2, record) self.client.remove(key, 3, record) data = self.client.chronologize(key, record, timestamp=start, end=end) assert_equal([[2, 3]], list(data.values())) def test_chronologize_key_record_startstr(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) anchor = self.get_time_anchor() self.client.remove(key, 1, record) self.client.remove(key, 2, record) self.client.remove(key, 3, record) start = test_data.get_elapsed_millis_string(anchor) data = self.client.chronologize(key, record, time=start) assert_equal([[2, 3], [3]], list(data.values())) def test_chronologize_key_record_startstr_endstr(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) start_anchor = self.get_time_anchor() self.client.remove(key, 1, record) end_anchor = self.get_time_anchor() self.client.remove(key, 2, record) self.client.remove(key, 3, record) start = test_data.get_elapsed_millis_string(start_anchor) end = test_data.get_elapsed_millis_string(end_anchor) data = self.client.chronologize(key, record, timestamp=start, end=end) assert_equal([[2, 3]], list(data.values())) def test_clear_key_record(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) self.client.clear(key=key, record=record) data = self.client.select(key=key, record=record) assert_equal([], data) def test_clear_key_records(self): key = test_data.random_string() records = [1, 2, 3] self.client.add(key, 1, records) self.client.add(key, 2, records) self.client.add(key, 3, records) self.client.clear(key=key, 
records=records) data = self.client.select(key=key, records=records) assert_equal({}, data) def test_clear_keys_record(self): key1 = test_data.random_string(6) key2 = test_data.random_string(7) key3 = test_data.random_string(8) record = test_data.random_long() self.client.add(key1, 1, record) self.client.add(key2, 2, record) self.client.add(key3, 3, record) self.client.clear(keys=[key1, key2, key3], record=record) data = self.client.select(keys=[key1, key2, key3], record=record) assert_equal({}, data) def test_clear_keys_records(self): data = { 'a': 'A', 'b': 'B', 'c': ['C', True], 'd': 'D' } records = [1, 2, 3] self.client.insert(data=data, records=records) self.client.clear(keys=['a', 'b', 'c'], records=records) data = self.client.get(key='d', records=records) assert_equal({ 1: 'D', 2: 'D', 3: 'D' }, data) def test_clear_record(self): data = { 'a': 'A', 'b': 'B', 'c': ['C', True] } record = next(iter(self.client.insert(data))) self.client.clear(record=record) data = self.client.select(record=record) assert_equal({}, data) def test_clear_records(self): data = { 'a': 'A', 'b': 'B', 'c': ['C', True], 'd': 'D' } records = [1, 2, 3] self.client.insert(data=data, records=records) self.client.clear(records=records) data = self.client.select(records=records) assert_equal({1: {}, 2: {}, 3: {}}, data) def test_commit(self): self.client.stage() record = self.client.add("name", "jeff nelson") self.client.commit() assert_equal(['name'], list(self.client.describe(record))) def test_describe_record(self): self.client.set('name', 'tom brady', 1) self.client.set('age', 100, 1) self.client.set('team', 'new england patriots', 1) keys = self.client.describe(1) assert_equals(['age', 'name', 'team'], keys) def test_describe_record_time(self): self.client.set('name', 'tom brady', 1) self.client.set('age', 100, 1) self.client.set('team', 'new england patriots', 1) timestamp = self.client.time() self.client.clear('name', 1) keys = self.client.describe(1, time=timestamp) assert_equals(['age', 'name', 'team'], keys) def test_describe_record_timestr(self): self.client.set('name', 'tom brady', 1) self.client.set('age', 100, 1) self.client.set('team', 'new england patriots', 1) anchor = self.get_time_anchor() self.client.clear('name', 1) timestamp = test_data.get_elapsed_millis_string(anchor) keys = self.client.describe(1, time=timestamp) assert_equals(['age', 'name', 'team'], keys) def test_describe_records(self): records = [1, 2, 3] self.client.set('name', 'tom brady', records) self.client.set('age', 100, records) self.client.set('team', 'new england patriots', records) keys = self.client.describe(records) assert_equals(['age', 'name', 'team'], keys[1]) assert_equals(['age', 'name', 'team'], keys[2]) assert_equals(['age', 'name', 'team'], keys[3]) def test_describe_records_time(self): records = [1, 2, 3] self.client.set('name', 'tom brady', records) self.client.set('age', 100, records) self.client.set('team', 'new england patriots', records) timestamp = self.client.time() self.client.clear(records=records) keys = self.client.describe(records, timestamp=timestamp) assert_equals(['age', 'name', 'team'], keys[1]) assert_equals(['age', 'name', 'team'], keys[2]) assert_equals(['age', 'name', 'team'], keys[3]) def test_describe_records_timestr(self): records = [1, 2, 3] self.client.set('name', 'tom brady', records) self.client.set('age', 100, records) self.client.set('team', 'new england patriots', records) anchor = self.get_time_anchor() self.client.clear(records=records) timestamp = 
test_data.get_elapsed_millis_string(anchor) keys = self.client.describe(records, timestamp=timestamp) assert_equals(['age', 'name', 'team'], keys[1]) assert_equals(['age', 'name', 'team'], keys[2]) assert_equals(['age', 'name', 'team'], keys[3]) def test_diff_key_record_start(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) start = self.client.time() self.client.add(key, 2, record) self.client.remove(key, 1, record) diff = self.client.diff(key, record, start) assert_equal([2], diff.get(Diff.ADDED)) assert_equal([1], diff.get(Diff.REMOVED)) def test_diff_key_record_startstr(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) anchor = self.get_time_anchor() self.client.add(key, 2, record) self.client.remove(key, 1, record) start = test_data.get_elapsed_millis_string(anchor) diff = self.client.diff(key, record, start) assert_equal([2], diff.get(Diff.ADDED)) assert_equal([1], diff.get(Diff.REMOVED)) def test_diff_key_record_start_end(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) start = self.client.time() self.client.add(key, 2, record) self.client.remove(key, 1, record) end = self.client.time() self.client.set(key, 3, record) diff = self.client.diff(key, record, start, end) assert_equal([2], diff.get(Diff.ADDED)) assert_equal([1], diff.get(Diff.REMOVED)) def test_diff_key_record_startstr_endstr(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) start_anchor = self.get_time_anchor() self.client.add(key, 2, record) self.client.remove(key, 1, record) end_anchor = self.get_time_anchor() self.client.set(key, 3, record) start = test_data.get_elapsed_millis_string(start_anchor) end = test_data.get_elapsed_millis_string(end_anchor) diff = self.client.diff(key, record, start, end) assert_equal([2], diff.get(Diff.ADDED)) assert_equal([1], diff.get(Diff.REMOVED)) def test_diff_key_start(self): key = test_data.random_string() self.client.add(key=key, value=1, record=1) start = self.client.time() self.client.add(key=key, value=2, record=1) self.client.add(key=key, value=1, record=2) self.client.add(key=key, value=3, record=3) self.client.remove(key=key, value=1, record=2) diff = self.client.diff(key=key, start=start) assert_equal(2, len(diff.keys())) diff2 = diff.get(2) diff3 = diff.get(3) assert_equal([1], diff2.get(Diff.ADDED)) assert_equal([3], diff3.get(Diff.ADDED)) assert_is_none(diff2.get(Diff.REMOVED)) assert_is_none(diff3.get(Diff.REMOVED)) def test_diff_key_startstr(self): key = test_data.random_string() self.client.add(key=key, value=1, record=1) anchor = self.get_time_anchor() self.client.add(key=key, value=2, record=1) self.client.add(key=key, value=1, record=2) self.client.add(key=key, value=3, record=3) self.client.remove(key=key, value=1, record=2) start = test_data.get_elapsed_millis_string(anchor) diff = self.client.diff(key=key, start=start) assert_equal(2, len(diff.keys())) diff2 = diff.get(2) diff3 = diff.get(3) assert_equal([1], diff2.get(Diff.ADDED)) assert_equal([3], diff3.get(Diff.ADDED)) assert_is_none(diff2.get(Diff.REMOVED)) assert_is_none(diff3.get(Diff.REMOVED)) def test_diff_key_start_end(self): key = test_data.random_string() self.client.add(key=key, value=1, record=1) start = self.client.time() self.client.add(key=key, value=2, record=1) self.client.add(key=key, value=1, record=2) self.client.add(key=key, value=3, record=3) self.client.remove(key=key, value=1, 
record=2) end = self.client.time() self.client.add(key=key, value=4, record=1) diff = self.client.diff(key=key, start=start, end=end) assert_equal(2, len(diff.keys())) diff2 = diff.get(2) diff3 = diff.get(3) assert_equal([1], diff2.get(Diff.ADDED)) assert_equal([3], diff3.get(Diff.ADDED)) assert_is_none(diff2.get(Diff.REMOVED)) assert_is_none(diff3.get(Diff.REMOVED)) def test_diff_key_startstr_endstr(self): key = test_data.random_string() self.client.add(key=key, value=1, record=1) start_anchor = self.get_time_anchor() self.client.add(key=key, value=2, record=1) self.client.add(key=key, value=1, record=2) self.client.add(key=key, value=3, record=3) self.client.remove(key=key, value=1, record=2) end_anchor = self.get_time_anchor() self.client.add(key=key, value=4, record=1) start = test_data.get_elapsed_millis_string(start_anchor) end = test_data.get_elapsed_millis_string(end_anchor) diff = self.client.diff(key=key, start=start, end=end) assert_equal(2, len(diff.keys())) diff2 = diff.get(2) diff3 = diff.get(3) assert_equal([1], diff2.get(Diff.ADDED)) assert_equal([3], diff3.get(Diff.ADDED)) assert_is_none(diff2.get(Diff.REMOVED)) assert_is_none(diff3.get(Diff.REMOVED)) def test_diff_record_start(self): self.client.add(key="foo", value=1, record=1) start = self.client.time() self.client.set(key="foo", value=2, record=1) self.client.add(key="bar", value=True, record=1) diff = self.client.diff(record=1, time=start) assert_equal([1], diff.get('foo').get(Diff.REMOVED)) assert_equal([2], diff.get('foo').get(Diff.ADDED)) assert_equal([True], diff.get('bar').get(Diff.ADDED)) def test_diff_record_startstr(self): self.client.add(key="foo", value=1, record=1) anchor = self.get_time_anchor() self.client.set(key="foo", value=2, record=1) self.client.add(key="bar", value=True, record=1) start = test_data.get_elapsed_millis_string(anchor) diff = self.client.diff(record=1, time=start) assert_equal([1], diff.get('foo').get(Diff.REMOVED)) assert_equal([2], diff.get('foo').get(Diff.ADDED)) assert_equal([True], diff.get('bar').get(Diff.ADDED)) def test_diff_record_start_end(self): self.client.add(key="foo", value=1, record=1) start = self.client.time() self.client.set(key="foo", value=2, record=1) self.client.add(key="bar", value=True, record=1) end = self.client.time() self.client.set(key="car", value=100, record=1) diff = self.client.diff(record=1, time=start, end=end) assert_equal([1], diff.get('foo').get(Diff.REMOVED)) assert_equal([2], diff.get('foo').get(Diff.ADDED)) assert_equal([True], diff.get('bar').get(Diff.ADDED)) def test_diff_record_startstr_endstr(self): self.client.add(key="foo", value=1, record=1) start_anchor = self.get_time_anchor() self.client.set(key="foo", value=2, record=1) self.client.add(key="bar", value=True, record=1) end_anchor = self.get_time_anchor() self.client.set(key="car", value=100, record=1) start = test_data.get_elapsed_millis_string(start_anchor) end = test_data.get_elapsed_millis_string(end_anchor) diff = self.client.diff(record=1, time=start, end=end) assert_equal([1], diff.get('foo').get(Diff.REMOVED)) assert_equal([2], diff.get('foo').get(Diff.ADDED)) assert_equal([True], diff.get('bar').get(Diff.ADDED)) def test_find_ccl(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) records = list(self.client.find(key+' > 3')) assert_equal(list(range(4, 10)), records) @raises(Exception) def test_find_ccl_handle_parse_exception(self): self.client.find(ccl="throw parse exception") def test_find_key_operator_value(self): key = 
test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3)) assert_equal(list(range(4, 10)), records) def test_find_key_operator_values(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6])) assert_equal([3, 4, 5], records) def test_find_key_operator_value_time(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) ts = self.client.time() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3, time=ts)) assert_equal(list(range(4, 10)), records) def test_find_key_operator_value_timestr(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) anchor = self.get_time_anchor() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) ts = test_data.get_elapsed_millis_string(anchor) records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3, time=ts)) assert_equal(list(range(4, 10)), records) def test_find_key_operator_values_time(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) ts = self.client.time() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6], time=ts)) assert_equal([3, 4, 5], records) def test_find_key_operator_values_timestr(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) anchor = self.get_time_anchor() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) ts = test_data.get_elapsed_millis_string(anchor) records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6], time=ts)) assert_equal([3, 4, 5], records) def test_find_key_operatorstr_values_time(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) ts = self.client.time() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) records = list(self.client.find(key=key, operator="bw", values=[3, 6], time=ts)) assert_equal([3, 4, 5], records) def test_find_key_operatorstr_values(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) records = list(self.client.find(key=key, operator="bw", values=[3, 6])) assert_equal([3, 4, 5], records) def test_find_key_operatorstr_values_timestr(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) anchor = self.get_time_anchor() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) ts = test_data.get_elapsed_millis_string(anchor) records = list(self.client.find(key=key, operator="bw", values=[3, 6], time=ts)) assert_equal([3, 4, 5], records) def test_find_key_operatorstr_value(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) records = list(self.client.find(key=key, operator="gt", value=3)) assert_equal(list(range(4, 10)), records) def test_find_key_operatorstr_value_time(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) ts = self.client.time() for n in range(0, 10): self.client.add(key=key, 
value=n, record=n+1) records = list(self.client.find(key=key, operator="gt", value=3, time=ts)) assert_equal(list(range(4, 10)), records) def test_find_key_operatorstr_value_timestr(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) anchor = self.get_time_anchor() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) ts = test_data.get_elapsed_millis_string(anchor) records = list(self.client.find(key=key, operator="gt", value=3, time=ts)) assert_equal(list(range(4, 10)), records) def test_get_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) ccl = key2 + ' = 10' data = self.client.get(ccl=ccl) expected = { key1: 3, key2: 10 } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_get_ccl_time(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) ts = self.client.time() self.client.set(key=key2, value=11, records=[record1, record2]) ccl = key2 + ' > 10' data = self.client.get(ccl=ccl, time=ts) expected = { key1: 3, key2: 10 } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_get_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) anchor = self.get_time_anchor() self.client.set(key=key2, value=11, records=[record1, record2]) ccl = key2 + ' > 10' ts = test_data.get_elapsed_millis_string(anchor) data = self.client.get(ccl=ccl, time=ts) expected = { key1: 3, key2: 10 } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_get_key_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ccl = key2 + ' = 10' data = self.client.get(key=key1, ccl=ccl) expected = { record1: 3, record2: 4 } assert_equal(expected, data) def test_get_keys_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, 
records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ccl = key2 + ' = 10' data = self.client.get(keys=[key1, key2], ccl=ccl) expected = { record1: {key1: 3, key2: 10}, record2: {key1: 4, key2: 10}, } assert_equal(expected, data) def test_get_key_ccl_time(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ts = self.client.time() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) data = self.client.get(key=key1, ccl=ccl, time=ts) expected = { record1: 3, record2: 4 } assert_equal(expected, data) def test_get_keys_ccl_time(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ts = self.client.time() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) data = self.client.get(key=[key1, key2], ccl=ccl, time=ts) expected = { record1: {key1: 3, key2: 10}, record2: {key1: 4, key2: 10}, } assert_equal(expected, data) def test_get_key_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) anchor = self.get_time_anchor() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.get(key=key1, ccl=ccl, time=ts) expected = { record1: 3, record2: 4 } assert_equal(expected, data) def test_get_keys_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) anchor = self.get_time_anchor() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.get(key=[key1, key2], ccl=ccl, time=ts) expected = { record1: {key1: 3, key2: 10}, record2: {key1: 4, key2: 10}, } assert_equal(expected, data) def test_get_key_record(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) assert_equal(3, self.client.get(key='foo', record=1)) def test_get_key_record_time(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) 
ts = self.client.time() self.client.add('foo', 4, 1) assert_equal(3, self.client.get(key='foo', record=1, time=ts)) def test_get_key_record_timestr(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) anchor = self.get_time_anchor() self.client.add('foo', 4, 1) ts = test_data.get_elapsed_millis_string(anchor) assert_equal(3, self.client.get(key='foo', record=1, time=ts)) def test_get_key_records(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) assert_equal({ 1: 3, 2: 3, 3: 3 }, self.client.get(key='foo', record=[1, 2, 3])) def test_get_key_records_time(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) ts = self.client.time() self.client.add('foo', 4, [1, 2, 3]) assert_equal({ 1: 3, 2: 3, 3: 3 }, self.client.get(key='foo', record=[1, 2, 3], time=ts)) def test_get_key_records_timestr(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) anchor = self.get_time_anchor() self.client.add('foo', 4, [1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) assert_equal({ 1: 3, 2: 3, 3: 3 }, self.client.get(key='foo', record=[1, 2, 3], time=ts)) def test_get_keys_record(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) data = self.client.get(keys=['foo', 'bar'], record=1) expected = { 'foo': 2, 'bar': 2 } assert_equal(expected, data) def test_get_keys_record_time(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) ts = self.client.time() self.client.add('foo', 3, 1) self.client.add('bar', 3, 1) data = self.client.get(keys=['foo', 'bar'], record=1, time=ts) expected = { 'foo': 2, 'bar': 2 } assert_equal(expected, data) def test_get_keys_record_timestr(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) anchor = self.get_time_anchor() self.client.add('foo', 3, 1) self.client.add('bar', 3, 1) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.get(keys=['foo', 'bar'], record=1, time=ts) expected = { 'foo': 2, 'bar': 2 } assert_equal(expected, data) def test_get_keys_records_time(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) ts = self.client.time() self.client.add('foo', 3, [1, 2]) self.client.add('bar', 3, [1, 2]) data = self.client.get(keys=['foo', 'bar'], records=[1, 2], time=ts) expected = { 'foo': 2, 'bar': 2 } assert_equal({ 1: expected, 2: expected }, data) def test_get_keys_records_timestr(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) anchor = self.get_time_anchor() self.client.add('foo', 3, [1, 2]) self.client.add('bar', 3, [1, 2]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.get(keys=['foo', 'bar'], records=[1, 2], time=ts) expected = { 'foo': 2, 'bar': 2 } assert_equal({ 1: expected, 2: expected }, data) def test_get_keys_records(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) data = self.client.get(keys=['foo', 'bar'], records=[1, 2]) expected = { 'foo': 2, 'bar': 2 } assert_equal({ 1: expected, 2: expected }, data) 
def test_insert_dict(self): data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': True, 'multi': ['a', 1, 3.14, True] } record = self.client.insert(data=data)[0] assert_equal('a', self.client.get(key='string', record=record)) assert_equal(1, self.client.get(key='int', record=record)) assert_equal(3.14, self.client.get(key='double', record=record)) assert_equal(True, self.client.get(key='bool', record=record)) assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record)) def test_insert_dicts(self): data = [ { 'foo': 1 }, { 'foo': 2 }, { 'foo': 3 } ] records = self.client.insert(data=data) assert_equal(len(data), len(records)) def test_insert_json(self): data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': True, 'multi': ['a', 1, 3.14, True] } data = ujson.dumps(data) record = self.client.insert(data=data)[0] assert_equal('a', self.client.get(key='string', record=record)) assert_equal(1, self.client.get(key='int', record=record)) assert_equal(3.14, self.client.get(key='double', record=record)) assert_equal(True, self.client.get(key='bool', record=record)) assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record)) def test_insert_json_list(self): data = [ { 'foo': 1 }, { 'foo': 2 }, { 'foo': 3 } ] count = len(data) data = ujson.dumps(data) records = self.client.insert(data=data) assert_equal(count, len(records)) def test_insert_dict_record(self): record = test_data.random_long() data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': True, 'multi': ['a', 1, 3.14, True] } result = self.client.insert(data=data, record=record) assert_true(result) assert_equal('a', self.client.get(key='string', record=record)) assert_equal(1, self.client.get(key='int', record=record)) assert_equal(3.14, self.client.get(key='double', record=record)) assert_equal(True, self.client.get(key='bool', record=record)) assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record)) def test_insert_json_record(self): record = test_data.random_long() data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': True, 'multi': ['a', 1, 3.14, True] } data = ujson.dumps(data) result = self.client.insert(data=data, record=record) assert_true(result) assert_equal('a', self.client.get(key='string', record=record)) assert_equal(1, self.client.get(key='int', record=record)) assert_equal(3.14, self.client.get(key='double', record=record)) assert_equal(True, self.client.get(key='bool', record=record)) assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record)) def test_insert_dict_records(self): record1 = test_data.random_long() record2 = test_data.random_long() record3 = test_data.random_long() data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': True, 'multi': ['a', 1, 3.14, True] } result = self.client.insert(data=data, records=[record1, record2, record3]) assert_equal({ record1: True, record2: True, record3: True }, result) def test_insert_json_records(self): record1 = test_data.random_long() record2 = test_data.random_long() record3 = test_data.random_long() data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': True, 'multi': ['a', 1, 3.14, True] } data = ujson.dumps(data) result = self.client.insert(data=data, records=[record1, record2, record3]) assert_equal({ record1: True, record2: True, record3: True }, result) def test_inventory(self): records = [1, 2, 3, 4, 5, 6, 7] self.client.add(key='foo', value=17, records=records) assert_equal(records, self.client.inventory()) def test_jsonify_records(self): record1 = 1 record2 = 2 
data = { 'int': 1, 'multi': [1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) dump = self.client.jsonify(records=[record1, record2]) data = { 'int': [1], 'multi': [1, 2, 3, 4] } assert_equal([data, data], ujson.loads(dump)) def test_jsonify_records_identifier(self): record1 = 1 record2 = 2 data = { 'int': 1, 'multi': [1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) dump = self.client.jsonify(records=[record1, record2], id=True) data1 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 1 } data2 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 2 } assert_equal([data1, data2], ujson.loads(dump)) def test_jsonify_records_time(self): record1 = 1 record2 = 2 data = { 'int': 1, 'multi': [1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) ts = self.client.time() self.client.add('foo', 10, [record1, record2]) dump = self.client.jsonify(records=[record1, record2], time=ts) data = { 'int': [1], 'multi': [1, 2, 3, 4] } assert_equal([data, data], ujson.loads(dump)) @ignore def test_jsonify_records_timestr(self): record1 = 1 record2 = 2 data = { 'int': 1, 'multi': [1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) anchor = self.get_time_anchor() self.client.add('foo', 10, [record1, record2]) ts = test_data.get_elapsed_millis_string(anchor) dump = self.client.jsonify(records=[record1, record2], time=ts) data = { 'int': [1], 'multi': [1, 2, 3, 4] } assert_equal([data, data], ujson.loads(dump)) def test_jsonify_records_identifier_time(self): record1 = 1 record2 = 2 data = { 'int': 1, 'multi': [1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) ts = self.client.time() self.client.add(key='foo', value=True, records=[record1, record2]) dump = self.client.jsonify(records=[record1, record2], id=True, time=ts) data1 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 1 } data2 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 2 } assert_equal([data1, data2], ujson.loads(dump)) def test_jsonify_records_identifier_timestr(self): record1 = 1 record2 = 2 data = { 'int': 1, 'multi': [1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) anchor = self.get_time_anchor() self.client.add(key='foo', value=True, records=[record1, record2]) ts = test_data.get_elapsed_millis_string(anchor) dump = self.client.jsonify(records=[record1, record2], id=True, time=ts) data1 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 1 } data2 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 2 } assert_equal([data1, data2], ujson.loads(dump)) def test_ping_record(self): record = 1 assert_false(self.client.ping(record)) self.client.add(key='foo', value=1, record=record) assert_true(self.client.ping(record)) self.client.clear(key='foo', record=record) assert_false(self.client.ping(record)) def test_ping_records(self): self.client.add(key='foo', value=1, records=[1, 2]) data = self.client.ping([1, 2, 3]) assert_equal({ 1: True, 2: True, 3: False }, data) def test_remove_key_value_record(self): key = 'foo' value = 1 record = 1 assert_false(self.client.remove(key, value, record)) self.client.add(key, value, record) assert_true(self.client.remove(key=key, record=record, value=value)) def test_remove_key_value_records(self): key = 'foo' value = 1 self.client.add(key, value, records=[1, 2]) data = self.client.remove(key, value, records=[1, 2, 3]) 
assert_equal({ 1: True, 2: True, 3: False }, data) def test_revert_key_records_time(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) ts = self.client.time() self.client.insert(data=data2, records=[1, 2, 3]) self.client.revert(key='one', records=[1, 2, 3], time=ts) data = self.client.select(key='one', record=[1, 2, 3]) assert_equal({ 1: [1], 2: [1], 3: [1] }, data) def test_revert_key_records_timestr(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) anchor = self.get_time_anchor() self.client.insert(data=data2, records=[1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) self.client.revert(key='one', records=[1, 2, 3], time=ts) data = self.client.select(key='one', record=[1, 2, 3]) assert_equal({ 1: [1], 2: [1], 3: [1] }, data) def test_revert_keys_records_time(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) ts = self.client.time() self.client.insert(data=data2, records=[1, 2, 3]) self.client.revert(keys=['one', 'two', 'three'], records=[1, 2, 3], time=ts) data = self.client.select(key=['one', 'two', 'three'], record=[1, 2, 3]) data3 = { 'one': [1], 'two': [2], 'three': [3] } assert_equal({ 1: data3, 2: data3, 3: data3 }, data) def test_revert_keys_records_timestr(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) anchor = self.get_time_anchor() self.client.insert(data=data2, records=[1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) self.client.revert(keys=['one', 'two', 'three'], records=[1, 2, 3], time=ts) data = self.client.select(key=['one', 'two', 'three'], record=[1, 2, 3]) data3 = { 'one': [1], 'two': [2], 'three': [3] } assert_equal({ 1: data3, 2: data3, 3: data3 }, data) def test_revert_keys_record_time(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) ts = self.client.time() self.client.insert(data=data2, records=[1, 2, 3]) self.client.revert(key=['one', 'two', 'three'], records=1, time=ts) data = self.client.select(key=['one', 'two', 'three'], record=1) assert_equal({ 'one': [1], 'two': [2], 'three': [3] }, data) def test_revert_keys_record_timestr(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) anchor = self.get_time_anchor() self.client.insert(data=data2, records=[1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) self.client.revert(key=['one', 'two', 'three'], records=1, time=ts) data = self.client.select(key=['one', 'two', 'three'], record=1) assert_equal({ 'one': [1], 'two': [2], 'three': [3] }, data) def test_revert_key_record_time(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) ts = self.client.time() self.client.insert(data=data2, records=[1, 2, 3]) self.client.revert(key='one', records=1, time=ts) data = self.client.select(key='one', record=1) assert_equal([1], data) def test_revert_key_record_timestr(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, 
records=[1, 2, 3]) anchor = self.get_time_anchor() self.client.insert(data=data2, records=[1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) self.client.revert(key='one', records=1, time=ts) data = self.client.select(key='one', record=1) assert_equal([1], data) def test_search(self): self.client.add(key="name", value="jeff", record=1) self.client.add(key="name", value="jeffery", record=2) self.client.add(key="name", value="jeremy", record=3) self.client.add(key="name", value="ben jefferson", record=4) records = self.client.search(key="name", query="jef") assert_equal([1, 2, 4], records) def test_select_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) ccl = key2 + ' = 10' data = self.client.select(ccl=ccl) expected = { key1: [1, 2, 3], key2: [10] } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_select_ccl_time(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) ts = self.client.time() self.client.set(key=key2, value=11, records=[record1, record2]) ccl = key2 + ' > 10' data = self.client.select(ccl=ccl, time=ts) expected = { key1: [1, 2, 3], key2: [10] } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_select_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) anchor = self.get_time_anchor() self.client.set(key=key2, value=11, records=[record1, record2]) ccl = key2 + ' > 10' ts = test_data.get_elapsed_millis_string(anchor) data = self.client.select(ccl=ccl, time=ts) expected = { key1: [1, 2, 3], key2: [10] } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_select_key_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ccl = key2 + ' = 10' data = self.client.select(key=key1, ccl=ccl) expected = { record1: [1, 2, 3], record2: [1, 2, 3, 4] } assert_equal(expected, data) def test_select_keys_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, 
records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ccl = key2 + ' = 10' data = self.client.select(keys=[key1, key2], ccl=ccl) expected = { record1: {key1: [1, 2, 3], key2: [10]}, record2: {key1: [1, 2, 3, 4], key2: [10]}, } assert_equal(expected, data) def test_select_key_ccl_time(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ts = self.client.time() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) data = self.client.select(key=key1, ccl=ccl, time=ts) expected = { record1: [1, 2, 3], record2: [1, 2, 3, 4] } assert_equal(expected, data) def test_select_keys_ccl_time(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ts = self.client.time() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) data = self.client.select(key=[key1, key2], ccl=ccl, time=ts) expected = { record1: {key1: [1, 2, 3], key2: [10]}, record2: {key1: [1, 2, 3, 4], key2: [10]}, } assert_equal(expected, data) def test_select_key_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) anchor = self.get_time_anchor() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.select(key=key1, ccl=ccl, time=ts) expected = { record1: [1, 2, 3], record2: [1, 2, 3, 4] } assert_equal(expected, data) def test_select_keys_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) anchor = self.get_time_anchor() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.select(key=[key1, key2], ccl=ccl, time=ts) expected = { record1: {key1: [1, 2, 3], key2: [10]}, record2: {key1: [1, 2, 3, 4], key2: [10]}, } assert_equal(expected, data) def test_select_key_record(self): 
self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) assert_equal([1, 2, 3], self.client.select(key='foo', record=1)) def test_select_key_record_time(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) ts = self.client.time() self.client.add('foo', 4, 1) assert_equal([1, 2, 3], self.client.select(key='foo', record=1, time=ts)) def test_select_key_record_timestr(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) anchor = self.get_time_anchor() self.client.add('foo', 4, 1) ts = test_data.get_elapsed_millis_string(anchor) assert_equal([1, 2, 3], self.client.select(key='foo', record=1, time=ts)) def test_select_key_records(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) assert_equal({ 1: [1, 2, 3], 2: [1, 2, 3], 3: [1, 2, 3] }, self.client.select(key='foo', record=[1, 2, 3])) def test_select_key_records_time(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) ts = self.client.time() self.client.add('foo', 4, [1, 2, 3]) assert_equal({ 1: [1, 2, 3], 2: [1, 2, 3], 3: [1, 2, 3] }, self.client.select(key='foo', record=[1, 2, 3], time=ts)) def test_select_key_records_timestr(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) anchor = self.get_time_anchor() self.client.add('foo', 4, [1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) assert_equal({ 1: [1, 2, 3], 2: [1, 2, 3], 3: [1, 2, 3] }, self.client.select(key='foo', record=[1, 2, 3], time=ts)) def test_select_keys_record(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) data = self.client.select(keys=['foo', 'bar'], record=1) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal(expected, data) def test_select_keys_record_time(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) ts = self.client.time() self.client.add('foo', 3, 1) self.client.add('bar', 3, 1) data = self.client.select(keys=['foo', 'bar'], record=1, time=ts) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal(expected, data) def test_select_keys_record_timestr(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) anchor = self.get_time_anchor() self.client.add('foo', 3, 1) self.client.add('bar', 3, 1) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.select(keys=['foo', 'bar'], record=1, time=ts) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal(expected, data) def test_select_keys_records_time(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) ts = self.client.time() self.client.add('foo', 3, [1, 2]) self.client.add('bar', 3, [1, 2]) data = self.client.select(keys=['foo', 'bar'], records=[1, 2], time=ts) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal({ 1: expected, 2: expected }, data) def test_select_keys_records_timestr(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) anchor = self.get_time_anchor() self.client.add('foo', 3, [1, 2]) self.client.add('bar', 3, [1, 2]) ts = test_data.get_elapsed_millis_string(anchor) data = 
self.client.select(keys=['foo', 'bar'], records=[1, 2], time=ts) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal({ 1: expected, 2: expected }, data) def test_select_keys_records(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) data = self.client.select(keys=['foo', 'bar'], records=[1, 2]) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal({ 1: expected, 2: expected }, data) def test_select_record(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) data = self.client.select(record=1) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal(expected, data) def test_select_record_time(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) ts = self.client.time() self.client.add('foo', 3, [1, 2]) self.client.add('bar', 3, [1, 2]) data = self.client.select(record=2, time=ts) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal(expected, data) def test_select_record_timestr(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) anchor = self.get_time_anchor() self.client.add('foo', 3, [1, 2]) self.client.add('bar', 3, [1, 2]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.select(record=2, time=ts) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal(expected, data) def test_select_records(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) data = self.client.select(records=[1, 2]) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal({ 1: expected, 2: expected }, data) def test_select_records_time(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) ts = self.client.time() self.client.add('foo', 3, [1, 2]) self.client.add('bar', 3, [1, 2]) data = self.client.select( records=[1, 2], time=ts) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal({ 1: expected, 2: expected }, data) def test_select_records_timestr(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) anchor = self.get_time_anchor() self.client.add('foo', 3, [1, 2]) self.client.add('bar', 3, [1, 2]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.select( records=[1, 2], time=ts) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal({ 1: expected, 2: expected }, data) def test_set_key_value(self): key = "foo" value = 1 record = self.client.set(key=key, value=value) data = self.client.select(record=record) assert_equal({ 'foo': [1] }, data) def test_set_key_value_record(self): key = "foo" value = 1 record = 1 self.client.add(key=key, value=2, record=record) self.client.add(key=key, value=2, record=record) self.client.set(key=key, value=value, record=record) data = self.client.select(record=record) assert_equal({ 'foo': [1] }, data) def test_set_key_value_records(self): key = "foo" value = 1 records = [1, 2, 3] self.client.add(key=key, value=2, record=records) self.client.add(key=key, value=2, record=records) self.client.set(key=key, value=value, record=records) data = self.client.select(record=records) expected = { 'foo': [1] } assert_equal({ 1: 
expected, 2: expected, 3: expected }, data) def test_stage(self): assert_is_none(self.client.transaction) self.client.stage() assert_is_not_none(self.client.transaction) self.client.abort() def test_time(self): assert_true(isinstance(self.client.time(), int)) def test_time_phrase(self): assert_true(isinstance(self.client.time("3 seconds ago"), int)) def test_verify_and_swap(self): self.client.add("foo", 2, 2) assert_false(self.client.verify_and_swap(key='foo', expected=1, record=2, replacement=3)) assert_true(self.client.verify_and_swap(key='foo', expected=2, record=2, replacement=3)) assert_equal(3, self.client.get(key='foo', record=2)) def test_verify_or_set(self): self.client.add("foo", 2, 2) self.client.verify_or_set(key='foo', value=3, record=2) assert_equal(3, self.client.get(key='foo', record=2)) def test_verify_key_value_record(self): self.client.add('name', 'jeff', 1) self.client.add('name', 'jeffery', 1) self.client.add('name', 'bob', 1) assert_true(self.client.verify('name', 'jeff', 1)) self.client.remove('name', 'jeff', 1) assert_false(self.client.verify('name', 'jeff', 1)) def test_verify_key_value_record_time(self): self.client.add('name', 'jeff', 1) self.client.add('name', 'jeffery', 1) self.client.add('name', 'bob', 1) ts = self.client.time() self.client.remove('name', 'jeff', 1) assert_true(self.client.verify('name', 'jeff', 1, time=ts)) def test_verify_key_value_record_timestr(self): self.client.add('name', 'jeff', 1) self.client.add('name', 'jeffery', 1) self.client.add('name', 'bob', 1) anchor = self.get_time_anchor() self.client.remove('name', 'jeff', 1) ts = test_data.get_elapsed_millis_string(anchor) assert_true(self.client.verify('name', 'jeff', 1, time=ts)) def test_link_key_source_destination(self): assert_true(self.client.link(key='friends', source=1, destination=2)) assert_equal(Link.to(2), self.client.get('friends', record=1)) def test_link_key_source_destinations(self): assert_equal({ 2: True, 3: True, 4: True }, self.client.link(key='friends', source=1, destination=[2, 3, 4])) def test_unlink_key_source_destination(self): assert_true(self.client.link(key='friends', source=1, destination=2)) assert_true(self.client.unlink(key='friends', source=1, destination=2)) def test_unlink_key_source_destinations(self): assert_true(self.client.link(key='friends', source=1, destination=2)) assert_equal({ 2: True, 3: False }, self.client.unlink(key='friends', source=1, destination=[2, 3])) def test_find_or_add_key_value(self): record = self.client.find_or_add("age", 23) assert_equal(23, self.client.get("age", record)) def test_find_or_insert_ccl_json(self): data = { 'name': 'jeff nelson' } data = ujson.dumps(data) record = self.client.find_or_insert(criteria="age > 10", data=data) assert_equal('jeff nelson', self.client.get("name", record)) def test_find_or_insert_ccl_dict(self): data = { 'name': 'jeff nelson' } record = self.client.find_or_insert(criteria="age > 10", data=data) assert_equal('jeff nelson', self.client.get("name", record)) def test_insert_dict_with_link(self): data = { 'foo': Link.to(1) } record = self.client.insert(data=data)[0] assert_equal(Link.to(1), self.client.get(key='foo', record=record)) def test_insert_dict_with_resolvable_link(self): record1 = self.client.add('foo', 1) record2 = self.client.insert(data={ 'foo': Link.to_where('foo = 1') })[0] assert_equal(Link.to(record1), self.client.get(key='foo', record=record2))
Python
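The suite above doubles as a reference for the Concourse Python driver's API surface. As orientation, here is a minimal sketch of the add/time/select/diff workflow it exercises; the `Concourse.connect` call, its import path, and its parameters are assumptions, since the tests only ever use a pre-built `self.client`.

```python
# A sketch of the workflow the suite above exercises. The connect() call is
# an assumption -- the tests only ever use a pre-built self.client.
from concourse import Concourse  # import path assumed, not shown in the suite

client = Concourse.connect(host='localhost', port=1717,
                           username='admin', password='admin')

client.add(key='name', value='jeff', record=1)     # append a value
ts = client.time()                                 # server-side timestamp
client.set(key='name', value='jeffery', record=1)  # replace all values

# Historical read: state as of `ts`, before the set() above.
assert client.select(key='name', record=1, time=ts) == ['jeff']

# Per-record changes since `ts`, keyed by Diff.ADDED / Diff.REMOVED.
changes = client.diff(key='name', record=1, start=ts)
```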
module('BadAriaRole'); test('No elements === no problems.', function(assert) { var config = { ruleName: 'badAriaRole', expected: axs.constants.AuditResult.NA }; assert.runRule(config); }); test('No roles === no problems.', function(assert) { // Setup fixture var fixture = document.getElementById('qunit-fixture'); for (var i = 0; i < 10; i++) fixture.appendChild(document.createElement('div')); var config = { ruleName: 'badAriaRole', expected: axs.constants.AuditResult.NA }; assert.runRule(config); }); test('Good role === no problems.', function(assert) { // Setup fixture var fixture = document.getElementById('qunit-fixture'); for (var r in axs.constants.ARIA_ROLES) { if (axs.constants.ARIA_ROLES.hasOwnProperty(r) && !axs.constants.ARIA_ROLES[r]['abstract']) { var div = document.createElement('div'); div.setAttribute('role', r); fixture.appendChild(div); } } var config = { ruleName: 'badAriaRole', expected: axs.constants.AuditResult.PASS, elements: [] }; assert.runRule(config); }); test('Bad role == problem', function(assert) { // Setup fixture var fixture = document.getElementById('qunit-fixture'); var div = document.createElement('div'); div.setAttribute('role', 'not-an-aria-role'); fixture.appendChild(div); var config = { ruleName: 'badAriaRole', expected: axs.constants.AuditResult.FAIL, elements: [div] }; assert.runRule(config); }); test('Abstract role == problem', function(assert) { // Setup fixture var fixture = document.getElementById('qunit-fixture'); var div = document.createElement('div'); div.setAttribute('role', 'input'); fixture.appendChild(div); var config = { ruleName: 'badAriaRole', expected: axs.constants.AuditResult.FAIL, elements: [div] }; assert.runRule(config); });
JavaScript
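The QUnit tests above pin down the badAriaRole rule's contract: NA when no element carries a role, PASS when every role is a concrete known role, FAIL when any role is unknown or abstract. A small Python re-expression of that contract, with a made-up ROLES table standing in for axs.constants.ARIA_ROLES:

```python
# Re-expression of the badAriaRole contract the QUnit tests above pin down.
# ROLES stands in for axs.constants.ARIA_ROLES: role name -> {'abstract': bool}.
ROLES = {
    'button':   {'abstract': False},
    'checkbox': {'abstract': False},
    'input':    {'abstract': True},  # abstract roles must not appear in markup
}

def audit_bad_aria_role(role_values):
    """role_values: every role="..." attribute value found on the page."""
    if not role_values:
        return 'NA'      # nothing to audit -> not applicable
    bad = [r for r in role_values if r not in ROLES or ROLES[r]['abstract']]
    return 'FAIL' if bad else 'PASS'

assert audit_bad_aria_role([]) == 'NA'
assert audit_bad_aria_role(['button', 'checkbox']) == 'PASS'
assert audit_bad_aria_role(['not-an-aria-role']) == 'FAIL'
assert audit_bad_aria_role(['input']) == 'FAIL'  # abstract role also fails
```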
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include <aws/securityhub/model/StandardsSubscriptionRequest.h> #include <aws/core/utils/json/JsonSerializer.h> #include <utility> using namespace Aws::Utils::Json; using namespace Aws::Utils; namespace Aws { namespace SecurityHub { namespace Model { StandardsSubscriptionRequest::StandardsSubscriptionRequest() : m_standardsArnHasBeenSet(false), m_standardsInputHasBeenSet(false) { } StandardsSubscriptionRequest::StandardsSubscriptionRequest(JsonView jsonValue) : m_standardsArnHasBeenSet(false), m_standardsInputHasBeenSet(false) { *this = jsonValue; } StandardsSubscriptionRequest& StandardsSubscriptionRequest::operator =(JsonView jsonValue) { if(jsonValue.ValueExists("StandardsArn")) { m_standardsArn = jsonValue.GetString("StandardsArn"); m_standardsArnHasBeenSet = true; } if(jsonValue.ValueExists("StandardsInput")) { Aws::Map<Aws::String, JsonView> standardsInputJsonMap = jsonValue.GetObject("StandardsInput").GetAllObjects(); for(auto& standardsInputItem : standardsInputJsonMap) { m_standardsInput[standardsInputItem.first] = standardsInputItem.second.AsString(); } m_standardsInputHasBeenSet = true; } return *this; } JsonValue StandardsSubscriptionRequest::Jsonize() const { JsonValue payload; if(m_standardsArnHasBeenSet) { payload.WithString("StandardsArn", m_standardsArn); } if(m_standardsInputHasBeenSet) { JsonValue standardsInputJsonMap; for(auto& standardsInputItem : m_standardsInput) { standardsInputJsonMap.WithString(standardsInputItem.first, standardsInputItem.second); } payload.WithObject("StandardsInput", std::move(standardsInputJsonMap)); } return payload; } } // namespace Model } // namespace SecurityHub } // namespace Aws
C++
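The C++ class above is plain JSON marshalling. As a reference for the payload it round-trips, here is a sketch of the document shape; the ARN and map entries are invented placeholders:

```python
# Shape of the JSON document the C++ StandardsSubscriptionRequest above
# round-trips; the ARN and map entries are invented placeholders.
import json

payload = {
    "StandardsArn": "arn:aws:securityhub:::ruleset/example",  # placeholder
    "StandardsInput": {"someKey": "someValue"},  # free-form string-to-string map
}

# Jsonize() emits exactly these two members; operator=(JsonView) reads them back.
assert json.loads(json.dumps(payload)) == payload
```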
+++
title = "John Behrens"
image = "john-behrens.jpg"
twitter = "webconsultseu"
website = "http://www.skills-for-teams.com"
type = "speaker"
linktitle = "john-behrens"
+++

John Behrens is a freelancer from Hamburg, Germany, who fills a variety of roles: Developer, Tester, Scrum Master, Engineer, Architect, Trainer, and Agile Coach. He is willing to use whatever skill is needed to help the team and carry a project to success.
Markdown
require 'puppet/node/facts' require 'puppet/indirector/rest' require 'puppet/util/puppetdb' class Puppet::Node::Facts::Puppetdb < Puppet::Indirector::REST include Puppet::Util::Puppetdb include Puppet::Util::Puppetdb::CommandNames def save(request) facts = request.instance.dup facts.values = facts.values.dup facts.stringify submit_command(request.key, facts.to_pson, CommandReplaceFacts, 1) end def find(request) begin response = http_get(request, "/v2/nodes/#{CGI.escape(request.key)}/facts", headers) log_x_deprecation_header(response) if response.is_a? Net::HTTPSuccess result = PSON.parse(response.body) # Note: the Inventory Service API appears to expect us to return nil here # if the node isn't found. However, PuppetDB returns an empty array in # this case; for now we will just look for that condition and assume that # it means that the node wasn't found, so we will return nil. In the # future we may want to improve the logic such that we can distinguish # between the "node not found" and the "no facts for this node" cases. if result.empty? return nil end facts = result.inject({}) do |a,h| a.merge(h['name'] => h['value']) end Puppet::Node::Facts.new(request.key, facts) else # Newline characters cause an HTTP error, so strip them raise "[#{response.code} #{response.message}] #{response.body.gsub(/[\r\n]/, '')}" end rescue => e raise Puppet::Error, "Failed to find facts from PuppetDB at #{self.class.server}:#{self.class.port}: #{e}" end end # Search for nodes matching a set of fact constraints. The constraints are # specified as a hash of the form: # # `{type.name.operator => value` # # The only accepted `type` is 'facts'. # # `name` must be the fact name to query against. # # `operator` may be one of {eq, ne, lt, gt, le, ge}, and will default to 'eq' # if unspecified. def search(request) return [] unless request.options operator_map = { 'eq' => '=', 'gt' => '>', 'lt' => '<', 'ge' => '>=', 'le' => '<=', } filters = request.options.sort.map do |key,value| type, name, operator = key.to_s.split('.') operator ||= 'eq' raise Puppet::Error, "Fact search against keys of type '#{type}' is unsupported" unless type == 'facts' if operator == 'ne' ['not', ['=', ['fact', name], value]] else [operator_map[operator], ['fact', name], value] end end query = ["and"] + filters query_param = CGI.escape(query.to_pson) begin response = http_get(request, "/v2/nodes?query=#{query_param}", headers) log_x_deprecation_header(response) if response.is_a? Net::HTTPSuccess PSON.parse(response.body).collect {|s| s["name"]} else # Newline characters cause an HTTP error, so strip them raise "[#{response.code} #{response.message}] #{response.body.gsub(/[\r\n]/, '')}" end rescue => e raise Puppet::Error, "Could not perform inventory search from PuppetDB at #{self.class.server}:#{self.class.port}: #{e}" end end def headers { "Accept" => "application/json", "Content-Type" => "application/x-www-form-urlencoded; charset=UTF-8", } end end
Ruby
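The comment on search() above specifies the constraint syntax precisely; a compact Python mirror of the same hash-to-query translation makes the mapping concrete (this restates, it does not replace, the Ruby):

```python
# Python mirror of the Ruby search() translation above: a constraint hash
# such as {"facts.kernel.eq": "Linux"} becomes a PuppetDB query AST.
OPERATORS = {'eq': '=', 'gt': '>', 'lt': '<', 'ge': '>=', 'le': '<='}

def to_query(constraints):
    filters = []
    for key, value in sorted(constraints.items()):
        type_, name, *rest = key.split('.')
        op = rest[0] if rest else 'eq'          # operator defaults to 'eq'
        if type_ != 'facts':
            raise ValueError("only 'facts' constraints are supported")
        if op == 'ne':                          # 'ne' becomes not(=)
            filters.append(['not', ['=', ['fact', name], value]])
        else:
            filters.append([OPERATORS[op], ['fact', name], value])
    return ['and'] + filters

assert to_query({'facts.kernel.eq': 'Linux'}) == \
    ['and', ['=', ['fact', 'kernel'], 'Linux']]
```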
/* * Copyright 2021 Red Hat, Inc. and/or its affiliates * and other contributors as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.keycloak.quarkus.runtime.storage.database.liquibase; import java.lang.reflect.Method; import java.sql.Connection; import javax.xml.parsers.SAXParserFactory; import org.jboss.logging.Logger; import org.keycloak.Config; import org.keycloak.connections.jpa.updater.liquibase.conn.LiquibaseConnectionProvider; import org.keycloak.connections.jpa.updater.liquibase.conn.LiquibaseConnectionProviderFactory; import org.keycloak.models.KeycloakSession; import org.keycloak.models.KeycloakSessionFactory; import liquibase.Liquibase; import liquibase.database.Database; import liquibase.database.DatabaseFactory; import liquibase.database.jvm.JdbcConnection; import liquibase.exception.LiquibaseException; import liquibase.parser.ChangeLogParser; import liquibase.parser.ChangeLogParserFactory; import liquibase.parser.core.xml.XMLChangeLogSAXParser; import liquibase.resource.ClassLoaderResourceAccessor; import liquibase.resource.ResourceAccessor; public class QuarkusLiquibaseConnectionProvider implements LiquibaseConnectionProviderFactory, LiquibaseConnectionProvider { private static final Logger logger = Logger.getLogger(QuarkusLiquibaseConnectionProvider.class); private volatile boolean initialized = false; private ClassLoaderResourceAccessor resourceAccessor; @Override public LiquibaseConnectionProvider create(KeycloakSession session) { if (!initialized) { synchronized (this) { if (!initialized) { baseLiquibaseInitialization(session); initialized = true; } } } return this; } protected void baseLiquibaseInitialization(KeycloakSession session) { resourceAccessor = new ClassLoaderResourceAccessor(getClass().getClassLoader()); // disables XML validation for (ChangeLogParser parser : ChangeLogParserFactory.getInstance().getParsers()) { if (parser instanceof XMLChangeLogSAXParser) { Method getSaxParserFactory = null; try { getSaxParserFactory = XMLChangeLogSAXParser.class.getDeclaredMethod("getSaxParserFactory"); getSaxParserFactory.setAccessible(true); SAXParserFactory saxParserFactory = (SAXParserFactory) getSaxParserFactory.invoke(parser); saxParserFactory.setValidating(false); saxParserFactory.setSchema(null); } catch (Exception e) { logger.warnf("Failed to disable liquibase XML validations"); } finally { if (getSaxParserFactory != null) { getSaxParserFactory.setAccessible(false); } } } } } @Override public void init(Config.Scope config) { } @Override public void postInit(KeycloakSessionFactory factory) { } @Override public void close() { } @Override public String getId() { return "quarkus"; } @Override public Liquibase getLiquibase(Connection connection, String defaultSchema) throws LiquibaseException { Database database = DatabaseFactory.getInstance().findCorrectDatabaseImplementation(new JdbcConnection(connection)); if (defaultSchema != null) { database.setDefaultSchemaName(defaultSchema); } String changelog = QuarkusJpaUpdaterProvider.CHANGELOG; 
logger.debugf("Using changelog file %s and changelogTableName %s", changelog, database.getDatabaseChangeLogTableName()); return new Liquibase(changelog, resourceAccessor, database); } @Override public Liquibase getLiquibaseForCustomUpdate(Connection connection, String defaultSchema, String changelogLocation, ClassLoader classloader, String changelogTableName) throws LiquibaseException { Database database = DatabaseFactory.getInstance().findCorrectDatabaseImplementation(new JdbcConnection(connection)); if (defaultSchema != null) { database.setDefaultSchemaName(defaultSchema); } ResourceAccessor resourceAccessor = new ClassLoaderResourceAccessor(classloader); database.setDatabaseChangeLogTableName(changelogTableName); logger.debugf("Using changelog file %s and changelogTableName %s", changelogLocation, database.getDatabaseChangeLogTableName()); return new Liquibase(changelogLocation, resourceAccessor, database); } @Override public int order() { return 100; } }
Java
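// --- Editor's illustrative sketch (not part of the original file above) ---
// A hedged usage example for the provider defined above: how a caller holding a
// KeycloakSession and a JDBC DataSource (both assumed to exist in the caller's
// context) might obtain the provider and apply pending Liquibase changesets.
// The "public" schema name and the empty context expression are assumptions.
import java.sql.Connection;
import javax.sql.DataSource;
import liquibase.Liquibase;
import org.keycloak.connections.jpa.updater.liquibase.conn.LiquibaseConnectionProvider;
import org.keycloak.models.KeycloakSession;

class LiquibaseProviderUsageSketch {
    void runPendingChangesets(KeycloakSession session, DataSource dataSource) throws Exception {
        LiquibaseConnectionProvider liquibaseProvider =
                session.getProvider(LiquibaseConnectionProvider.class);
        try (Connection connection = dataSource.getConnection()) {
            Liquibase liquibase = liquibaseProvider.getLiquibase(connection, "public");
            liquibase.update(""); // empty contexts = run every changeset not yet recorded
        }
    }
}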
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (1.8.0_45) on Fri Aug 28 09:51:25 EDT 2015 --> <title>Cassandra.AsyncClient.atomic_batch_mutate_call (apache-cassandra API)</title> <meta name="date" content="2015-08-28"> <link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style"> <script type="text/javascript" src="../../../../script.js"></script> </head> <body> <script type="text/javascript"><!-- try { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Cassandra.AsyncClient.atomic_batch_mutate_call (apache-cassandra API)"; } } catch(err) { } //--> var methods = {"i0":10,"i1":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; var activeTableTab = "activeTableTab"; </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar.top"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.top.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../overview-summary.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li class="navBarCell1Rev">Class</li> <li><a href="class-use/Cassandra.AsyncClient.atomic_batch_mutate_call.html">Use</a></li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../index-all.html">Index</a></li> <li><a href="../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.add_call.html" title="class in org.apache.cassandra.thrift"><span class="typeNameLink">Prev&nbsp;Class</span></a></li> <li><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.batch_mutate_call.html" title="class in org.apache.cassandra.thrift"><span class="typeNameLink">Next&nbsp;Class</span></a></li> </ul> <ul class="navList"> <li><a href="../../../../index.html?org/apache/cassandra/thrift/Cassandra.AsyncClient.atomic_batch_mutate_call.html" target="_top">Frames</a></li> <li><a href="Cassandra.AsyncClient.atomic_batch_mutate_call.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <div> <ul class="subNavList"> <li>Summary:&nbsp;</li> <li><a href="#nested.classes.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall">Nested</a>&nbsp;|&nbsp;</li> <li><a href="#fields.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall">Field</a>&nbsp;|&nbsp;</li> <li><a href="#constructor.summary">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.summary">Method</a></li> </ul> <ul class="subNavList"> <li>Detail:&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor.detail">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.detail">Method</a></li> </ul> </div> <a 
name="skip.navbar.top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <!-- ======== START OF CLASS DATA ======== --> <div class="header"> <div class="subTitle">org.apache.cassandra.thrift</div> <h2 title="Class Cassandra.AsyncClient.atomic_batch_mutate_call" class="title">Class Cassandra.AsyncClient.atomic_batch_mutate_call</h2> </div> <div class="contentContainer"> <ul class="inheritance"> <li>java.lang.Object</li> <li> <ul class="inheritance"> <li>org.apache.thrift.async.TAsyncMethodCall</li> <li> <ul class="inheritance"> <li>org.apache.cassandra.thrift.Cassandra.AsyncClient.atomic_batch_mutate_call</li> </ul> </li> </ul> </li> </ul> <div class="description"> <ul class="blockList"> <li class="blockList"> <dl> <dt>Enclosing class:</dt> <dd><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.html" title="class in org.apache.cassandra.thrift">Cassandra.AsyncClient</a></dd> </dl> <hr> <br> <pre>public static class <span class="typeNameLabel">Cassandra.AsyncClient.atomic_batch_mutate_call</span> extends org.apache.thrift.async.TAsyncMethodCall</pre> </li> </ul> </div> <div class="summary"> <ul class="blockList"> <li class="blockList"> <!-- ======== NESTED CLASS SUMMARY ======== --> <ul class="blockList"> <li class="blockList"><a name="nested.class.summary"> <!-- --> </a> <h3>Nested Class Summary</h3> <ul class="blockList"> <li class="blockList"><a name="nested.classes.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall"> <!-- --> </a> <h3>Nested classes/interfaces inherited from class&nbsp;org.apache.thrift.async.TAsyncMethodCall</h3> <code>org.apache.thrift.async.TAsyncMethodCall.State</code></li> </ul> </li> </ul> <!-- =========== FIELD SUMMARY =========== --> <ul class="blockList"> <li class="blockList"><a name="field.summary"> <!-- --> </a> <h3>Field Summary</h3> <ul class="blockList"> <li class="blockList"><a name="fields.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall"> <!-- --> </a> <h3>Fields inherited from class&nbsp;org.apache.thrift.async.TAsyncMethodCall</h3> <code>client, transport</code></li> </ul> </li> </ul> <!-- ======== CONSTRUCTOR SUMMARY ======== --> <ul class="blockList"> <li class="blockList"><a name="constructor.summary"> <!-- --> </a> <h3>Constructor Summary</h3> <table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation"> <caption><span>Constructors</span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colOne" scope="col">Constructor and Description</th> </tr> <tr class="altColor"> <td class="colOne"><code><span class="memberNameLink"><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.atomic_batch_mutate_call.html#atomic_batch_mutate_call-java.util.Map-org.apache.cassandra.thrift.ConsistencyLevel-org.apache.thrift.async.AsyncMethodCallback-org.apache.thrift.async.TAsyncClient-org.apache.thrift.protocol.TProtocolFactory-org.apache.thrift.transport.TNonblockingTransport-">atomic_batch_mutate_call</a></span>(java.util.Map&lt;java.nio.ByteBuffer,java.util.Map&lt;java.lang.String,java.util.List&lt;<a href="../../../../org/apache/cassandra/thrift/Mutation.html" title="class in org.apache.cassandra.thrift">Mutation</a>&gt;&gt;&gt;&nbsp;mutation_map, <a href="../../../../org/apache/cassandra/thrift/ConsistencyLevel.html" title="enum in org.apache.cassandra.thrift">ConsistencyLevel</a>&nbsp;consistency_level, org.apache.thrift.async.AsyncMethodCallback&nbsp;resultHandler, 
org.apache.thrift.async.TAsyncClient&nbsp;client, org.apache.thrift.protocol.TProtocolFactory&nbsp;protocolFactory, org.apache.thrift.transport.TNonblockingTransport&nbsp;transport)</code>&nbsp;</td> </tr> </table> </li> </ul> <!-- ========== METHOD SUMMARY =========== --> <ul class="blockList"> <li class="blockList"><a name="method.summary"> <!-- --> </a> <h3>Method Summary</h3> <table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation"> <caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd">&nbsp;</span></span><span id="t2" class="tableTab"><span><a href="javascript:show(2);">Instance Methods</a></span><span class="tabEnd">&nbsp;</span></span><span id="t4" class="tableTab"><span><a href="javascript:show(8);">Concrete Methods</a></span><span class="tabEnd">&nbsp;</span></span></caption> <tr> <th class="colFirst" scope="col">Modifier and Type</th> <th class="colLast" scope="col">Method and Description</th> </tr> <tr id="i0" class="altColor"> <td class="colFirst"><code>void</code></td> <td class="colLast"><code><span class="memberNameLink"><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.atomic_batch_mutate_call.html#getResult--">getResult</a></span>()</code>&nbsp;</td> </tr> <tr id="i1" class="rowColor"> <td class="colFirst"><code>void</code></td> <td class="colLast"><code><span class="memberNameLink"><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.atomic_batch_mutate_call.html#write_args-org.apache.thrift.protocol.TProtocol-">write_args</a></span>(org.apache.thrift.protocol.TProtocol&nbsp;prot)</code>&nbsp;</td> </tr> </table> <ul class="blockList"> <li class="blockList"><a name="methods.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall"> <!-- --> </a> <h3>Methods inherited from class&nbsp;org.apache.thrift.async.TAsyncMethodCall</h3> <code>getClient, getFrameBuffer, getSequenceId, getStartTime, getState, getTimeoutTimestamp, hasTimeout, isFinished, onError, prepareMethodCall, transition</code></li> </ul> <ul class="blockList"> <li class="blockList"><a name="methods.inherited.from.class.java.lang.Object"> <!-- --> </a> <h3>Methods inherited from class&nbsp;java.lang.Object</h3> <code>clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</code></li> </ul> </li> </ul> </li> </ul> </div> <div class="details"> <ul class="blockList"> <li class="blockList"> <!-- ========= CONSTRUCTOR DETAIL ======== --> <ul class="blockList"> <li class="blockList"><a name="constructor.detail"> <!-- --> </a> <h3>Constructor Detail</h3> <a name="atomic_batch_mutate_call-java.util.Map-org.apache.cassandra.thrift.ConsistencyLevel-org.apache.thrift.async.AsyncMethodCallback-org.apache.thrift.async.TAsyncClient-org.apache.thrift.protocol.TProtocolFactory-org.apache.thrift.transport.TNonblockingTransport-"> <!-- --> </a> <ul class="blockListLast"> <li class="blockList"> <h4>atomic_batch_mutate_call</h4> <pre>public&nbsp;atomic_batch_mutate_call(java.util.Map&lt;java.nio.ByteBuffer,java.util.Map&lt;java.lang.String,java.util.List&lt;<a href="../../../../org/apache/cassandra/thrift/Mutation.html" title="class in org.apache.cassandra.thrift">Mutation</a>&gt;&gt;&gt;&nbsp;mutation_map, <a href="../../../../org/apache/cassandra/thrift/ConsistencyLevel.html" title="enum in org.apache.cassandra.thrift">ConsistencyLevel</a>&nbsp;consistency_level, org.apache.thrift.async.AsyncMethodCallback&nbsp;resultHandler, 
org.apache.thrift.async.TAsyncClient&nbsp;client, org.apache.thrift.protocol.TProtocolFactory&nbsp;protocolFactory, org.apache.thrift.transport.TNonblockingTransport&nbsp;transport) throws org.apache.thrift.TException</pre> <dl> <dt><span class="throwsLabel">Throws:</span></dt> <dd><code>org.apache.thrift.TException</code></dd> </dl> </li> </ul> </li> </ul> <!-- ============ METHOD DETAIL ========== --> <ul class="blockList"> <li class="blockList"><a name="method.detail"> <!-- --> </a> <h3>Method Detail</h3> <a name="write_args-org.apache.thrift.protocol.TProtocol-"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>write_args</h4> <pre>public&nbsp;void&nbsp;write_args(org.apache.thrift.protocol.TProtocol&nbsp;prot) throws org.apache.thrift.TException</pre> <dl> <dt><span class="overrideSpecifyLabel">Specified by:</span></dt> <dd><code>write_args</code>&nbsp;in class&nbsp;<code>org.apache.thrift.async.TAsyncMethodCall</code></dd> <dt><span class="throwsLabel">Throws:</span></dt> <dd><code>org.apache.thrift.TException</code></dd> </dl> </li> </ul> <a name="getResult--"> <!-- --> </a> <ul class="blockListLast"> <li class="blockList"> <h4>getResult</h4> <pre>public&nbsp;void&nbsp;getResult() throws <a href="../../../../org/apache/cassandra/thrift/InvalidRequestException.html" title="class in org.apache.cassandra.thrift">InvalidRequestException</a>, <a href="../../../../org/apache/cassandra/thrift/UnavailableException.html" title="class in org.apache.cassandra.thrift">UnavailableException</a>, <a href="../../../../org/apache/cassandra/thrift/TimedOutException.html" title="class in org.apache.cassandra.thrift">TimedOutException</a>, org.apache.thrift.TException</pre> <dl> <dt><span class="throwsLabel">Throws:</span></dt> <dd><code><a href="../../../../org/apache/cassandra/thrift/InvalidRequestException.html" title="class in org.apache.cassandra.thrift">InvalidRequestException</a></code></dd> <dd><code><a href="../../../../org/apache/cassandra/thrift/UnavailableException.html" title="class in org.apache.cassandra.thrift">UnavailableException</a></code></dd> <dd><code><a href="../../../../org/apache/cassandra/thrift/TimedOutException.html" title="class in org.apache.cassandra.thrift">TimedOutException</a></code></dd> <dd><code>org.apache.thrift.TException</code></dd> </dl> </li> </ul> </li> </ul> </li> </ul> </div> </div> <!-- ========= END OF CLASS DATA ========= --> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar.bottom"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.bottom.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../overview-summary.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li class="navBarCell1Rev">Class</li> <li><a href="class-use/Cassandra.AsyncClient.atomic_batch_mutate_call.html">Use</a></li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../index-all.html">Index</a></li> <li><a href="../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.add_call.html" title="class in org.apache.cassandra.thrift"><span class="typeNameLink">Prev&nbsp;Class</span></a></li> <li><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.batch_mutate_call.html" 
title="class in org.apache.cassandra.thrift"><span class="typeNameLink">Next&nbsp;Class</span></a></li> </ul> <ul class="navList"> <li><a href="../../../../index.html?org/apache/cassandra/thrift/Cassandra.AsyncClient.atomic_batch_mutate_call.html" target="_top">Frames</a></li> <li><a href="Cassandra.AsyncClient.atomic_batch_mutate_call.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <div> <ul class="subNavList"> <li>Summary:&nbsp;</li> <li><a href="#nested.classes.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall">Nested</a>&nbsp;|&nbsp;</li> <li><a href="#fields.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall">Field</a>&nbsp;|&nbsp;</li> <li><a href="#constructor.summary">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.summary">Method</a></li> </ul> <ul class="subNavList"> <li>Detail:&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor.detail">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method.detail">Method</a></li> </ul> </div> <a name="skip.navbar.bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> <p class="legalCopy"><small>Copyright &copy; 2015 The Apache Software Foundation</small></p> </body> </html>
HTML
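// --- Editor's illustrative sketch (not part of the generated javadoc above) ---
// A hedged example of driving the generated async call documented above, using
// standard Thrift async-client plumbing. The host/port, the caller-supplied
// mutation map, and the exact AsyncMethodCallback generics (which vary across
// Thrift versions) are assumptions.
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.Mutation;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.async.TAsyncClientManager;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TNonblockingSocket;
import org.apache.thrift.transport.TNonblockingTransport;

class AtomicBatchMutateSketch {
    void mutate(Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap) throws Exception {
        TAsyncClientManager clientManager = new TAsyncClientManager();
        TNonblockingTransport transport = new TNonblockingSocket("localhost", 9160);
        Cassandra.AsyncClient client =
                new Cassandra.AsyncClient(new TBinaryProtocol.Factory(), clientManager, transport);
        client.atomic_batch_mutate(mutationMap, ConsistencyLevel.QUORUM,
                new AsyncMethodCallback<Cassandra.AsyncClient.atomic_batch_mutate_call>() {
                    @Override
                    public void onComplete(Cassandra.AsyncClient.atomic_batch_mutate_call call) {
                        try {
                            call.getResult(); // throws if the batch was rejected or timed out
                        } catch (Exception e) {
                            // handle InvalidRequestException / UnavailableException / TimedOutException
                        }
                    }

                    @Override
                    public void onError(Exception e) {
                        // transport-level failure
                    }
                });
    }
}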
package org.apereo.cas.web.flow.actions; import org.apereo.cas.authentication.CoreAuthenticationTestUtils; import org.apereo.cas.authentication.principal.ResponseBuilderLocator; import org.apereo.cas.authentication.principal.WebApplicationService; import org.apereo.cas.authentication.principal.WebApplicationServiceResponseBuilder; import org.apereo.cas.config.CasCoreServicesConfiguration; import org.apereo.cas.config.CasCoreUtilConfiguration; import org.apereo.cas.services.ServicesManager; import org.apereo.cas.web.flow.CasWebflowConstants; import org.apereo.cas.web.support.WebUtils; import lombok.val; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.cloud.autoconfigure.RefreshAutoConfiguration; import org.springframework.mock.web.MockHttpServletRequest; import org.springframework.mock.web.MockHttpServletResponse; import org.springframework.mock.web.MockServletContext; import org.springframework.test.context.junit4.rules.SpringClassRule; import org.springframework.test.context.junit4.rules.SpringMethodRule; import org.springframework.webflow.context.servlet.ServletExternalContext; import org.springframework.webflow.test.MockRequestContext; import static org.junit.Assert.*; import static org.mockito.Mockito.*; /** * This is {@link RedirectToServiceActionTests}. * * @author Misagh Moayyed * @since 5.3.0 */ @SpringBootTest(classes = { RefreshAutoConfiguration.class, CasCoreServicesConfiguration.class, CasCoreUtilConfiguration.class }) public class RedirectToServiceActionTests { @ClassRule public static final SpringClassRule SPRING_CLASS_RULE = new SpringClassRule(); @Rule public final SpringMethodRule springMethodRule = new SpringMethodRule(); @Autowired @Qualifier("servicesManager") private ServicesManager servicesManager; @Test public void verifyAction() throws Exception { val context = new MockRequestContext(); val request = new MockHttpServletRequest(); context.setExternalContext(new ServletExternalContext(new MockServletContext(), request, new MockHttpServletResponse())); WebUtils.putAuthentication(CoreAuthenticationTestUtils.getAuthentication(), context); WebUtils.putService(context, CoreAuthenticationTestUtils.getWebApplicationService()); val locator = mock(ResponseBuilderLocator.class); when(locator.locate(any(WebApplicationService.class))).thenReturn(new WebApplicationServiceResponseBuilder(this.servicesManager)); val redirectToServiceAction = new RedirectToServiceAction(locator); val event = redirectToServiceAction.execute(context); assertEquals(CasWebflowConstants.TRANSITION_ID_REDIRECT, event.getId()); } }
Java
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8" > <title>Berlin 2013 - Proposal</title> <meta name="author" content="Tim Lossen" > <link rel="alternate" type="application/rss+xml" title="devopsdays RSS Feed" href="http://www.devopsdays.org/feed/" > <script type="text/javascript" src="https://www.google.com/jsapi"></script> <script type="text/javascript"> google.load('jquery', '1.3.2'); </script> <!---This is a combined jAmpersand, jqwindont , jPullquote --> <script type="text/javascript" src="/js/devops.js"></script> <!--- Blueprint CSS Framework Screen + Fancytype-Screen + jedi.css --> <link rel="stylesheet" href="/css/devops.min.css" type="text/css" media="screen, projection"> <link rel="stylesheet" href="/css/blueprint/print.css" type="text/css" media="print"> <!--[if IE]> <link rel="stylesheet" href="/css/blueprint/ie.css" type="text/css" media="screen, projection"> <![endif]--> </head> <body onload="initialize()"> <div class="container "> <div class="span-24 last" id="header"> <div class="span-16 first"> <img src="/images/devopsdays-banner.png" title="devopsdays banner" width="801" height="115" alt="devopdays banner" ><br> </div> <div class="span-8 last"> </div> </div> <div class="span-24 last"> <div class="span-15 first"> <div id="headermenu"> <table > <tr> <td> <a href="/"><img alt="home" title="home" src="/images/home.png"></a> <a href="/">Home</a> </td> <td> <a href="/contact/"><img alt="contact" title="contact" src="/images/contact.png"></a> <a href="/contact/">Contact</a> </td> <td> <a href="/events/"><img alt="events" title="events" src="/images/events.png"></a> <a href="/events/">Events</a> </td> <td> <a href="/presentations/"><img alt="presentations" title="presentations" src="/images/presentations.png"></a> <a href="/presentations/">Presentations</a> </td> <td> <a href="/blog/"><img alt="blog" title="blog" src="/images/blog.png"></a> <a href="/blog/">Blog</a> </td> </tr> </table> </div> </div> <div class="span-8 last"> </div> <div class="span-24 last" id="title"> <div class="span-15 first"> <h1>Berlin 2013 - Proposal </h1> </div> <div class="span-8 last"> </div> <h1>Gold sponsors</h1> </div> <div class="span-15 "> <div class="span-15 last "> <div class="submenu"> <h3> <a href="/events/2013-berlin/">welcome</a>&nbsp; <a href="/events/2013-berlin/propose">propose</a>&nbsp; <a href="/events/2013-berlin/program">program</a>&nbsp; <a href="/events/2013-berlin/location">location</a>&nbsp; <a href="/events/2013-berlin/registration">register</a>&nbsp; <a href="/events/2013-berlin/sponsor">sponsor</a>&nbsp; <a href="/events/2013-berlin/contact">contact</a>&nbsp; </h3> </div> Back to <a href='..'>proposals overview</a> - <a href='../../program'>program</a> <hr> <h3>Podularity FTW!</h3> <p><strong>Abstract:</strong></p> <p>In "The Connected Company" (O'Reilly, 2012), Dave Gray describes how pods -- "small, autonomous units" -- are the basic building blocks of a flexible, scalable and resilient organisation. At Wooga, we have used pods (though we call them "game teams") as the main organizational unit from the beginning, and this has led quite naturally to a strong devops culture.</p> <p>In this talk, I will tell how this decision came about, how it has played out, which practical benefits the approach provides -- but also, what problems we have run into. 
As a bonus, I will report on a recent attempt to use podularity as the technical system architecture, where scalability and resilience are major concerns as well.</p> <p><strong>Speaker:</strong></p> <p>Tim Lossen</p> </div> <div class="span-15 first last"> <script type="text/javascript"> // var disqus_developer = 1; </script> <div id="disqus_thread"></div> <script type="text/javascript"> var disqus_shortname = 'devopsdays'; (function() { var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; dsq.src = 'http://' + disqus_shortname + '.disqus.com/embed.js'; (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); })(); </script> <noscript>Please enable JavaScript to view the <a href="http://disqus.com/?ref_noscript">comments powered by Disqus.</a></noscript> <a href="http://disqus.com" class="dsq-brlink">blog comments powered by <span class="logo-disqus">Disqus</span></a> <hr> </div> </div> <div class="span-8 last"> <div class="span-8 last"> <a href='http://www.zeroturnaround.com'><img border=0 alt='Zeroturnaround' title='Zeroturnaround' width=100px height=100px src='/events/2013-berlin/logos/zeroturnaround.png'></a> <a href='http://www8.hp.com/de/de/software-solutions/software.html?compURI=1234839&jumpid=reg_r1002_dede_c-001_title_r0001'><img border=0 alt='HP' title='HP' width=100px height=100px src='/events/2013-berlin/logos/hp.png'></a> <h1>Gold Sponsors</h1> <a href='http://www.gutefrage.net/'><img border=0 alt='Gutefrage' title='Gutefrage' width=100px height=100px src='/events/2013-berlin/logos/gutefrage.png'></a> <a href='http://developer.immobilienscout24.de/jobs/'><img border=0 alt='Immobilien Scout' title='Immobilien Scout' width=100px height=100px src='/events/2013-berlin/logos/immobilienscout.png'></a> <a href='http://www.unbelievable-machine.com'><img border=0 alt='The unbelievable Machine Company' title='The unbelievable Machine Company' width=100px height=100px src='/events/2013-berlin/logos/unbelievablemachine.png'></a> <a href='http://nokia.de'><img border=0 alt='Nokia Here' title='Nokia Here' width=100px height=100px src='/events/2013-berlin/logos/here.png'></a> <a href='https://www.engineyard.com/'><img border=0 alt='Engine Yard' title='Engine Yard' width=100px height=100px src='/events/2013-berlin/logos/engineyard.png'></a> <a href='http://www.idealo.de/'><img border=0 alt='Idealo' title='Idealo' width=100px height=100px src='/events/2013-berlin/logos/idealo.png'></a> <a href='http://www.netways.de/'><img border=0 alt='Netways' title='Netways' width=100px height=100px src='/events/2013-berlin/logos/netways.png'></a> <a href='http://www.axelspringer.de/karriere'><img border=0 alt='Axel Springer' title='Axel Springer' width=100px height=100px src='/events/2013-berlin/logos/axelspringer.png'></a> <h1>Silver sponsors</h1> <a href='http://www.innoq.com/'><img border=0 alt='InnoQ' title='InnoQ' width=100px height=100px src='/events/2013-berlin/logos/innoq.png'></a> <a href='http://www.friendscout24.com/'><img border=0 alt='FriendScout 24' title='FriendScout 24' width=100px height=100px src='/events/2013-berlin/logos/friendscout24.png'></a> <a href='http://www.serena.com/'><img border=0 alt='Serena' title='Serena' width=100px height=100px src='/events/2013-berlin/logos/serena.png'></a> <a href='http://www.cassini.de/'><img border=0 alt='Cassini' title='Cassini' width=100px height=100px src='/events/2013-berlin/logos/cassini.png'></a> <a href='http://www.leanovate.de/'><img border=0 
alt='Leanovate' title='Leanovate' width=100px height=100px src='/events/2013-berlin/logos/leanovate.png'></a> <a href='http://www.it-agile.de/'><img border=0 alt='IT-Agile' title='IT-Agile' width=100px height=100px src='/events/2013-berlin/logos/itagile.png'></a> <a href='http://www.cloudbau.de/'><img border=0 alt='Cloudbau' title='Cloudbau' width=100px height=100px src='/events/2013-berlin/logos/cloudbau.png'></a> <a href='http://www.gsb.stanford.edu/ignite/paris'><img border=0 alt='Stanford Ignite Paris' title='Stanford Ignite Paris' width=100px height=100px src='/events/2013-berlin/logos/stanford.png'></a> <a href='http://www.tarent.com'><img border=0 alt='Tarent' title='Tarent' width=100px height=100px src='/events/2013-berlin/logos/tarent.png'></a> <a href='http://aws.amazon.com/opsworks/'><img border=0 alt='OpsWorks' title='OpsWorks' width=100px height=100px src='/events/2013-berlin/logos/opsworks.png'></a> <a href='http://www.epost.de/'><img border=0 alt='E-POST' title='E-POST' width=100px height=100px src='/events/2013-berlin/logos/epost.png'></a> <h1>Evening sponsors</h1> <a href='http://www.syseleven.com/'><img border=0 alt='SysEleven' title='SysEleven' width=100px height=100px src='/events/2013-berlin/logos/syseleven.png'></a> <a href='http://www.github.com/'><img border=0 alt='github' title='github' width=100px height=100px src='/events/2013-berlin/logos/github.png'></a> </div> <div class="span-8 last"> </div> </div> </div> </div> <script type="text/javascript"> var _gaq = _gaq || []; _gaq.push(['_setAccount', 'UA-9713393-1']); _gaq.push(['_trackPageview']); (function() { var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true; ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js'; var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s); })(); </script> </body> </html>
HTML
package org.camunda.bpm.engine.rest.wink; import org.camunda.bpm.engine.rest.AbstractMessageRestServiceTest; import org.camunda.bpm.engine.rest.util.WinkTomcatServerBootstrap; import org.junit.AfterClass; import org.junit.BeforeClass; public class MessageRestServiceTest extends AbstractMessageRestServiceTest { protected static WinkTomcatServerBootstrap serverBootstrap; @BeforeClass public static void setUpEmbeddedRuntime() { serverBootstrap = new WinkTomcatServerBootstrap(); serverBootstrap.start(); } @AfterClass public static void tearDownEmbeddedRuntime() { serverBootstrap.stop(); } }
Java
let connectionIdx = 0; let messageIdx = 0; function addConnection(connection) { connection.connectionId = ++connectionIdx; addMessage('New connection #' + connectionIdx); connection.addEventListener('message', function(event) { messageIdx++; const data = JSON.parse(event.data); const logString = 'Message ' + messageIdx + ' from connection #' + connection.connectionId + ': ' + data.message; addMessage(logString, data.lang); maybeSetFruit(data.message); connection.send('Received message ' + messageIdx); }); connection.addEventListener('close', function(event) { addMessage('Connection #' + connection.connectionId + ' closed, reason = ' + event.reason + ', message = ' + event.message); }); }; /* Utils */ const fruitEmoji = { 'grapes': '\u{1F347}', 'watermelon': '\u{1F349}', 'melon': '\u{1F348}', 'tangerine': '\u{1F34A}', 'lemon': '\u{1F34B}', 'banana': '\u{1F34C}', 'pineapple': '\u{1F34D}', 'green apple': '\u{1F35F}', 'apple': '\u{1F34E}', 'pear': '\u{1F350}', 'peach': '\u{1F351}', 'cherries': '\u{1F352}', 'strawberry': '\u{1F353}' }; function addMessage(content, language) { const listItem = document.createElement("li"); if (language) { listItem.lang = language; } listItem.textContent = content; document.querySelector("#message-list").appendChild(listItem); }; function maybeSetFruit(message) { const fruit = message.toLowerCase(); if (fruit in fruitEmoji) { document.querySelector('#main').textContent = fruitEmoji[fruit]; } }; document.addEventListener('DOMContentLoaded', function() { if (navigator.presentation.receiver) { navigator.presentation.receiver.connectionList.then(list => { list.connections.map(connection => addConnection(connection)); list.addEventListener('connectionavailable', function(event) { addConnection(event.connection); }); }); } });
JavaScript
/* * Copyright 2013 Steve Vickers * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Created on: Jun 15, 2014 */ package reactivemongo.extensions.dsl.criteria import org.scalatest._ import org.scalatest.matchers._ import reactivemongo.bson._ /** * The '''UntypedWhereSpec''' type verifies the behaviour expected of the * `where` method in the [[reactivemongo.extensions.dsl.criteria.Untyped]] * `type`. * * @author svickers * */ class UntypedWhereSpec extends FlatSpec with Matchers { /// Class Imports import Untyped._ "An Untyped where" should "support 1 placeholder" in { val q = where { _.a === 1 } BSONDocument.pretty(q) shouldBe ( BSONDocument.pretty( BSONDocument( "a" -> BSONInteger(1) ) ) ); } it should "support 2 placeholders" in { val q = where { _.a === 1 && _.b === 2 } BSONDocument.pretty(q) shouldBe ( BSONDocument.pretty( BSONDocument( "$and" -> BSONArray( BSONDocument( "a" -> BSONInteger(1) ), BSONDocument( "b" -> BSONInteger(2) ) ) ) ) ); } it should "support 3 placeholders" in { val q = where { _.a === 1 && _.b === 2 && _.c === 3 } BSONDocument.pretty(q) shouldBe ( BSONDocument.pretty( BSONDocument( "$and" -> BSONArray( BSONDocument( "a" -> BSONInteger(1) ), BSONDocument( "b" -> BSONInteger(2) ), BSONDocument( "c" -> BSONInteger(3) ) ) ) ) ); } /// The library supports from 1 to 22 placeholders for the where method. it should "support 22 placeholders" in { val q = where { _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 && _.p === 0 } BSONDocument.pretty(q) shouldBe ( BSONDocument.pretty( BSONDocument( "$and" -> BSONArray(List.fill(22)(BSONDocument("p" -> BSONInteger(0)))) ) ) ); } }
Scala
/* * JBoss, Home of Professional Open Source * Copyright 2014, Red Hat, Inc. and/or its affiliates, and individual * contributors by the @authors tag. See the copyright.txt in the * distribution for a full listing of individual contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jboss.as.quickstart.deltaspike.partialbean; import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; import javax.enterprise.context.ApplicationScoped; /** * This class implements a dynamic DeltaSpike Partial Bean. It is bound to * one or more abstract classes or interfaces via the Binding Annotation * (@ExamplePartialBeanBinding below). * * All abstract, unimplemented methods from those beans will be implemented * via the invoke method. * */ @ExamplePartialBeanBinding @ApplicationScoped public class ExamplePartialBeanImplementation implements InvocationHandler { /** * In our example, this method will be invoked when the "sayHello" method is called. * * @param proxy The object upon which the method is being invoked. * @param method The method being invoked (sayHello in this QuickStart) * @param args The arguments being passed in to the invoked method */ public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { return "Hello " + args[0]; } }
Java
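// --- Editor's illustrative sketch (not part of the original file above) ---
// A hedged companion to the InvocationHandler above: the abstract partial bean it
// would back. The binding annotation must itself carry DeltaSpike's
// @PartialBeanBinding meta-annotation; the class name here is illustrative, while
// sayHello is the method the handler's javadoc refers to.
@ExamplePartialBeanBinding
public abstract class ExamplePartialBean {

    // No body needed: DeltaSpike generates a proxy that routes this call to
    // ExamplePartialBeanImplementation.invoke(proxy, method, new Object[]{name}),
    // which returns "Hello " + name.
    public abstract String sayHello(String name);
}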
/* * JBoss, Home of Professional Open Source * Copyright 2010, Red Hat, Inc. and/or its affiliates, and individual contributors * by the @authors tag. See the copyright.txt in the distribution for a * full listing of individual contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.hibernate.validator.cfg.defs; import javax.validation.constraints.Pattern; import org.hibernate.validator.cfg.ConstraintDef; import org.hibernate.validator.constraints.Email; /** * @author Hardy Ferentschik */ public class EmailDef extends ConstraintDef<EmailDef, Email> { public EmailDef() { super( Email.class ); } public EmailDef regexp(String regexp) { addParameter( "regexp", regexp ); return this; } public EmailDef flags(Pattern.Flag... flags) { addParameter( "flags", flags ); return this; } }
Java
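// --- Editor's illustrative sketch (not part of the original file above) ---
// A hedged usage example of the fluent definition above via Hibernate Validator's
// programmatic constraint API, in the 5.x style (entry points differ slightly
// between versions); the User type and its "email" field are assumptions.
import java.lang.annotation.ElementType;
import javax.validation.Validation;
import javax.validation.Validator;
import javax.validation.constraints.Pattern;
import org.hibernate.validator.HibernateValidator;
import org.hibernate.validator.HibernateValidatorConfiguration;
import org.hibernate.validator.cfg.ConstraintMapping;
import org.hibernate.validator.cfg.defs.EmailDef;

class EmailDefUsageSketch {
    Validator buildValidator() {
        HibernateValidatorConfiguration configuration =
                Validation.byProvider(HibernateValidator.class).configure();
        ConstraintMapping mapping = configuration.createConstraintMapping();
        mapping.type(User.class)                      // User is an assumed domain type
               .property("email", ElementType.FIELD)
               .constraint(new EmailDef()
                       .regexp(".*@example\\.org")
                       .flags(Pattern.Flag.CASE_INSENSITIVE));
        configuration.addMapping(mapping);
        return configuration.buildValidatorFactory().getValidator();
    }
}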
/* Copyright IBM Corp. 2016 All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package common import ( "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric/core/util" ) func (b *BlockHeader) Hash() []byte { data, err := proto.Marshal(b) // XXX this is wrong, protobuf is not the right mechanism to serialize for a hash if err != nil { panic("This should never fail and is generally irrecoverable") } return util.ComputeCryptoHash(data) } func (b *BlockData) Hash() []byte { data, err := proto.Marshal(b) // XXX this is wrong, protobuf is not the right mechanism to serialize for a hash, AND, it is not a MerkleTree hash if err != nil { panic("This should never fail and is generally irrecoverable") } return util.ComputeCryptoHash(data) }
Go
<?php /* * Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * GENERATED CODE WARNING * Generated by gapic-generator-php from the file * https://github.com/googleapis/googleapis/blob/master/google/cloud/accessapproval/v1/accessapproval.proto * Updates to the above are reflected here through a refresh process. */ namespace Google\Cloud\AccessApproval\V1; use Google\Cloud\AccessApproval\V1\Gapic\AccessApprovalGapicClient; /** {@inheritdoc} */ class AccessApprovalClient extends AccessApprovalGapicClient { // This class is intentionally empty, and is intended to hold manual additions to // the generated {@see AccessApprovalGapicClient} class. }
PHP
/* * Copyright (c) 2010. All rights reserved. */ package ro.isdc.wro.model.resource.processor; import static org.junit.Assert.assertEquals; import java.io.File; import java.net.URL; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import ro.isdc.wro.config.Context; import ro.isdc.wro.model.resource.ResourceType; import ro.isdc.wro.model.resource.processor.impl.css.ConformColorsCssProcessor; import ro.isdc.wro.util.WroTestUtils; /** * TestConformColorsCssProcessor. * * @author Alex Objelean * @created Created on Aug 15, 2010 */ public class TestConformColorsCssProcessor { private ResourcePreProcessor processor; @BeforeClass public static void onBeforeClass() { assertEquals(0, Context.countActive()); } @AfterClass public static void onAfterClass() { assertEquals(0, Context.countActive()); } @Before public void setUp() { processor = new ConformColorsCssProcessor(); } @Test public void testFromFolder() throws Exception { final URL url = getClass().getResource("conformColors"); final File testFolder = new File(url.getFile(), "test"); final File expectedFolder = new File(url.getFile(), "expected"); WroTestUtils.compareFromDifferentFoldersByExtension(testFolder, expectedFolder, "css", processor); } @Test public void shouldSupportCorrectResourceTypes() { WroTestUtils.assertProcessorSupportResourceTypes(processor, ResourceType.CSS); } }
Java
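// --- Editor's illustrative sketch (not part of the original test above) ---
// A hedged example of running the processor on an in-memory resource instead of
// the folder-comparison harness used by the test; the sample CSS is illustrative,
// and the exact output form depends on the processor's color table.
import java.io.StringReader;
import java.io.StringWriter;
import ro.isdc.wro.model.resource.Resource;
import ro.isdc.wro.model.resource.ResourceType;
import ro.isdc.wro.model.resource.processor.ResourcePreProcessor;
import ro.isdc.wro.model.resource.processor.impl.css.ConformColorsCssProcessor;

class ConformColorsSketch {
    String conform(String css) throws Exception {
        ResourcePreProcessor processor = new ConformColorsCssProcessor();
        StringWriter writer = new StringWriter();
        processor.process(Resource.create("sample.css", ResourceType.CSS),
                new StringReader(css), writer);
        return writer.toString(); // e.g. conform("a { color: AQUA; }") normalizes the color keyword
    }
}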
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/grappler/optimizers/arithmetic_optimizer.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h" #include "tensorflow/core/grappler/optimizers/constant_folding.h" #include "tensorflow/core/grappler/optimizers/model_pruner.h" #include "tensorflow/core/grappler/utils.h" #include "tensorflow/core/grappler/utils/grappler_test.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace grappler { namespace { constexpr char kHoistFactorOptimizerDiv[] = "ArithmeticOptimizer/HoistCommonFactor_Div_"; constexpr char kHoistFactorOptimizerMul[] = "ArithmeticOptimizer/HoistCommonFactor_Mul_"; constexpr char kHoistFactorOptimizerAdd[] = "ArithmeticOptimizer/HoistCommonFactor_Add_"; constexpr char kSimplifyAggregationConst[] = "ArithmeticOptimizer/SimplifyAggregation_Const_"; constexpr char kSimplifyAggregationMul[] = "ArithmeticOptimizer/SimplifyAggregation_Mul_"; // Optimized name of outer Mul node by HoistCommonFactorOutOfAggregation. string HoistMulName(const string& name) { return AddPrefixToNodeName(name, kHoistFactorOptimizerMul, ""); } // Optimized name of outer Div node by HoistCommonFactorOutOfAggregation. string HoistDivName(const string& name) { return AddPrefixToNodeName(name, kHoistFactorOptimizerDiv, ""); } // Optimized name of inner Add node by HoistCommonFactorOutOfAggregation. string HoistAddName(const string& name) { return AddPrefixToNodeName(name, kHoistFactorOptimizerAdd, ""); } // Optimized name of Const node by SimplifyAggregation. string AggregationConstName(const string& name) { return AddPrefixToNodeName(name, kSimplifyAggregationConst, ""); } // Optimized name of Mul node by SimplifyAggregation. 
string AggregationMulName(const string& name) { return AddPrefixToNodeName(name, kSimplifyAggregationMul, ""); } string OptimizedName(const string& name) { return AddPrefixToNodeName(name, kArithmeticOptimizer); } void VerifyGraphsMatch(const GraphDef& original_graph, const GraphDef& optimized_graph, int line) { EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << line; for (int i = 0; i < original_graph.node_size(); ++i) { const NodeDef& original = original_graph.node(i); const NodeDef& optimized = optimized_graph.node(i); EXPECT_EQ(original.name(), optimized.name()) << line; EXPECT_EQ(original.op(), optimized.op()) << line; EXPECT_EQ(original.input_size(), optimized.input_size()) << line; for (int j = 0; j < original.input_size(); ++j) { EXPECT_EQ(original.input(j), optimized.input(j)) << line; } } } } // namespace class ArithmeticOptimizerTest : public GrapplerTest { protected: // Optimize a graph using ArithmeticOptimizer and prune all the nodes that no // longer have any output consumers. void OptimizeAndPrune(ArithmeticOptimizer* optimizer, GrapplerItem* item, GraphDef* output) { TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output)); item->graph.Swap(output); output->Clear(); TF_EXPECT_OK(ModelPruner().Optimize(nullptr, *item, output)); } // Run ArithmeticOptimizer twice to make sure the rewrite is idempotent. void OptimizeTwice(ArithmeticOptimizer* optimizer, GrapplerItem* item, GraphDef* output) { TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output)); item->graph.Swap(output); output->Clear(); TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output)); } // Run ArithmeticOptimizer twice to make sure the rewrite is idempotent. // Optionally run a constant folding pass before pruning. void OptimizeTwiceAndPrune(ArithmeticOptimizer* optimizer, GrapplerItem* item, GraphDef* output, bool const_folding = false) { TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output)); item->graph.Swap(output); output->Clear(); TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output)); if (const_folding) { item->graph.Swap(output); output->Clear(); TF_EXPECT_OK(ConstantFolding(/*cpu_device=*/nullptr) .Optimize(nullptr, *item, output)); } item->graph.Swap(output); output->Clear(); TF_EXPECT_OK(ModelPruner().Optimize(nullptr, *item, output)); } // TODO(ezhulenev): Make private. 
After migration to stages each test // should explicitly enable required optimization for tests isolation void DisableAllStages(ArithmeticOptimizer* optimizer) { ArithmeticOptimizer::ArithmeticOptimizerOptions options; options.dedup_computations = false; options.combine_add_to_addn = false; options.convert_sqrt_div_to_rsqrt_mul = false; options.convert_pow = false; options.convert_log1p = false; options.optimize_max_or_min_of_monotonic = false; options.fold_conjugate_into_transpose = false; options.fold_multiply_into_conv = false; options.fold_transpose_into_matmul = false; options.hoist_common_factor_out_of_aggregation = false; options.hoist_cwise_unary_chains = false; options.minimize_broadcasts = false; options.remove_identity_transpose = false; options.remove_involution = false; options.remove_idempotent = false; options.remove_redundant_bitcast = false; options.remove_redundant_cast = false; options.remove_redundant_reshape = false; options.remove_negation = false; options.remove_logical_not = false; options.reorder_cast_and_transpose = false; options.replace_mul_with_square = false; options.simplify_aggregation = false; options.unary_ops_composition = false; optimizer->options_ = options; } void DisableAddToAddNCombining(ArithmeticOptimizer* optimizer) { optimizer->options_.combine_add_to_addn = false; } void EnableOnlyAddToAddNCombining(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.combine_add_to_addn = true; } void EnableOnlyFoldConjugateIntoTranspose(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.fold_conjugate_into_transpose = true; } void EnableOnlyFoldMultipleIntoConv(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.fold_multiply_into_conv = true; } void EnableOnlyFoldTransposeIntoMatMul(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.fold_transpose_into_matmul = true; } void EnableOnlyHoistCommonFactor(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.hoist_common_factor_out_of_aggregation = true; } void EnableOnlyMinimizeBroadcasts(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.minimize_broadcasts = true; } void EnableOnlyRemoveIdentityTranspose(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.remove_identity_transpose = true; } void EnableOnlyRemoveInvolution(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.remove_involution = true; } void EnableOnlyRemoveRedundantBitcast(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.remove_redundant_bitcast = true; } void EnableOnlyRemoveRedundantCast(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.remove_redundant_cast = true; } void EnableOnlyRemoveRedundantReshape(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.remove_redundant_reshape = true; } void EnableOnlyRemoveNegation(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.remove_negation = true; } void EnableOnlyReorderCastAndTranspose(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.reorder_cast_and_transpose = true; } void EnableOnlyReplaceMulWithSquare(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.replace_mul_with_square = true; } void EnableOnlyHoistCWiseUnaryChains(ArithmeticOptimizer* optimizer) { 
DisableAllStages(optimizer); optimizer->options_.hoist_cwise_unary_chains = true; } void EnableOnlySqrtDivToRsqrtMul(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.convert_sqrt_div_to_rsqrt_mul = true; } void EnableOnlyConvertPow(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.convert_pow = true; } void EnableOnlyRemoveIdempotent(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.remove_idempotent = true; } void EnableOnlyRemoveLogicalNot(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.remove_logical_not = true; } void EnableOnlySimplifyAggregation(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.simplify_aggregation = true; } void EnableOnlyLog1p(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.convert_log1p = true; } void EnableOnlyOptimizeMaxOrMinOfMonotonic(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.optimize_max_or_min_of_monotonic = true; } void EnableOnlyExpm1(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.convert_expm1 = true; } void EnableOnlyUnaryOpsComposition(ArithmeticOptimizer* optimizer) { DisableAllStages(optimizer); optimizer->options_.unary_ops_composition = true; } }; TEST_F(ArithmeticOptimizerTest, NoOp) { // This trivial graph is so basic there's nothing to optimize. TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"}); GrapplerItem item; CHECK(fake_input.NextItem(&item)); ArithmeticOptimizer optimizer; GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); VerifyGraphsMatch(item.graph, output, __LINE__); } TEST_F(ArithmeticOptimizerTest, OpDedupping) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output c1 = ops::Const(s.WithOpName("c1"), {3.14, 2.7}, {1, 2}); Output c2 = ops::Const(s.WithOpName("c2"), {3.14, 2.7}, {1, 2}); Output div = ops::Div(s.WithOpName("div"), c1, c2); GrapplerItem item; TF_CHECK_OK(s.ToGraphDef(&item.graph)); item.fetch = {"div"}; auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); ArithmeticOptimizer optimizer; GraphDef output; OptimizeTwice(&optimizer, &item, &output); NodeMap node_map(&output); EXPECT_EQ(2, output.node_size()); const NodeDef* new_c1 = node_map.GetNode("c1"); ASSERT_NE(new_c1, nullptr); const NodeDef* new_div = node_map.GetNode("div"); ASSERT_NE(new_div, nullptr); EXPECT_EQ(2, new_div->input_size()); EXPECT_EQ("c1", new_div->input(0)); EXPECT_EQ("c1", new_div->input(1)); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, OpDeduppingAssertAndCheckNumerics) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output p = ops::Placeholder(s, DT_BOOL, ops::Placeholder::Shape({})); Output c = ops::Const(s.WithOpName("c"), {3.14, 2.7}, {1, 2}); auto check1 = ops::CheckNumerics(s.WithOpName("check1"), c, "foo"); auto check2 = ops::CheckNumerics(s.WithOpName("check2"), c, "foo"); auto assert1 = ops::Assert(s.WithOpName("assert1"), p, {c}); auto assert2 = ops::Assert(s.WithOpName("assert2"), p, {c}); Output div = ops::Div(s.WithOpName("div").WithControlDependencies( {assert1.operation, assert2.operation}), check1, check2); GrapplerItem item; TF_CHECK_OK(s.ToGraphDef(&item.graph)); item.fetch = {"div"}; 
Tensor bool_t(DT_BOOL, TensorShape({})); bool_t.scalar<bool>().setConstant(true); auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {{"Placeholder", bool_t}}); EXPECT_EQ(1, tensors_expected.size()); ArithmeticOptimizer optimizer; GraphDef output; OptimizeTwice(&optimizer, &item, &output); NodeMap node_map(&output); EXPECT_EQ(5, output.node_size()); const NodeDef* new_div = node_map.GetNode("div"); ASSERT_NE(new_div, nullptr); EXPECT_EQ(4, new_div->input_size()); EXPECT_EQ("check1", new_div->input(0)); EXPECT_EQ("check1", new_div->input(1)); EXPECT_EQ("^assert1", new_div->input(2)); EXPECT_EQ("^assert1", new_div->input(3)); auto tensors = EvaluateNodes(output, item.fetch, {{"Placeholder", bool_t}}); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, OpDedupCommutative) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output c1 = ops::Const(s.WithOpName("c1"), {1.0f, 2.0f}, {1, 2}); Output c2 = ops::Const(s.WithOpName("c2"), {3.0f, 4.0f}, {1, 2}); Output mul1 = ops::Mul(s.WithOpName("mul1"), c1, c2); Output mul2 = ops::Mul(s.WithOpName("mul2"), c2, c1); Output div1 = ops::Div(s.WithOpName("div1"), mul1, mul2); GrapplerItem item; TF_CHECK_OK(s.ToGraphDef(&item.graph)); item.fetch = {"div1"}; auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); ArithmeticOptimizer optimizer; GraphDef output; OptimizeTwice(&optimizer, &item, &output); NodeMap node_map(&output); EXPECT_EQ(4, output.node_size()); const NodeDef* new_c1 = node_map.GetNode("c1"); ASSERT_NE(new_c1, nullptr); const NodeDef* new_c2 = node_map.GetNode("c2"); ASSERT_NE(new_c2, nullptr); const NodeDef* new_mul1 = node_map.GetNode("mul1"); ASSERT_NE(new_mul1, nullptr); EXPECT_EQ(2, new_mul1->input_size()); EXPECT_EQ("c1", new_mul1->input(0)); EXPECT_EQ("c2", new_mul1->input(1)); const NodeDef* new_div1 = node_map.GetNode("div1"); ASSERT_NE(new_div1, nullptr); EXPECT_EQ(2, new_div1->input_size()); EXPECT_EQ("mul1", new_div1->input(0)); EXPECT_EQ("mul1", new_div1->input(1)); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, ReplaceMulWithSquare) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2}); Output d = ops::Const(s.WithOpName("d"), {3.0f, 4.0f}, {1, 2}); Output mul = ops::Mul(s.WithControlDependencies(d).WithOpName("mul"), c, c); Output id = ops::Identity(s.WithOpName("id"), mul); GrapplerItem item; item.fetch = {"id"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyReplaceMulWithSquare(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); EXPECT_EQ(4, output.node_size()); NodeMap node_map(&output); const string p = "ArithmeticOptimizer/ReplaceMulWithSquare"; const NodeDef* square_node = node_map.GetNode(strings::StrCat(p, "_", "mul")); ASSERT_NE(square_node, nullptr); EXPECT_EQ("Square", square_node->op()); EXPECT_EQ("c", square_node->input(0)); EXPECT_EQ("^d", square_node->input(1)); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, RemoveInvolution_AdjacentNodes) { tensorflow::Scope s = 
tensorflow::Scope::NewRootScope(); auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2}); auto neg1 = ops::Neg(s.WithOpName("neg1"), c); auto neg2 = ops::Neg(s.WithOpName("neg2"), neg1); auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), neg2); auto recip2 = ops::Reciprocal(s.WithOpName("recip2"), recip1); auto id = ops::Identity(s.WithOpName("id"), recip2); GrapplerItem item; item.fetch = {"id"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveInvolution(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); // Negation and Reciprocal nodes cancelled each other. EXPECT_EQ(2, output.node_size()); EXPECT_EQ("id", output.node(1).name()); EXPECT_EQ("c", output.node(1).input(0)); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, RemoveInvolution_AroundValuePreservingChain) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2}); auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), c); auto id1 = ops::Identity(s.WithOpName("id1"), recip1); auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), id1); auto recip2 = ops::Reciprocal(s.WithOpName("recip2"), squeeze); auto id2 = ops::Identity(s.WithOpName("id2"), recip2); std::vector<string> fetch = {"id2"}; GrapplerItem item; item.fetch = fetch; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, fetch); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveInvolution(&optimizer); OptimizeTwiceAndPrune(&optimizer, &item, &output); // Check that Reciprocal nodes were removed from the graph. EXPECT_EQ(3, output.node_size()); // And const directly flows into squeeze. int found = 0; for (const NodeDef& node : output.node()) { if (node.name() == "squeeze") { EXPECT_EQ("c", node.input(0)); found++; } else if (node.name() == "id2") { EXPECT_EQ("squeeze", node.input(0)); found++; } } EXPECT_EQ(2, found); auto tensors = EvaluateNodes(output, fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, RemoveInvolution_SkipControlDependencies) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2}); auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), c); auto id1 = ops::Identity(s.WithOpName("id1"), recip1); auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), id1); auto recip2 = ops::Reciprocal( s.WithOpName("recip2").WithControlDependencies(squeeze), c); auto id2 = ops::Identity(s.WithOpName("id2"), recip2); std::vector<string> fetch = {"id2"}; GrapplerItem item; item.fetch = fetch; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, fetch); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveInvolution(&optimizer); OptimizeTwice(&optimizer, &item, &output); // do not prune in this test // The optimizer should be a noop. 
VerifyGraphsMatch(item.graph, output, __LINE__); auto tensors = EvaluateNodes(output, fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, TrivialSumsSimple) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2}); Output add = ops::Add(s.WithOpName("add"), x, x); Output id = ops::Identity(s.WithOpName("id"), add); GrapplerItem item; item.fetch = {"id"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); ArithmeticOptimizer optimizer; GraphDef output; OptimizeTwice(&optimizer, &item, &output); NodeMap node_map(&output); EXPECT_EQ(5, output.node_size()); const string optimized_const_name = AggregationConstName("add"); const string optimized_mul_name = AggregationMulName("add"); const NodeDef* new_const = node_map.GetNode(optimized_const_name); ASSERT_NE(new_const, nullptr); EXPECT_EQ("^x", new_const->input(0)); EXPECT_EQ(string("\0\0\0@", 4), new_const->attr().at("value").tensor().tensor_content()); const NodeDef* new_mul = node_map.GetNode(optimized_mul_name); ASSERT_NE(new_mul, nullptr); EXPECT_EQ(optimized_const_name, new_mul->input(0)); EXPECT_EQ("x", new_mul->input(1)); const NodeDef* new_id = node_map.GetNode("id"); ASSERT_NE(new_id, nullptr); EXPECT_EQ(optimized_mul_name, new_id->input(0)); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, TrivialSumsSimpleWithControlDep) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2}); Output x = ops::Const(s.WithOpName("x"), {3.0f, 4.0f}, {1, 2}); Output add = ops::Add(s.WithOpName("add").WithControlDependencies(y), x, x); Output id = ops::Identity(s.WithOpName("id"), add); GrapplerItem item; TF_CHECK_OK(s.ToGraphDef(&item.graph)); std::vector<string> fetch = {"id"}; auto tensors_expected = EvaluateNodes(item.graph, fetch); EXPECT_EQ(1, tensors_expected.size()); ArithmeticOptimizer optimizer; GraphDef output; OptimizeTwice(&optimizer, &item, &output); NodeMap node_map(&output); EXPECT_EQ(6, output.node_size()); const string optimized_const_name = AggregationConstName("add"); const string optimized_mul_name = AggregationMulName("add"); const NodeDef* new_const = node_map.GetNode(optimized_const_name); ASSERT_NE(new_const, nullptr); EXPECT_EQ("^x", new_const->input(0)); EXPECT_EQ(string("\0\0\0@", 4), new_const->attr().at("value").tensor().tensor_content()); const NodeDef* new_mul = node_map.GetNode(optimized_mul_name); ASSERT_NE(new_mul, nullptr); EXPECT_EQ(optimized_const_name, new_mul->input(0)); EXPECT_EQ("x", new_mul->input(1)); EXPECT_EQ("^y", new_mul->input(2)); const NodeDef* new_id = node_map.GetNode("id"); ASSERT_NE(new_id, nullptr); EXPECT_EQ(optimized_mul_name, new_id->input(0)); auto tensors = EvaluateNodes(output, fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, TrivialSumsRepeatedAdd) { // Test case from b/69059093. 
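  // Each Add of the placeholder with itself can be rewritten as a multiply by
  // a constant 2, so the whole tree is expected to collapse into a single Mul
  // of the placeholder by a sum of constants (see the expected rewrite below),
  // regardless of the mixed CPU/GPU placement assigned to the nodes.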
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output p = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({10, 10}));
  Output add = ops::Add(s.WithOpName("Add"), p, p);
  Output add1 = ops::Add(s.WithOpName("Add_1"), p, p);
  Output add4 = ops::Add(s.WithOpName("Add_4"), add, add1);
  Output add5 = ops::Add(s.WithOpName("Add_5"), add, add1);
  Output add6 = ops::Add(s.WithOpName("Add_6"), add4, add5);
  Output id = ops::Identity(s.WithOpName("id"), add6);

  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  const std::vector<string> devices{
      "/device:CPU:0", "/device:GPU:0", "/device:CPU:0", "/device:GPU:1",
      "/device:CPU:0", "/device:CPU:0", "/device:CPU:0",
  };
  for (int i = 0; i < item.graph.node_size(); ++i) {
    item.graph.mutable_node(i)->set_device(devices[i]);
  }

  ArithmeticOptimizer optimizer;
  DisableAddToAddNCombining(&optimizer);

  GraphDef output;
  OptimizeTwice(&optimizer, &item, &output);

  // We expect the following rewrite(s) to occur:
  //
  // Mul(p,
  //     Add_6(Add_4(Const(2), Const(2)),
  //           Add_5(Const(2), Const(2))))
  NodeMap node_map(&output);

  EXPECT_EQ(17, output.node_size());

  const NodeDef* id_node = node_map.GetNode("id");
  ASSERT_NE(id_node, nullptr);
  EXPECT_EQ(1, id_node->input_size());
  EXPECT_EQ(HoistMulName("Add_6"), id_node->input(0));

  const NodeDef* mul_node = node_map.GetNode(HoistMulName("Add_6"));
  ASSERT_NE(mul_node, nullptr);
  EXPECT_EQ(2, mul_node->input_size());
  EXPECT_EQ("Placeholder", mul_node->input(0));
  EXPECT_EQ(HoistAddName("Add_6"), mul_node->input(1));

  const NodeDef* add_6_node = node_map.GetNode(HoistAddName("Add_6"));
  ASSERT_NE(add_6_node, nullptr);
  EXPECT_EQ(2, add_6_node->input_size());
  EXPECT_EQ(HoistAddName("Add_4"), add_6_node->input(0));
  EXPECT_EQ(HoistAddName("Add_5"), add_6_node->input(1));

  const NodeDef* add_4_node = node_map.GetNode(HoistAddName("Add_4"));
  ASSERT_NE(add_4_node, nullptr);
  EXPECT_EQ("Add", add_4_node->op());
  EXPECT_EQ(2, add_4_node->input_size());
  EXPECT_EQ(AggregationConstName("Add"), add_4_node->input(0));
  EXPECT_EQ(AggregationConstName("Add_1"), add_4_node->input(1));

  const NodeDef* add_5_node = node_map.GetNode(HoistAddName("Add_5"));
  ASSERT_NE(add_5_node, nullptr);
  EXPECT_EQ("Add", add_5_node->op());
  EXPECT_EQ(2, add_5_node->input_size());
  EXPECT_EQ(AggregationConstName("Add"), add_5_node->input(0));
  EXPECT_EQ(AggregationConstName("Add_1"), add_5_node->input(1));

  const NodeDef* add_const_node =
      node_map.GetNode(AggregationConstName("Add"));
  ASSERT_NE(add_const_node, nullptr);
  EXPECT_EQ("Const", add_const_node->op());
  EXPECT_EQ(1, add_const_node->input_size());
  EXPECT_EQ("^Placeholder", add_const_node->input(0));

  const NodeDef* add_1_const_node =
      node_map.GetNode(AggregationConstName("Add_1"));
  ASSERT_NE(add_1_const_node, nullptr);
  EXPECT_EQ("Const", add_1_const_node->op());
  EXPECT_EQ(1, add_1_const_node->input_size());
  EXPECT_EQ("^Placeholder", add_1_const_node->input(0));
}

TEST_F(ArithmeticOptimizerTest, HoistFactorMul) {
  for (bool matching_shapes : {true, false}) {
    for (bool use_addn : {true, false}) {
      tensorflow::Scope s = tensorflow::Scope::NewRootScope();
      Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
      Output y1 = ops::Const(s.WithOpName("y1"), {3.0f, 4.0f}, {1, 2});
      Output y2 = matching_shapes
                      ? ops::Const(s.WithOpName("y2"), {5.0f, 6.0f}, {1, 2})
                      : ops::Const(s.WithOpName("y2"), {5.0f}, {1, 1});
      Output mul1 = ops::Mul(s.WithOpName("mul1"), x, y1);
      Output mul2 = ops::Mul(s.WithOpName("mul2"), y2, x);
      Output id =
          use_addn
              ? ops::Identity(s.WithOpName("id"),
                              ops::AddN(s.WithOpName("add"), {mul1, mul2}))
              : ops::Identity(s.WithOpName("id"),
                              ops::Add(s.WithOpName("add"), mul1, mul2));

      GrapplerItem item;
      item.fetch = {"id"};
      TF_CHECK_OK(s.ToGraphDef(&item.graph));
      auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
      EXPECT_EQ(1, tensors_expected.size());

      ArithmeticOptimizer optimizer;
      EnableOnlyHoistCommonFactor(&optimizer);

      GraphDef output;
      OptimizeTwice(&optimizer, &item, &output);

      // We expect the following rewrite(s) to occur:
      //
      //        Add                 Mul
      //       /   \               /   \
      //     Mul   Mul     ->     x    Add
      //    /  \   /  \               /   \
      //   x   y1 y2   x             y1    y2
      //
      // If the "root" op is AddN and the shapes do not match, this rewrite is
      // not possible and the graph should stay intact.
      NodeMap node_map(&output);

      if (use_addn && !matching_shapes) {
        VerifyGraphsMatch(item.graph, output, __LINE__);
      } else {
        EXPECT_EQ(9, output.node_size());

        const NodeDef* new_add_node = node_map.GetNode(HoistAddName("add"));
        ASSERT_NE(new_add_node, nullptr) << "Hoisted Add node not found";
        EXPECT_EQ("y1", new_add_node->input(0));
        EXPECT_EQ("y2", new_add_node->input(1));

        const NodeDef* new_mul_node = node_map.GetNode(HoistMulName("add"));
        ASSERT_NE(new_mul_node, nullptr) << "Hoisted Mul node not found";
        EXPECT_EQ("x", new_mul_node->input(0));
        EXPECT_EQ(new_add_node->name(), new_mul_node->input(1));

        const NodeDef* id_node = node_map.GetNode("id");
        ASSERT_NE(id_node, nullptr) << "Id node not found";
        EXPECT_EQ("id", id_node->name());
        EXPECT_EQ(HoistMulName("add"), id_node->input(0));
      }
      auto tensors = EvaluateNodes(output, item.fetch);
      EXPECT_EQ(1, tensors.size());
      test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
    }
  }
}

TEST_F(ArithmeticOptimizerTest, HoistFactorDiv) {
  for (bool matching_shapes : {true, false}) {
    for (bool use_addn : {true, false}) {
      for (bool use_ints : {true, false}) {
        tensorflow::Scope s = tensorflow::Scope::NewRootScope();
        Output x = use_ints
                       ? ops::Const(s.WithOpName("x"), {1, 2}, {1, 2})
                       : ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
        Output y1 = use_ints
                        ? ops::Const(s.WithOpName("y1"), {3, 4}, {1, 2})
                        : ops::Const(s.WithOpName("y1"), {3.0f, 4.0f}, {1, 2});
        Output y2;
        if (matching_shapes) {
          y2 = use_ints ? ops::Const(s.WithOpName("y2"), {5, 6}, {1, 2})
                        : ops::Const(s.WithOpName("y2"), {5.0f, 6.0f}, {1, 2});
        } else {
          y2 = use_ints ? ops::Const(s.WithOpName("y2"), {5}, {1, 1})
                        : ops::Const(s.WithOpName("y2"), {5.0f}, {1, 1});
        }
        Output div1 = ops::Div(s.WithOpName("div1"), y1, x);
        Output div2 = ops::Div(s.WithOpName("div2"), y2, x);
        Output id =
            use_addn
                ? ops::Identity(s.WithOpName("id"),
                                ops::AddN(s.WithOpName("add"), {div1, div2}))
                : ops::Identity(s.WithOpName("id"),
                                ops::Add(s.WithOpName("add"), div1, div2));

        GrapplerItem item;
        item.fetch = {"id"};
        TF_CHECK_OK(s.ToGraphDef(&item.graph));
        auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
        EXPECT_EQ(1, tensors_expected.size());

        ArithmeticOptimizer optimizer;
        EnableOnlyHoistCommonFactor(&optimizer);

        GraphDef output;
        OptimizeTwice(&optimizer, &item, &output);

        // We expect the following rewrite(s) to occur:
        //
        //        Add                 Div
        //       /   \               /   \
        //     Div   Div     ->    Add    x
        //    /  \   /  \         /   \
        //   y1   x y2   x       y1    y2
        //
        // If the "root" op is AddN and the shapes do not match, this rewrite
        // is not possible and the graph should stay intact.
NodeMap node_map(&output); if ((use_addn && !matching_shapes) || use_ints) { VerifyGraphsMatch(item.graph, output, __LINE__); } else { EXPECT_EQ(9, output.node_size()); const NodeDef* new_add_node = node_map.GetNode(HoistAddName("add")); ASSERT_TRUE(new_add_node != nullptr) << "Hoisted Add node not found"; EXPECT_EQ("y1", new_add_node->input(0)); EXPECT_EQ("y2", new_add_node->input(1)); const NodeDef* new_div_node = node_map.GetNode(HoistDivName("add")); ASSERT_TRUE(new_div_node != nullptr) << "Hoisted Div node not found"; EXPECT_EQ(new_add_node->name(), new_div_node->input(0)); EXPECT_EQ("x", new_div_node->input(1)); const NodeDef* id_node = node_map.GetNode("id"); ASSERT_TRUE(id_node != nullptr) << "Id node not found"; EXPECT_EQ("id", id_node->name()); EXPECT_EQ(HoistDivName("add"), id_node->input(0)); } auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); if (use_ints) { test::ExpectTensorEqual<int32>(tensors_expected[0], tensors[0]); } else { test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } } } } } TEST_F(ArithmeticOptimizerTest, FuseConjAndTranspose) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output re = ops::Const(s.WithOpName("re"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2}); Output im = ops::Const(s.WithOpName("im"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2}); Output z = ops::Complex(s.WithOpName("z"), re, im); Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2}); Output conj = ops::Conj(s.WithOpName("conj"), z); Output transp = ops::Transpose(s.WithOpName("trans"), conj, perm); GrapplerItem item; item.fetch = {"trans"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); ArithmeticOptimizer optimizer; GraphDef output; OptimizeTwice(&optimizer, &item, &output); NodeMap node_map(&output); EXPECT_EQ(7, output.node_size()); const string p = "ArithmeticOptimizer/FoldConjugateIntoTranspose"; const string optimized_name = strings::StrCat(p, "_", "trans"); const NodeDef* trans_fused_node = node_map.GetNode(optimized_name); ASSERT_NE(trans_fused_node, nullptr); EXPECT_EQ("ConjugateTranspose", trans_fused_node->op()); EXPECT_EQ("z", trans_fused_node->input(0)); EXPECT_EQ("perm", trans_fused_node->input(1)); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorEqual<complex64>(tensors_expected[0], tensors[0]); } TEST_F(ArithmeticOptimizerTest, FuseConjAndConjugateTranspose) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output re = ops::Const(s.WithOpName("re"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2}); Output im = ops::Const(s.WithOpName("im"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2}); Output z = ops::Complex(s.WithOpName("z"), re, im); Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2}); Output conj = ops::Conj(s.WithOpName("conj"), z); Output transp = ops::ConjugateTranspose(s.WithOpName("conjugate_trans"), conj, perm); GrapplerItem item; item.fetch = {"conjugate_trans"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); ArithmeticOptimizer optimizer; GraphDef output; OptimizeTwice(&optimizer, &item, &output); NodeMap node_map(&output); EXPECT_EQ(7, output.node_size()); const string p = "ArithmeticOptimizer/FoldConjugateIntoTranspose"; const string optimized_name = strings::StrCat(p, "_", "conjugate_trans"); const NodeDef* conjugate_trans_fused_node = node_map.GetNode(optimized_name); 
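  // The two conjugations cancel: ConjugateTranspose(Conj(z)) == Transpose(z),
  // so the fused node must be a plain Transpose.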
ASSERT_NE(conjugate_trans_fused_node, nullptr); EXPECT_EQ("Transpose", conjugate_trans_fused_node->op()); EXPECT_EQ("z", conjugate_trans_fused_node->input(0)); EXPECT_EQ("perm", conjugate_trans_fused_node->input(1)); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorEqual<complex64>(tensors_expected[0], tensors[0]); } TEST_F(ArithmeticOptimizerTest, FuseTransposeAndConj) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output re = ops::Const(s.WithOpName("re"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2}); Output im = ops::Const(s.WithOpName("im"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2}); Output z = ops::Complex(s.WithOpName("z"), re, im); Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2}); Output trans = ops::Transpose(s.WithOpName("trans"), z, perm); Output conj = ops::Conj(s.WithOpName("conj"), trans); GrapplerItem item; item.fetch = {"conj"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); ArithmeticOptimizer optimizer; GraphDef output; OptimizeTwice(&optimizer, &item, &output); NodeMap node_map(&output); EXPECT_EQ(7, output.node_size()); const string p = "ArithmeticOptimizer/FoldConjugateIntoTranspose"; const string optimized_name = strings::StrCat(p, "_", "conj"); const NodeDef* conj_fused_node = node_map.GetNode(optimized_name); ASSERT_NE(conj_fused_node, nullptr); EXPECT_EQ("ConjugateTranspose", conj_fused_node->op()); EXPECT_EQ("z", conj_fused_node->input(0)); EXPECT_EQ("perm", conj_fused_node->input(1)); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorEqual<complex64>(tensors_expected[0], tensors[0]); } TEST_F(ArithmeticOptimizerTest, FoldTransposeIntoMatMul) { for (const string matmul_type : {"MatMul", "SparseMatMul", "BatchMatMul"}) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output a = ops::Const(s.WithOpName("a"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2}); Output b = ops::Const(s.WithOpName("b"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2}); Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2}); Output trans_a = ops::Transpose(s.WithOpName("trans_a"), a, perm); Output trans_b = ops::Transpose(s.WithOpName("trans_b"), b, perm); auto matmul_op = s.WithOpName("matmul"); if (matmul_type == "MatMul") { Output matmul = ops::MatMul(matmul_op, trans_a, trans_b); } else if (matmul_type == "SparseMatMul") { Output matmul = ops::SparseMatMul(matmul_op, trans_a, trans_b); } else if (matmul_type == "BatchMatMul") { Output matmul = ops::BatchMatMul(matmul_op, trans_a, trans_b); } GrapplerItem item; item.fetch = {"matmul"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); ArithmeticOptimizer optimizer; EnableOnlyFoldTransposeIntoMatMul(&optimizer); GraphDef output; OptimizeTwice(&optimizer, &item, &output); NodeMap node_map(&output); EXPECT_EQ(7, output.node_size()); const string p = "ArithmeticOptimizer/FoldTransposeIntoMatMul"; const string optimized_name = strings::StrCat(p, "_", "matmul"); const NodeDef* matmul_fused_node = node_map.GetNode(optimized_name); ASSERT_NE(matmul_fused_node, nullptr); EXPECT_EQ("a", matmul_fused_node->input(0)); EXPECT_EQ("b", matmul_fused_node->input(1)); if (matmul_type == "BatchMatMul") { EXPECT_TRUE(matmul_fused_node->attr().at("adj_x").b()); EXPECT_TRUE(matmul_fused_node->attr().at("adj_y").b()); } else { EXPECT_TRUE(matmul_fused_node->attr().at("transpose_a").b()); 
EXPECT_TRUE(matmul_fused_node->attr().at("transpose_b").b()); } auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } } TEST_F(ArithmeticOptimizerTest, FoldConjugateTransposeIntoBatchMatMul) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output re_a = ops::Const(s.WithOpName("re_a"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2}); Output im_a = ops::Const(s.WithOpName("im_a"), {-1.0f, -2.0f, -3.0f, -4.0f}, {2, 2}); Output re_b = ops::Const(s.WithOpName("re_b"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2}); Output im_b = ops::Const(s.WithOpName("im_b"), {-5.0f, -6.0f, -7.0f, -8.0f}, {2, 2}); Output a = ops::Complex(s.WithOpName("a"), re_a, im_a); Output b = ops::Complex(s.WithOpName("b"), re_b, im_b); Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2}); Output trans_a = ops::ConjugateTranspose(s.WithOpName("trans_a"), a, perm); Output trans_b = ops::ConjugateTranspose(s.WithOpName("trans_b"), b, perm); Output matmul = ops::BatchMatMul(s.WithOpName("matmul"), trans_a, trans_b); GrapplerItem item; item.fetch = {"matmul"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); ArithmeticOptimizer optimizer; GraphDef output; OptimizeTwice(&optimizer, &item, &output); NodeMap node_map(&output); ASSERT_EQ(11, output.node_size()); const string p = "ArithmeticOptimizer/FoldTransposeIntoMatMul"; const string optimized_name = strings::StrCat(p, "_", "matmul"); const NodeDef* optimized_matmul = node_map.GetNode(optimized_name); ASSERT_NE(optimized_matmul, nullptr); EXPECT_EQ("a", optimized_matmul->input(0)); EXPECT_EQ("b", optimized_matmul->input(1)); EXPECT_TRUE(optimized_matmul->attr().at("adj_x").b()); EXPECT_TRUE(optimized_matmul->attr().at("adj_y").b()); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<complex64>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_IdentityReshape) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output inputs = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({-1, 3, 28, 28})); Output inputs_shape = ops::Shape(s, inputs); // The target shape of the reshape is the concatenation of `batch_size` and // [3,28,28]. 
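  // That is exactly the (symbolic) shape the input already has, so the
  // Reshape is an identity and should be removed.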
  Output batch_size = ops::Slice(s, inputs_shape, ops::Const(s, {0}, {1}),
                                 ops::Const(s, {1}, {1}));
  Output target_shape = ops::Concat(
      s.WithOpName("target_shape"),
      {batch_size, ops::Const(s, {3, 28, 28}, {3})}, ops::Const(s, {0}, {}));
  Output reshape = ops::Reshape(s, inputs, target_shape);
  Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 3, 28, 28}));
  auto tensors_expected =
      EvaluateNodes(item.graph, item.fetch, {{"Placeholder", x_t}});
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantReshape(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);

  EXPECT_EQ(0, CountOpNodes(output, "Reshape"));
  auto tensors = EvaluateNodes(output, item.fetch, {{"Placeholder", x_t}});
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}

TEST_F(ArithmeticOptimizerTest,
       RemoveRedundantReshape_IdentityReshapeBetweenSymbolicShapes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs =
      ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({-1, 3, -1, -1}));
  Output inputs_shape = ops::Shape(s, inputs);
  // The target shape of the reshape is the concatenation of `batch_size`, 3,
  // `height`, and `width`.
  Output batch_size = ops::Slice(s, inputs_shape, ops::Const(s, {0}, {1}),
                                 ops::Const(s, {1}, {1}));
  Output height = ops::Slice(s, inputs_shape, ops::Const(s, {2}, {1}),
                             ops::Const(s, {1}, {1}));
  Output width = ops::Slice(s, inputs_shape, ops::Const(s, {3}, {1}),
                            ops::Const(s, {1}, {1}));
  Output target_shape =
      ops::Concat(s.WithOpName("target_shape"),
                  {batch_size, ops::Const(s, {3}, {1}), height, width},
                  ops::Const(s, {0}, {}));
  Output reshape = ops::Reshape(s, inputs, target_shape);
  Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);

  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 3, 28, 28}));
  GrapplerItem item;
  item.fetch = {"outputs"};
  item.feed = {{"Placeholder", x_t}};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  // Assume valid feed shape in aggressive mode.
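  // (Only in aggressive mode does the optimizer trust that the actual feed
  // matches the placeholder's symbolic shape; compare the NotAssumeValidFeeds
  // test below.)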
  ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
  EnableOnlyRemoveRedundantReshape(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);

  EXPECT_EQ(0, CountOpNodes(output, "Reshape"));
  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}

TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_NotAssumeValidFeeds) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs =
      ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({4, 3, 28, 28}));
  Output target_shape = ops::Const(s, {4, 3, 28, 28}, {4});
  Output reshape = ops::Reshape(s, inputs, target_shape);
  Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);

  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({4, 3, 28, 28}));
  GrapplerItem item;
  item.fetch = {"outputs"};
  item.feed = {{"Placeholder", x_t}};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantReshape(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);

  // The reshape is preserved because the shape of the placeholder can be
  // different from the shape of the actual feed.
  EXPECT_EQ(1, CountOpNodes(output, "Reshape"));

  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}

TEST_F(ArithmeticOptimizerTest,
       RemoveRedundantReshape_AssumeValidFeedsInAggressiveMode) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs =
      ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({4, 3, 28, 28}));
  Output target_shape = ops::Const(s, {4, 3, 28, 28}, {4});
  Output reshape = ops::Reshape(s, inputs, target_shape);
  Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);

  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({4, 3, 28, 28}));
  GrapplerItem item;
  item.fetch = {"outputs"};
  item.feed = {{"Placeholder", x_t}};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
  EnableOnlyRemoveRedundantReshape(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);

  EXPECT_EQ(0, CountOpNodes(output, "Reshape"));
  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}

TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_NotIdentityReshape) {
  // Reshape from [-1,3,28,28] to [8,-1,28,28] is not identity, because it can
  // be from [4,3,28,28] to [8,6,28,28].
tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output inputs = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({-1, 3, 28, 28})); Output reshape = ops::Reshape(s, inputs, ops::Const(s, {8, -1, 28, 28}, {4})); Output outputs = ops::Identity(s.WithOpName("outputs"), reshape); GrapplerItem item; item.fetch = {"outputs"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({8, 3, 28, 28})); item.feed = {{"Placeholder", x_t}}; auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveRedundantReshape(&optimizer); OptimizeTwiceAndPrune(&optimizer, &item, &output); EXPECT_EQ(1, CountOpNodes(output, "Reshape")); auto tensors = EvaluateNodes(output, item.fetch, item.feed); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_NotIdentityReshapeTooManyUnknownDimSizes) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output inputs = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({4, 3})); Output reshape = ops::Reshape(s, inputs, ops::Const(s, {-1, -1}, {2})); Output outputs = ops::Identity(s.WithOpName("outputs"), reshape); GrapplerItem item; item.fetch = {"outputs"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveRedundantReshape(&optimizer); OptimizeTwiceAndPrune(&optimizer, &item, &output); EXPECT_EQ(1, CountOpNodes(output, "Reshape")); } TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_CombineReshapes) { // Converts an NCHW_VECT_C tensor to NHWC and then flattens it to 2D. The two // reshapes should be combined. 
tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output nchw_vect_c = ops::Placeholder(s.WithOpName("nchw_vect_c"), DT_INT8, ops::Placeholder::Shape({8, 3, 28, 28, 4})); Output transpose = ops::Transpose(s.WithOpName("transpose"), nchw_vect_c, ops::Const(s.WithOpName("perm"), {0, 2, 3, 1, 4}, {5})); Output nhwc = ops::Reshape( s.WithOpName("nhwc"), transpose, ops::Const(s.WithOpName("nhwc_shape"), {8, 28, 28, 12}, {4})); Output flatten = ops::Reshape( s.WithOpName("flatten"), nhwc, ops::Const(s.WithOpName("flatten_shape"), {8, 28 * 28 * 12}, {2})); Output outputs = ops::Identity(s.WithOpName("outputs"), flatten); GrapplerItem item; item.fetch = {"outputs"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto x_t = GenerateRandomTensor<DT_INT8>(TensorShape({8, 3, 28, 28, 4})); item.feed = {{"nchw_vect_c", x_t}}; auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveRedundantReshape(&optimizer); OptimizeTwiceAndPrune(&optimizer, &item, &output); EXPECT_EQ(1, CountOpNodes(output, "Reshape")); auto tensors = EvaluateNodes(output, item.fetch, item.feed); EXPECT_EQ(1, tensors.size()); test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]); } TEST_F(ArithmeticOptimizerTest, ReorderTransposeCast) { tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/gpu:0"); Output nhwc_uint8 = ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3})); Output nhwc_fp32 = ops::Cast(s, nhwc_uint8, DT_FLOAT); Output nchw_fp32 = ops::Transpose(s, nhwc_fp32, ops::Const(s, {0, 3, 1, 2}, {4})); Output outputs = ops::Identity(s.WithOpName("outputs"), nchw_fp32); GrapplerItem item; item.fetch = {"outputs"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); GraphDef output; TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output)); item.graph.Swap(&output); TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output)); const NodeDef* transpose_node = nullptr; for (const NodeDef& node : output.node()) { if (node.op() == "Transpose") { EXPECT_EQ(transpose_node, nullptr); EXPECT_EQ(DT_UINT8, node.attr().at("T").type()); transpose_node = &node; } } EXPECT_NE(transpose_node, nullptr); for (const NodeDef& node : output.node()) { if (node.op() == "Cast") { EXPECT_EQ(NodeName(node.input(0)), transpose_node->name()); } } } TEST_F(ArithmeticOptimizerTest, NoReorderTransposeCast) { tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/gpu:0"); Output nhwc_fp32 = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({8, 28, 28, 3})); Output nhwc_uint8 = ops::Cast(s, nhwc_fp32, DT_UINT8); Output nchw_uint8 = ops::Transpose(s, nhwc_uint8, ops::Const(s, {0, 3, 1, 2}, {4})); Output outputs = ops::Identity(s.WithOpName("outputs"), nchw_uint8); GrapplerItem item; item.fetch = {"outputs"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); GraphDef output; TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output)); item.graph.Swap(&output); TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output)); int num_transposes = 0; for (const NodeDef& node : output.node()) { if (node.op() == "Transpose") { EXPECT_EQ(DT_UINT8, node.attr().at("T").type()); EXPECT_EQ(node.input(0), "Cast"); ++num_transposes; } } EXPECT_EQ(1, num_transposes); } TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposes) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output inputs_shape = ops::Const(s.WithOpName("inputs_shape"), {8, 3, 28, 28}, {4}); Output inputs = 
ops::RandomUniform(s.WithOpName("inputs"), inputs_shape, DT_FLOAT); Output perm1 = ops::Const(s.WithOpName("perm1"), {0, 2, 3, 1}, {4}); Output perm2 = ops::Const(s.WithOpName("perm2"), {0, 3, 1, 2}, {4}); Output perm3 = ops::Const(s.WithOpName("perm3"), {0, 1, 2, 3}, {4}); Output transpose1 = ops::Transpose(s.WithOpName("transpose1"), inputs, perm1); Output transpose2 = ops::Transpose(s.WithOpName("transpose2"), transpose1, perm2); Output transpose3 = ops::Transpose(s.WithOpName("transpose3"), inputs, perm3); Output id1 = ops::Identity(s.WithOpName("id1"), transpose2); Output id2 = ops::Identity(s.WithOpName("id2"), transpose3); GrapplerItem item; item.fetch = {"id1", "id2"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveIdentityTranspose(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); std::set<string> nodes_after_optimization; for (const NodeDef& node : output.node()) { nodes_after_optimization.insert(node.name()); } EXPECT_EQ(nodes_after_optimization, std::set<string>({"id1", "id2", "inputs_shape", "inputs"})); } TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposesMultipleOutputs) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output inputs_shape = ops::Const(s.WithOpName("inputs_shape"), {8, 9, 28, 28}, {4}); Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT, ops::Placeholder::Shape({8, 12, 28, 28})); OutputList split = ops::Split(s, ops::Const(s, 1), inputs, 3).output; Output perm1 = ops::Const(s, {0, 2, 3, 1}, {4}); Output perm2 = ops::Const(s, {0, 3, 1, 2}, {4}); Output branch0 = split[0]; Output branch1 = ops::Transpose(s, ops::Transpose(s, split[1], perm1), perm2); Output branch2 = split[2]; Output concat = ops::Concat(s, {branch0, branch1, branch2}, ops::Const(s, 1)); Output outputs = ops::Identity(s.WithOpName("outputs"), concat); GrapplerItem item; item.fetch = {"outputs"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({8, 12, 28, 28})); item.feed = {{"inputs", x_t}}; auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveIdentityTranspose(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); for (const NodeDef& node : output.node()) { if (node.op() == "Concat") { EXPECT_EQ(node.input(0), "Split"); EXPECT_EQ(node.input(1), "Split:1"); EXPECT_EQ(node.input(2), "Split:2"); } } auto tensors = EvaluateNodes(output, item.fetch, item.feed); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, RemoveTransposesWithControlDependency) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output inputs = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({2, 3})); Output transpose1 = ops::Transpose(s, inputs, ops::Const(s, {1, 0})); Output transpose2 = ops::Transpose(s, transpose1, ops::Const(s, {1, 0})); Output outputs = ops::Identity(s.WithOpName("outputs").WithControlDependencies(transpose2), ops::Const(s.WithOpName("outputs_const"), 1.0f)); GrapplerItem item; item.fetch = {"outputs"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 3})); item.feed = {{"Placeholder", x_t}}; auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveIdentityTranspose(&optimizer); 
OptimizeAndPrune(&optimizer, &item, &output); NodeMap node_map(&output); const NodeDef* outputs_node = node_map.GetNode("outputs"); EXPECT_EQ(2, outputs_node->input_size()); EXPECT_EQ(outputs_node->input(0), "outputs_const"); EXPECT_EQ(outputs_node->input(1), "^Placeholder"); auto tensors = EvaluateNodes(output, item.fetch, item.feed); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, NotRemoveTransposes) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output inputs_shape = ops::Const(s.WithOpName("inputs_shape"), {8, 3, 28, 28}, {4}); Output inputs = ops::RandomUniform(s.WithOpName("inputs"), inputs_shape, DT_FLOAT); Output perm = ops::Const(s.WithOpName("perm"), {1, 2, 3, 0}, {4}); Output transpose1 = ops::Transpose(s.WithOpName("transpose1"), inputs, perm); Output transpose2 = ops::Transpose(s.WithOpName("transpose2"), transpose1, perm); Output outputs = ops::Identity(s.WithOpName("outputs"), transpose2); GrapplerItem item; item.fetch = {"outputs"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveIdentityTranspose(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); EXPECT_EQ(6, output.node_size()); } TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposesThroughChain) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output inputs_shape = ops::Const(s.WithOpName("inputs_shape"), {8, 3, 28, 28}, {4}); Output inputs = ops::RandomUniform(s.WithOpName("inputs"), inputs_shape, DT_FLOAT); Output perm1 = ops::Const(s.WithOpName("perm1"), {0, 2, 3, 1}, {4}); Output perm2 = ops::Const(s.WithOpName("perm2"), {0, 3, 1, 2}, {4}); Output transpose1 = ops::Transpose( s.WithOpName("transpose1").WithControlDependencies(perm2), inputs, perm1); Output identity = ops::Identity(s.WithOpName("id"), transpose1); Output transpose2 = ops::Transpose(s.WithOpName("transpose2"), identity, perm2); Output id1 = ops::Identity(s.WithOpName("id1"), transpose2); GrapplerItem item; item.fetch = {"id1"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); GraphDef output; ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE); EnableOnlyRemoveIdentityTranspose(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); std::set<string> nodes_after_optimization; for (const NodeDef& node : output.node()) { nodes_after_optimization.insert(node.name()); if (node.name() == "id") { EXPECT_EQ(2, node.input_size()); EXPECT_EQ("inputs", node.input(0)); EXPECT_EQ("^perm2", node.input(1)); } if (node.name() == "id1") { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("id", node.input(0)); } } EXPECT_EQ(nodes_after_optimization, std::set<string>({"id", "id1", "inputs_shape", "inputs", "perm2"})); } TEST_F(ArithmeticOptimizerTest, FoldMulToTransposeConv) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT, ops::Placeholder::Shape({8, 28, 28, 3})); Output scale = ops::Const(s.WithOpName("scale"), 1.0f / 255.0f, {}); Output scaled_inputs = ops::Multiply(s.WithOpName("scaled_inputs"), inputs, scale); Output perm_nhwc_to_nchw = ops::Const(s.WithOpName("perm_nhwc_to_nchw"), {0, 3, 1, 2}, {4}); Output inputs_nchw = ops::Transpose(s.WithOpName("inputs_nchw"), scaled_inputs, perm_nhwc_to_nchw); Output weights = ops::Const(s.WithOpName("weights"), Input::Initializer(127.0f, {5, 5, 3, 16})); Output conv = ops::Conv2D(s.WithOpName("conv"), inputs_nchw, weights, {1, 1, 1, 1}, "VALID", ops::Conv2D::DataFormat("NCHW")); Output 
      outputs = ops::Identity(s.WithOpName("outputs"), conv);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyFoldMultipleIntoConv(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);

  NodeMap node_map(&output);

  // `conv` is now a folded convolution with scaled weights.
  const NodeDef* folded_conv = node_map.GetNode(conv.node()->name());
  ASSERT_NE(folded_conv, nullptr);

  const NodeDef* folded_conv_weights = node_map.GetNode(folded_conv->input(1));
  ASSERT_NE(folded_conv_weights, nullptr);
  EXPECT_EQ("Mul", folded_conv_weights->op());

  // Its input should be a transpose of `inputs`.
  const NodeDef* transpose = node_map.GetNode(NodeName(folded_conv->input(0)));
  ASSERT_NE(transpose, nullptr);
  EXPECT_EQ("inputs", transpose->input(0));
}

TEST_F(ArithmeticOptimizerTest, NotFoldMulAcrossPreservedTranspose) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT,
                                   ops::Placeholder::Shape({8, 28, 28, 3}));
  Output scale = ops::Const(s.WithOpName("scale"), 1.0f / 255.0f, {});
  Output scaled_inputs =
      ops::Multiply(s.WithOpName("scaled_inputs"), inputs, scale);
  Output perm_nhwc_to_nchw =
      ops::Const(s.WithOpName("perm_nhwc_to_nchw"), {0, 3, 1, 2}, {4});
  Output inputs_nchw = ops::Transpose(s.WithOpName("inputs_nchw"),
                                      scaled_inputs, perm_nhwc_to_nchw);
  Output weights = ops::Const(s.WithOpName("weights"),
                              Input::Initializer(127.0f, {5, 5, 3, 16}));
  Output conv =
      ops::Conv2D(s.WithOpName("conv"), inputs_nchw, weights, {1, 1, 1, 1},
                  "VALID", ops::Conv2D::DataFormat("NCHW"));
  Output outputs = ops::Identity(s.WithOpName("outputs"), conv);

  Tensor inputs_nchw_tensor(DT_FLOAT, {8, 3, 28, 28});
  memset(const_cast<char*>(inputs_nchw_tensor.tensor_data().data()), 0,
         inputs_nchw_tensor.tensor_data().size());

  GrapplerItem item;
  item.fetch = {"outputs"};
  item.feed = {{"inputs_nchw", inputs_nchw_tensor}};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  GraphDef output;
  TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output));

  item.graph.Swap(&output);
  TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output));

  NodeMap node_map(&output);
  const NodeDef* inputs_nchw_node_def =
      node_map.GetNode(inputs_nchw.node()->name());
  EXPECT_EQ(NodeName(inputs_nchw_node_def->input(0)),
            scaled_inputs.node()->name());
}

TEST_F(ArithmeticOptimizerTest, FoldMulToConv) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs =
      ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT,
                       ops::Placeholder::Shape({8, 28, 28, 28, 3}));
  Output scale = ops::Const(s.WithOpName("scale"), 1.0f / 255.0f, {});
  Output scaled_inputs =
      ops::Multiply(s.WithOpName("scaled_inputs"), inputs, scale);
  Output weights = ops::Const(s.WithOpName("weights"),
                              Input::Initializer(127.0f, {5, 5, 5, 3, 16}));
  Output conv = ops::Conv3D(s.WithOpName("conv"), scaled_inputs, weights,
                            {1, 1, 1, 1, 1}, "VALID");
  Output outputs = ops::Identity(s.WithOpName("outputs"), conv);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  GraphDef output;
  TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output));

  item.graph.Swap(&output);
  TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output));

  NodeMap node_map(&output);
  // `conv` is now a folded convolution on `inputs` and scaled weights.
  const NodeDef* folded_conv = node_map.GetNode(conv.node()->name());
  CHECK_EQ(inputs.node()->name(), NodeName(folded_conv->input(0)));
  CHECK_EQ(node_map.GetNode(NodeName(folded_conv->input(1)))->op(), "Mul");
}

TEST_F(ArithmeticOptimizerTest, OptimizeCastMulTransposeConv) {
  // This unit test exercises two optimizations, folding mul into conv, and
  // reordering cast and transpose.
  //
  //   Conv2D(Transpose(Mul(Cast(I), S)), W)
  //     =>
  //   Conv2D(Transpose(Cast(I)), W*S)
  //     =>
  //   Conv2D(Cast(Transpose(I)), W*S)
  tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/gpu:0");

  Output inputs =
      ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
  Output cast = ops::Cast(s, inputs, DT_FLOAT);
  Output mul = ops::Mul(s, cast, ops::Const(s, 1.0f / 255.0f));
  Output transpose =
      ops::Transpose(s, mul, ops::Const(s.WithOpName("perm"), {0, 3, 1, 2}));
  Output weights = ops::Const(s.WithOpName("weights"),
                              Input::Initializer(127.0f, {5, 5, 3, 16}));
  Output conv = ops::Conv2D(s, transpose, weights, {1, 1, 1, 1}, "VALID",
                            ops::Conv2D::DataFormat("NCHW"));
  Output outputs = ops::Identity(s.WithOpName("outputs"), conv);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  GraphDef output;
  ArithmeticOptimizer optimizer;  // all optimization stages are on
  OptimizeTwiceAndPrune(&optimizer, &item, &output, /*const_folding=*/true);

  NodeMap node_map(&output);

  // Expected names for reordered cast and transpose.
  const string p = "ArithmeticOptimizer/ReorderCastAndTranspose_";
  const string optimized_cast_name = strings::StrCat(p, "float_Cast");
  const string optimized_transpose_name = strings::StrCat(p, "uint8_Transpose");

  // Expected names for folded multiply and conv.
  const string optimized_weights =
      "ArithmeticOptimizer/FoldMultiplyIntoConv_scaled_Conv2D_weights";

  const NodeDef* inputs_node = node_map.GetNode("Placeholder");
  const NodeDef* transpose_node = node_map.GetNode(optimized_transpose_name);
  const NodeDef* cast_node = node_map.GetNode(optimized_cast_name);

  const NodeDef* weights_node = node_map.GetNode(optimized_weights);
  const NodeDef* conv_node = node_map.GetNode("Conv2D");

  ASSERT_NE(inputs_node, nullptr);
  ASSERT_NE(transpose_node, nullptr);
  ASSERT_NE(cast_node, nullptr);
  ASSERT_NE(weights_node, nullptr);
  ASSERT_NE(conv_node, nullptr);

  EXPECT_EQ(output.node_size(), 7);
  EXPECT_EQ(transpose_node->input(0), inputs_node->name());
  EXPECT_EQ(cast_node->input(0), transpose_node->name());
  EXPECT_EQ(conv_node->input(0), cast_node->name());
  EXPECT_EQ(conv_node->input(1), weights_node->name());
}

TEST_F(ArithmeticOptimizerTest, OptimizeMultipleMulTransposeConv) {
  // This unit test exercises optimization of folding mul into conv for
  // multiple nodes in the graph.
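  // Each Conv2D should end up reading its own scaled copy of the weights,
  // produced by folding the adjacent Mul into the weight constant (verified
  // against the FoldMultiplyIntoConv_* names asserted below).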
  tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/gpu:0");

  GrapplerItem item;
  Output conv[2];

  for (int i = 0; i < 2; ++i) {
    Output inputs =
        ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({8, 3, 28, 28}));
    Output mul = ops::Mul(s, inputs, ops::Const(s, 1.0f / 255.0f));
    Output weights = ops::Const(s.WithOpName("weights"),
                                Input::Initializer(127.0f, {5, 5, 3, 16}));
    conv[i] = ops::Conv2D(s, mul, weights, {1, 1, 1, 1}, "VALID",
                          ops::Conv2D::DataFormat("NCHW"));
  }
  Output outputs = ops::Add(s.WithOpName("outputs"), conv[0], conv[1]);

  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyFoldMultipleIntoConv(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output, /*const_folding=*/true);

  NodeMap node_map(&output);

  using strings::StrCat;
  const string p = "ArithmeticOptimizer/FoldMultiplyIntoConv_";
  const string optimized_weights = StrCat(p, "scaled_Conv2D_weights");
  const string optimized_weights_1 = StrCat(p, "scaled_Conv2D_1_weights_1");

  const NodeDef* weights_node = node_map.GetNode(optimized_weights);
  const NodeDef* weights_node_1 = node_map.GetNode(optimized_weights_1);
  const NodeDef* conv_node = node_map.GetNode("Conv2D");
  const NodeDef* conv_node_1 = node_map.GetNode("Conv2D_1");

  ASSERT_NE(weights_node, nullptr);
  ASSERT_NE(weights_node_1, nullptr);
  ASSERT_NE(conv_node, nullptr);
  ASSERT_NE(conv_node_1, nullptr);

  EXPECT_EQ(conv_node->input(1), weights_node->name());
  EXPECT_EQ(conv_node_1->input(1), weights_node_1->name());
}

TEST_F(ArithmeticOptimizerTest, CombineBitcasts) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_UINT8,
                                   ops::Placeholder::Shape({2, 3}));
  Output bc1 = ops::Bitcast(s.WithOpName("bc1"), inputs, DT_QINT8);
  Output bc2 = ops::Bitcast(s.WithOpName("bc2"), bc1, DT_INT8);
  Output outputs = ops::Identity(s.WithOpName("outputs"), bc2);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_UINT8>(TensorShape({2, 3}));
  item.feed = {{"inputs", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantBitcast(&optimizer);

  OptimizeAndPrune(&optimizer, &item, &output);
  NodeMap node_map(&output);

  // Bitcasts combined into a single op and inputs redirected to the updated
  // Bitcast.
  EXPECT_EQ(3, output.node_size());
  EXPECT_EQ(1, CountOpNodes(output, "Bitcast"));
  EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "bc2"));

  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]);
}

TEST_F(ArithmeticOptimizerTest, CombineAndRemoveBitcasts) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_INT8,
                                   ops::Placeholder::Shape({2, 3}));
  Output bc1 = ops::Bitcast(s, inputs, DT_QINT8);
  Output bc2 = ops::Bitcast(s, bc1, DT_INT8);
  Output outputs = ops::Identity(s.WithOpName("outputs"), bc2);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_INT8>(TensorShape({2, 3}));
  item.feed = {{"inputs", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantBitcast(&optimizer);

  OptimizeAndPrune(&optimizer, &item, &output);
  NodeMap node_map(&output);

  // Bitcasts removed and inputs redirected to outputs.
  EXPECT_EQ(2, output.node_size());
  EXPECT_EQ(0, CountOpNodes(output, "Bitcast"));
  EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "outputs"));

  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]);
}

TEST_F(ArithmeticOptimizerTest, RemoveRedundantCast) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_INT8,
                                   ops::Placeholder::Shape({2, 3}));
  Output cast = ops::Cast(s, inputs, DT_INT8);
  Output outputs = ops::Identity(s.WithOpName("outputs"), cast);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_INT8>(TensorShape({2, 3}));
  item.feed = {{"inputs", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantCast(&optimizer);

  OptimizeAndPrune(&optimizer, &item, &output);
  NodeMap node_map(&output);

  // Cast removed and inputs redirected to outputs.
  EXPECT_EQ(2, output.node_size());
  EXPECT_EQ(0, CountOpNodes(output, "Cast"));
  EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "outputs"));

  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]);
}

TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfIdenticalShape) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  tensorflow::Scope sx = s.NewSubScope("x");
  tensorflow::Scope sy = s.NewSubScope("y");

  auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
  auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
  auto add_ab = ops::Add(sx.WithOpName("Add_ab"), a, b);
  auto add_abc = ops::Add(sy.WithOpName("Add_abc"), add_ab, c);

  auto outputs = ops::Identity(s.WithOpName("outputs"), add_abc);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  std::vector<std::pair<string, Tensor>> feed = {
      {"a", a_t}, {"b", b_t}, {"c", c_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);

  OptimizeAndPrune(&optimizer, &item, &output);

  // We expect the following rewrite(s) to occur:
  //
  //      +
  //     / \
  //    +   c      -->      AddN(a, b, c)
  //   / \
  //  a   b
  EXPECT_EQ(5, output.node_size());

  NodeMap node_map(&output);

  // check add tree was replaced with AddN
  const NodeDef* collapsed_add =
      node_map.GetNode("y/ArithmeticOptimizer/AddOpsRewrite_Add_abc");
  ASSERT_NE(collapsed_add, nullptr);
  EXPECT_EQ("AddN", collapsed_add->op());
  EXPECT_EQ(3, collapsed_add->input_size());
  EXPECT_EQ("a", collapsed_add->input(0));
  EXPECT_EQ("b", collapsed_add->input(1));
  EXPECT_EQ("c", collapsed_add->input(2));

  // check output was re-wired to new node
  const NodeDef* updated_outputs = node_map.GetNode("outputs");
  ASSERT_NE(updated_outputs, nullptr);
  EXPECT_EQ(collapsed_add->name(), updated_outputs->input(0));

  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}

TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MultiplePasses) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();

  auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
  auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
  auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
  auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);

  auto x = ops::Variable(s.WithOpName("x"), {2, 2}, DT_FLOAT);
  auto y = ops::Variable(s.WithOpName("y"), {2, 2}, DT_FLOAT);
  auto z = ops::Variable(s.WithOpName("z"), {2, 2}, DT_FLOAT);
  auto add_xy = ops::Add(s.WithOpName("Add_xy"), x, y);
  auto add_xyz = ops::Add(s.WithOpName("Add_xyz"), add_xy, z);

  auto mul = ops::Multiply(s.WithOpName("Mul"), add_abc, add_xyz);
  auto outputs = ops::Identity(s.WithOpName("outputs"), mul);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto z_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  std::vector<std::pair<string, Tensor>> feed = {
      {"a", a_t}, {"b", b_t}, {"c", c_t}, {"x", x_t}, {"y", y_t}, {"z", z_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);

  OptimizeAndPrune(&optimizer, &item, &output);

  // We expect the following rewrite(s) to occur:
  //
  //         *
  //        / \
  //       +   +                     *
  //      / \ / \                   / \
  //     +  c x  +    -->          /   \
  //    / \     / \               /     \
  //   a   b   y   z   AddN(a, b, c)   AddN(x, y, z)
  EXPECT_EQ(10, output.node_size());

  NodeMap node_map(&output);

  // check left Add subtree replaced with AddN
  const NodeDef* collapsed_left =
      node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_abc");
  ASSERT_NE(collapsed_left, nullptr);
  EXPECT_EQ("AddN", collapsed_left->op());
  EXPECT_EQ(3, collapsed_left->input_size());
  EXPECT_EQ("a", collapsed_left->input(0));
  EXPECT_EQ("b", collapsed_left->input(1));
  EXPECT_EQ("c", collapsed_left->input(2));

  // check right Add subtree replaced with AddN
  const NodeDef* collapsed_right =
      node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_xyz");
  ASSERT_NE(collapsed_right, nullptr);
  EXPECT_EQ("AddN", collapsed_right->op());
  EXPECT_EQ(3, collapsed_right->input_size());
  EXPECT_EQ("x", collapsed_right->input(0));
  EXPECT_EQ("y", collapsed_right->input(1));
  EXPECT_EQ("z", collapsed_right->input(2));

  // check that Mul inputs were re-wired to the new nodes
  const NodeDef* updated_mul = node_map.GetNode("Mul");
  ASSERT_NE(updated_mul, nullptr);
  EXPECT_EQ("Mul", updated_mul->op());
  EXPECT_EQ(2, updated_mul->input_size());
  EXPECT_EQ(collapsed_left->name(), updated_mul->input(0));
  EXPECT_EQ(collapsed_right->name(), updated_mul->input(1));

  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}

TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddInputMultipleTimes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();

  auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
  auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
  auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
  auto add_bc = ops::Add(s.WithOpName("Add_bc"), b, c);
  auto add_all = ops::Add(s.WithOpName("Add_all"), add_ab, add_bc);
  auto outputs = ops::Identity(s.WithOpName("outputs"), add_all);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  std::vector<std::pair<string, Tensor>> feed = {
      {"a", a_t}, {"b", b_t}, {"c", c_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);

  OptimizeAndPrune(&optimizer, &item, &output);

  // We expect the following rewrite(s) to occur:
  //
  //      +
  //     / \
  //    +   +     -->    AddN(a, b, b, c)
  //   / \ / \                    ^
  //  a   b   c            b added twice!
  EXPECT_EQ(5, output.node_size());

  NodeMap node_map(&output);

  // check Add tree replaced with AddN
  const NodeDef* collapsed_add =
      node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_all");
  ASSERT_NE(collapsed_add, nullptr);
  EXPECT_EQ("AddN", collapsed_add->op());
  EXPECT_EQ(4, collapsed_add->input_size());
  EXPECT_EQ("a", collapsed_add->input(0));
  EXPECT_EQ("b", collapsed_add->input(1));
  EXPECT_EQ("b", collapsed_add->input(2));
  EXPECT_EQ("c", collapsed_add->input(3));

  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}

TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfSymbolicallyEqualShape) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();

  // unknown input shape propagated symbolically through the graph
  auto input = ops::Variable(s.WithOpName("input"), {-1, 2}, DT_FLOAT);

  // [a, b, c] have symbolically equal shapes
  auto a = ops::Sqrt(s.WithOpName("a"), input);
  auto b = ops::Square(s.WithOpName("b"), input);
  auto c = ops::Round(s.WithOpName("c"), input);

  // [add_ab, add_abc] shape must be inferred from inputs
  auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
  auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);

  auto outputs = ops::Identity(s.WithOpName("outputs"), add_abc);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  std::vector<std::pair<string, Tensor>> feed = {{"input", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);

  OptimizeAndPrune(&optimizer, &item, &output);

  // We expect the following rewrite(s) to occur:
  //
  //      +
  //     / \
  //    +   c      -->      AddN(a, b, c)
  //   / \
  //  a   b
  EXPECT_EQ(6, output.node_size());

  NodeMap node_map(&output);

  // check add tree was replaced with AddN
  const NodeDef* collapsed_add =
      node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_abc");
  ASSERT_NE(collapsed_add, nullptr);
  EXPECT_EQ("AddN", collapsed_add->op());
  EXPECT_EQ(3, collapsed_add->input_size());
  EXPECT_EQ("a", collapsed_add->input(0));
  EXPECT_EQ("b", collapsed_add->input(1));
  EXPECT_EQ("c", collapsed_add->input(2));

  // check output was re-wired to new node
  const NodeDef* updated_outputs = node_map.GetNode("outputs");
  ASSERT_NE(updated_outputs, nullptr);
  EXPECT_EQ(collapsed_add->name(), updated_outputs->input(0));

  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}

TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCast) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();

  auto a = ops::Variable(s.WithOpName("a"), {32}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {32, 32}, DT_FLOAT);
  auto c = ops::Variable(s.WithOpName("c"), {32, 32, 32}, DT_FLOAT);
  auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
  auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);

  auto x = ops::Variable(s.WithOpName("x"), {32}, DT_FLOAT);
  auto y = ops::Variable(s.WithOpName("y"), {32, 32}, DT_FLOAT);
  auto z = ops::Variable(s.WithOpName("z"), {32, 32, 32}, DT_FLOAT);
  auto add_xy = ops::Add(s.WithOpName("Add_xy"), x, y);
  auto add_xyz = ops::Add(s.WithOpName("Add_xyz"), add_xy, z);

  auto add_all = ops::Add(s.WithOpName("AddAll"), add_abc, add_xyz);
  auto outputs = ops::Identity(s.WithOpName("outputs"), add_all);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
  auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32}));
  auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32, 32}));
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
  auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32}));
  auto z_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32, 32}));
  std::vector<std::pair<string, Tensor>> feed = {
      {"a", a_t}, {"b", b_t}, {"c", c_t}, {"x", x_t}, {"y", y_t}, {"z", z_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);

  OptimizeAndPrune(&optimizer, &item, &output);

  // We expect the following rewrite(s) to occur:
  //  1) [a, x], [b, y], [c, z] - aggregate same shapes first
  //  2) Build an aggregation tree minimizing the cost of broadcast
  //
  //      +                                 +
  //     / \                               / \
  //    +   +                             +   AddN(c, z)
  //   / \ / \                           / \
  //  +  c x  +       -->      AddN(a, x)   AddN(b, y)
  // / \     / \
  // a  b    y  z
  EXPECT_EQ(12, output.node_size());

  NodeMap node_map(&output);

  // expected names of outer and inner nodes
  string outer_add_name = "ArithmeticOptimizer/AddOpsRewrite_AddAll";
  string outer_0_add_name =
      "ArithmeticOptimizer/AddOpsRewrite_Internal_0_AddAll";
  string inner_0_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_0_AddAll";
  string inner_1_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_1_AddAll";
  string inner_2_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_2_AddAll";

  // Add [a, x] first
  const NodeDef* add_ax_node = node_map.GetNode(inner_0_add_name);
  ASSERT_NE(add_ax_node, nullptr);
  EXPECT_EQ("AddN", add_ax_node->op());
  EXPECT_EQ(2, add_ax_node->input_size());
  EXPECT_EQ("a", add_ax_node->input(0));
  EXPECT_EQ("x", add_ax_node->input(1));

  // Then add [b, y]
  const NodeDef* add_by_node = node_map.GetNode(inner_1_add_name);
  ASSERT_NE(add_by_node, nullptr);
  EXPECT_EQ("AddN", add_by_node->op());
  EXPECT_EQ(2, add_by_node->input_size());
  EXPECT_EQ("b", add_by_node->input(0));
  EXPECT_EQ("y", add_by_node->input(1));

  // Then add [c, z]
  const NodeDef* add_cz_node = node_map.GetNode(inner_2_add_name);
  ASSERT_NE(add_cz_node, nullptr);
  EXPECT_EQ("AddN", add_cz_node->op());
  EXPECT_EQ(2, add_cz_node->input_size());
  EXPECT_EQ("c", add_cz_node->input(0));
  EXPECT_EQ("z", add_cz_node->input(1));

  // Then add results together starting from smaller shapes [a, x] + [b, y]
  const NodeDef* outer_0_node = node_map.GetNode(outer_0_add_name);
  ASSERT_NE(outer_0_node, nullptr);
  EXPECT_EQ("Add", outer_0_node->op());
  EXPECT_EQ(2, outer_0_node->input_size());
  EXPECT_EQ(inner_0_add_name, outer_0_node->input(0));
  EXPECT_EQ(inner_1_add_name, outer_0_node->input(1));

  // And finally the top-level Add node
  const NodeDef* outer_node = node_map.GetNode(outer_add_name);
  ASSERT_NE(outer_node, nullptr);
  EXPECT_EQ("Add", outer_node->op());
  EXPECT_EQ(2, outer_node->input_size());
  EXPECT_EQ(outer_0_add_name, outer_node->input(0));
  EXPECT_EQ(inner_2_add_name, outer_node->input(1));

  // And outputs read the new top-level Add node
  const NodeDef* updated_outputs = node_map.GetNode("outputs");
  ASSERT_NE(updated_outputs, nullptr);
  EXPECT_EQ(outer_add_name, updated_outputs->input(0));

  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}

TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCastWithSymbolicShapes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();

  // We have a small input with one unknown dimension
  auto small = ops::Variable(s.WithOpName("small"), {-1, 1, 1}, DT_DOUBLE);

  // And a second input which is larger, but has the same unknown dimension;
  // the device spec prevents this node from being rewritten.
  auto d = "/device:CPU:0";
  auto v = ops::Variable(s.WithOpName("v"), {1, 32, 32}, DT_DOUBLE);
  auto large = ops::Add(s.WithOpName("large").WithDevice(d), small, v);

  // [a, c] have {?, 1, 1} shape, [b] has {?, 32, 32}
  auto a = ops::Sqrt(s.WithOpName("a"), small);
  auto b = ops::Square(s.WithOpName("b"), large);
  auto c = ops::Round(s.WithOpName("c"), small);

  // [add_ab, add_abc] shape must be inferred from inputs
  auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
  auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);

  auto outputs = ops::Identity(s.WithOpName("outputs"), add_abc);

  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));

  auto s_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({8, 1, 1}));
  auto v_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({1, 32, 32}));
  std::vector<std::pair<string, Tensor>> feed = {{"small", s_t}, {"v", v_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());

  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);

  OptimizeAndPrune(&optimizer, &item, &output);

  // We expect the following rewrite(s) to occur: it's much cheaper to add the
  // small tensors first and do the broadcast just once.
  //
  //      +                +
  //     / \              / \
  //    +   c    -->     +   b
  //   / \              / \
  //  a   b            a   c
  EXPECT_EQ(9, output.node_size());

  NodeMap node_map(&output);

  // expected names of outer and inner nodes
  string outer_add_name = "ArithmeticOptimizer/AddOpsRewrite_Add_abc";
  string inner_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_0_Add_abc";

  // outer Add node
  const NodeDef* outer_add = node_map.GetNode(outer_add_name);
  ASSERT_NE(outer_add, nullptr);
  EXPECT_EQ("Add", outer_add->op());
  EXPECT_EQ(inner_add_name, outer_add->input(0));
  EXPECT_EQ("b", outer_add->input(1));

  // inner AddN node
  const NodeDef* inner_add = node_map.GetNode(inner_add_name);
  ASSERT_NE(inner_add, nullptr);
  EXPECT_EQ(2, inner_add->input_size());
  EXPECT_EQ("a", inner_add->input(0));
  EXPECT_EQ("c", inner_add->input(1));

  // check output was re-wired to new node
  const NodeDef* updated_outputs = node_map.GetNode("outputs");
  ASSERT_NE(updated_outputs, nullptr);
  EXPECT_EQ(outer_add_name, updated_outputs->input(0));

  auto tensors = EvaluateNodes(output,
item.fetch, feed); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, RemoveNegation) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto x = ops::Variable(s.WithOpName("x"), {2, 2}, DT_FLOAT); auto y = ops::Variable(s.WithOpName("y"), {2, 2}, DT_FLOAT); Output neg_x = ops::Neg(s.WithOpName("Neg_x"), x); Output neg_y = ops::Neg(s.WithOpName("Neg_y"), y); Output add_x_y = ops::Add(s.WithOpName("Add_x_y"), x, y); Output add_negx_y = ops::Add(s.WithOpName("Add_negx_y"), neg_x, y); Output add_x_negy = ops::Add(s.WithOpName("Add_x_negy"), x, neg_y); Output add_negx_negy = ops::Add(s.WithOpName("Add_negx_negy"), neg_x, neg_y); Output sub_x_y = ops::Sub(s.WithOpName("Sub_x_y"), x, y); Output sub_negx_y = ops::Sub(s.WithOpName("Sub_negx_y"), neg_x, y); Output sub_x_negy = ops::Sub(s.WithOpName("Sub_x_negy"), x, neg_y); Output sub_negx_negy = ops::Sub(s.WithOpName("Sub_negx_negy"), neg_x, neg_y); Output neg_x_with_dep = ops::Neg( s.WithOpName("Neg_x_with_dep").WithControlDependencies({add_x_y}), x); Output add_negx_with_dep_y = ops::Add(s.WithOpName("Add_negx_with_dep_y"), neg_x_with_dep, y); auto add_all = ops::AddN(s.WithOpName("add_all"), {add_x_y, add_negx_y, add_x_negy, add_negx_negy, sub_x_y, sub_negx_y, sub_x_negy, sub_negx_negy, add_negx_with_dep_y}); GrapplerItem item; item.fetch = {"add_all"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2})); auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2})); std::vector<std::pair<string, Tensor>> feed = {{"x", x_t}, {"y", y_t}}; auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveNegation(&optimizer); OptimizeTwice(&optimizer, &item, &output); EXPECT_EQ(item.graph.node_size(), output.node_size()); int found = 0; for (int i = 0; i < output.node_size(); ++i) { const NodeDef& node = output.node(i); if (node.name() == "Add_negx_y") { ++found; EXPECT_EQ("Sub", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("y", node.input(0)); EXPECT_EQ("x", node.input(1)); } else if (node.name() == "Add_x_negy") { ++found; EXPECT_EQ("Sub", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("y", node.input(1)); } else if (node.name() == "Add_negx_negy") { ++found; EXPECT_EQ("Sub", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("Neg_x", node.input(0)); EXPECT_EQ("y", node.input(1)); } else if (node.name() == "Sub_x_negy") { ++found; EXPECT_EQ("Add", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("y", node.input(1)); } else if (node.name() == "Sub_negx_negy") { ++found; EXPECT_EQ("Sub", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("y", node.input(0)); EXPECT_EQ("x", node.input(1)); } else if (node.name() == "Add_negx_with_dep_y") { ++found; EXPECT_EQ("Sub", node.op()); EXPECT_EQ(3, node.input_size()); EXPECT_EQ("y", node.input(0)); EXPECT_EQ("x", node.input(1)); EXPECT_EQ("^Add_x_y", node.input(2)); } } EXPECT_EQ(6, found); auto tensors = EvaluateNodes(output, item.fetch, feed); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, ConvertSqrtDivToRsqrtMul) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2}); auto y = ops::Const(s.WithOpName("y"), {3.0f, 
4.0f}, {1, 2}); Output sqrt_y = ops::Sqrt(s.WithOpName("sqrt_y"), y); Output div_x_sqrt_y = ops::Div(s.WithOpName("output"), x, sqrt_y); GrapplerItem item; item.fetch = {"output"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlySqrtDivToRsqrtMul(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); EXPECT_EQ(item.graph.node_size(), output.node_size()); for (int i = 0; i < output.node_size(); ++i) { const NodeDef& node = output.node(i); if (node.name() == "output") { EXPECT_EQ("Mul", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("sqrt_y", node.input(1)); } else if (node.name() == "sqrt_y") { EXPECT_EQ("Rsqrt", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("y", node.input(0)); } } } TEST_F(ArithmeticOptimizerTest, ConvertPow) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2}); auto y2 = ops::Const(s.WithOpName("y2"), {2.0f, 2.0f}, {1, 2}); auto y1 = ops::Const(s.WithOpName("y1"), {1.0f, 1.0f}, {1, 2}); auto yPoint5 = ops::Const(s.WithOpName("y.5"), {0.5f, 0.5f}, {1, 2}); auto y0 = ops::Const(s.WithOpName("y0"), {0.0f, 0.0f}, {1, 2}); auto y_Point5 = ops::Const(s.WithOpName("y_.5"), {-0.5f, -0.5f}, {1, 2}); auto y_1 = ops::Const(s.WithOpName("y_1"), {-1.0f, -1.0f}, {1, 2}); auto y = ops::Const(s.WithOpName("y"), {3.0f, 4.0f}, {1, 2}); auto z = ops::Const(s.WithOpName("z"), {42.0f}, {}); auto ones = ops::Const(s.WithOpName("ones"), {1.0f, 1.0f, 1.0f}, {1, 3}); auto zeros = ops::Const(s.WithOpName("zeros"), {0.0f, 0.0f, 0.0f}, {1, 3}); Output out2 = ops::Pow(s.WithOpName("out2"), x, y2); Output out1 = ops::Pow(s.WithOpName("out1"), x, y1); Output outPoint5 = ops::Pow(s.WithOpName("out.5"), x, yPoint5); Output out0 = ops::Pow(s.WithOpName("out0"), x, y0); Output out_Point5 = ops::Pow(s.WithOpName("out_.5"), x, y_Point5); Output out_1 = ops::Pow(s.WithOpName("out_1"), x, y_1); Output out = ops::Pow(s.WithOpName("out"), x, y); Output out_bcast1 = ops::Pow(s.WithOpName("out_bcast1"), z, ones); Output out_bcast2 = ops::Pow(s.WithOpName("out_bcast2"), z, zeros); GrapplerItem item; item.fetch = {"out2", "out1", "out.5", "out0", "out_.5", "out_1", "out", "out_bcast1", "out_bcast2"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(9, tensors_expected.size()); GraphDef got; ArithmeticOptimizer optimizer; EnableOnlyConvertPow(&optimizer); OptimizeAndPrune(&optimizer, &item, &got); auto tensors = EvaluateNodes(got, item.fetch); EXPECT_EQ(9, tensors.size()); for (int i = 0; i < tensors.size(); ++i) { EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements()); test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6); } GraphDef want; AddNode("x", "Const", {}, {}, &want); AddNode("y2", "Const", {}, {}, &want); AddNode("y1", "Const", {}, {}, &want); AddNode("y.5", "Const", {}, {}, &want); AddNode("y0", "Const", {}, {}, &want); AddNode("y_.5", "Const", {}, {}, &want); AddNode("y_1", "Const", {}, {}, &want); AddNode("y", "Const", {}, {}, &want); AddNode("z", "Const", {}, {}, &want); AddNode("ones", "Const", {}, {}, &want); AddNode("zeros", "Const", {}, {}, &want); AddNode("out2", "Square", {"x", 
AsControlDependency("y2")}, {}, &want); AddNode("out1", "Identity", {"x", AsControlDependency("y1")}, {}, &want); AddNode("out.5", "Sqrt", {"x", AsControlDependency("y.5")}, {}, &want); AddNode("out0", "Const", {AsControlDependency("x"), AsControlDependency("y0")}, {}, &want); AddNode("out_.5", "Rsqrt", {"x", AsControlDependency("y_.5")}, {}, &want); AddNode("out_1", "Reciprocal", {"x", AsControlDependency("y_1")}, {}, &want); AddNode("out", "Pow", {"x", "y"}, {}, &want); AddNode("out_bcast1", "Pow", {"z", "ones"}, {}, &want); AddNode("out_bcast2", "Pow", {"z", "zeros"}, {}, &want); CompareGraphs(want, got); } TEST_F(ArithmeticOptimizerTest, Log1p) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto x1 = ops::Const(s.WithOpName("x1"), {1.0f, 1.0f}, {1, 2}); auto x2 = ops::Const(s.WithOpName("x2"), {2.0f, 2.0f}, {1, 2}); auto x3 = ops::Const(s.WithOpName("x3"), {3.0f, 3.0f}, {1, 2}); auto a12 = ops::Add(s.WithOpName("a12").WithControlDependencies(x3), x1, x2); auto a23 = ops::Add(s.WithOpName("a23"), x2, x3); Output out1 = ops::Log(s.WithOpName("out1"), a12); Output out2 = ops::Log(s.WithOpName("out2"), a23); GrapplerItem item; item.fetch = {"out1", "out2"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(2, tensors_expected.size()); GraphDef got; ArithmeticOptimizer optimizer; EnableOnlyLog1p(&optimizer); OptimizeAndPrune(&optimizer, &item, &got); auto tensors = EvaluateNodes(got, item.fetch); EXPECT_EQ(2, tensors.size()); for (int i = 0; i < 2; ++i) { EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements()); test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6); } GraphDef want; AddNode("x1", "Const", {}, {}, &want); AddNode("x2", "Const", {}, {}, &want); AddNode("x3", "Const", {}, {}, &want); AddNode("a23", "Add", {"x2", "x3"}, {}, &want); AddNode("out1", "Log1p", {"x2", AsControlDependency("x1"), AsControlDependency("x3")}, {}, &want); AddNode("out2", "Log", {"a23"}, {}, &want); CompareGraphs(want, got); } TEST_F(ArithmeticOptimizerTest, Expm1) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto x1 = ops::Const(s.WithOpName("x1"), {2.0f, 2.0f}, {1, 2}); auto x2 = ops::Const(s.WithOpName("x2"), {1.0f, 1.0f}, {1, 2}); auto x3 = ops::Const(s.WithOpName("x3"), {3.0f, 3.0f}, {1, 2}); auto exp1 = ops::Exp(s.WithOpName("exp1").WithControlDependencies(x3), x1); Output out1 = ops::Sub(s.WithOpName("out1"), exp1, x2); Output out2 = ops::Sub(s.WithOpName("out2"), exp1, x3); GrapplerItem item; item.fetch = {"out1", "out2"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(2, tensors_expected.size()); GraphDef got; ArithmeticOptimizer optimizer; EnableOnlyExpm1(&optimizer); OptimizeAndPrune(&optimizer, &item, &got); auto tensors = EvaluateNodes(got, item.fetch); EXPECT_EQ(2, tensors.size()); for (int i = 0; i < 2; ++i) { EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements()); test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6); } GraphDef want; AddNode("x1", "Const", {}, {}, &want); AddNode("x2", "Const", {}, {}, &want); AddNode("x3", "Const", {}, {}, &want); AddNode("exp1", "Exp", {"x1", AsControlDependency("x3")}, {}, &want); AddNode("out1", "Expm1", {"x1", AsControlDependency("x2"), AsControlDependency("x3")}, {}, &want); AddNode("out2", "Sub", {"exp1", "x3"}, {}, &want); CompareGraphs(want, got); } TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_SimpleSwap) { tensorflow::Scope s = 
tensorflow::Scope::NewRootScope(); auto a = ops::Variable(s.WithOpName("a"), {32}, DT_FLOAT); auto b = ops::Variable(s.WithOpName("b"), {32, 32}, DT_FLOAT); auto c = ops::Variable(s.WithOpName("c"), {32}, DT_FLOAT); auto mul1 = ops::Mul(s.WithOpName("mul1"), a, b); auto mul2 = ops::Mul(s.WithOpName("mul2"), mul1, c); auto outputs = ops::Identity(s.WithOpName("outputs"), mul2); GrapplerItem item; item.fetch = {"outputs"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32})); auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32})); auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32})); std::vector<std::pair<string, Tensor>> feed = { {"a", a_t}, {"b", b_t}, {"c", c_t}}; auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyMinimizeBroadcasts(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); // We expect the following rewrite(s) to occur: // // * * // / \ / \ // * c --> * b // / \ / \ // a b a c NodeMap node_map(&output); const NodeDef* mul1_node = node_map.GetNode("mul1"); ASSERT_NE(mul1_node, nullptr); EXPECT_EQ("a", mul1_node->input(0)); EXPECT_EQ("c", mul1_node->input(1)); const NodeDef* mul2_node = node_map.GetNode("mul2"); ASSERT_NE(mul2_node, nullptr); EXPECT_EQ("mul1", mul2_node->input(0)); EXPECT_EQ("b", mul2_node->input(1)); auto tensors = EvaluateNodes(output, item.fetch, feed); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_FlattenTallGraph) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto a = ops::Variable(s.WithOpName("a"), {32}, DT_DOUBLE); auto b = ops::Variable(s.WithOpName("b"), {32, 32}, DT_DOUBLE); auto c = ops::Variable(s.WithOpName("c"), {32}, DT_DOUBLE); auto d = ops::Variable(s.WithOpName("d"), {32}, DT_DOUBLE); auto e = ops::Variable(s.WithOpName("e"), {32}, DT_DOUBLE); auto mul1 = ops::Mul(s.WithOpName("mul1"), a, b); auto mul2 = ops::Mul(s.WithOpName("mul2"), mul1, c); auto mul3 = ops::Mul(s.WithOpName("mul3"), mul2, d); auto mul4 = ops::Mul(s.WithOpName("mul4"), mul3, e); auto outputs = ops::Identity(s.WithOpName("outputs"), mul4); GrapplerItem item; item.fetch = {"outputs"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto a_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32})); auto b_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32, 32})); auto c_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32})); auto d_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32})); auto e_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32})); std::vector<std::pair<string, Tensor>> feed = { {"a", a_t}, {"b", b_t}, {"c", c_t}, {"d", d_t}, {"e", e_t}}; auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyMinimizeBroadcasts(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); // We expect the following rewrite(s) to occur: Graph is "flattened" and // largest shape pushed to the top. 
// // * // / \ // * e * // / \ / \ // * d * b // / \ / \ // * c --> * * // / \ / \ / \ // a b a c d e NodeMap node_map(&output); const NodeDef* mul1_node = node_map.GetNode("mul1"); ASSERT_NE(mul1_node, nullptr); EXPECT_EQ("a", mul1_node->input(0)); EXPECT_EQ("c", mul1_node->input(1)); const NodeDef* mul2_node = node_map.GetNode("mul2"); ASSERT_NE(mul2_node, nullptr); EXPECT_EQ("d", mul2_node->input(0)); EXPECT_EQ("e", mul2_node->input(1)); const NodeDef* mul3_node = node_map.GetNode("mul3"); ASSERT_NE(mul3_node, nullptr); EXPECT_EQ("mul1", mul3_node->input(0)); EXPECT_EQ("mul2", mul3_node->input(1)); const NodeDef* mul4_node = node_map.GetNode("mul4"); ASSERT_NE(mul4_node, nullptr); EXPECT_EQ("mul3", mul4_node->input(0)); EXPECT_EQ("b", mul4_node->input(1)); auto tensors = EvaluateNodes(output, item.fetch, feed); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_BuildTreeUp) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); // [a, b, c] - scalars, [d] - matrix auto a = ops::Variable(s.WithOpName("a"), {32}, DT_FLOAT); auto b = ops::Variable(s.WithOpName("b"), {32}, DT_FLOAT); auto c = ops::Variable(s.WithOpName("c"), {32}, DT_FLOAT); auto d = ops::Variable(s.WithOpName("D"), {32, 32}, DT_FLOAT); auto mul1 = ops::Mul(s.WithOpName("mul1"), a, b); auto mul2 = ops::Mul(s.WithOpName("mul2"), c, d); auto mul3 = ops::Mul(s.WithOpName("mul3"), mul1, mul2); auto outputs = ops::Identity(s.WithOpName("outputs"), mul3); GrapplerItem item; item.fetch = {"outputs"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32})); auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32})); auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32})); auto d_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32})); std::vector<std::pair<string, Tensor>> feed = { {"a", a_t}, {"b", b_t}, {"c", c_t}, {"D", d_t}}; auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyMinimizeBroadcasts(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); // We expect the following rewrite(s) to occur: // // * // / \ // * * D // / \ / \ // * * -> * c // / \ / \ / \ // a b c D a b NodeMap node_map(&output); const NodeDef* mul1_node = node_map.GetNode("mul2"); ASSERT_NE(mul1_node, nullptr); EXPECT_EQ("a", mul1_node->input(0)); EXPECT_EQ("b", mul1_node->input(1)); const NodeDef* mul2_node = node_map.GetNode("mul1"); ASSERT_NE(mul2_node, nullptr); EXPECT_EQ("mul2", mul2_node->input(0)); EXPECT_EQ("c", mul2_node->input(1)); const NodeDef* mul3_node = node_map.GetNode("mul3"); ASSERT_NE(mul3_node, nullptr); EXPECT_EQ("D", mul3_node->input(0)); EXPECT_EQ("mul1", mul3_node->input(1)); auto tensors = EvaluateNodes(output, item.fetch, feed); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } TEST_F(ArithmeticOptimizerTest, HoistCWiseUnaryFromConcat) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output a = ops::Const(s.WithOpName("a"), 3.14f, {32}); Output b = ops::Const(s.WithOpName("b"), 1.0f, {32}); Output c = ops::Const(s.WithOpName("c"), 42.0f, {32}); Output axis = ops::Const(s.WithOpName("axis"), 0, {}); Output ctrl1 = ops::Const(s.WithOpName("ctrl1"), 1, {}); Output ctrl2 = ops::Const(s.WithOpName("ctrl2"), 2, {}); Output ctrl3 = ops::Const(s.WithOpName("ctrl3"), 3, {}); // Test case with chains of 
length 1. // Rewrites // Concat({Exp(a), Exp(b), Exp(c)}) // into // Exp(Concat({a, b, c})). Output sin_a = ops::Sin(s.WithOpName("sin_a").WithControlDependencies(ctrl3), a); Output exp_a = ops::Exp(s.WithOpName("exp_a").WithControlDependencies(ctrl1), sin_a); Output exp_b = ops::Exp(s.WithOpName("exp_b"), b); Output exp_c = ops::Exp(s.WithOpName("exp_c").WithControlDependencies(ctrl2), c); Output concat = ops::Concat(s.WithOpName("concat"), {exp_a, exp_b, exp_c}, axis); Output id = ops::Identity(s.WithOpName("id"), concat); // Test case with chains of length 2. // Rewrites // Concat({Cos(Exp(a)), Cos(Exp(b)), Cos(Exp(c))}) // into // Cos(Exp(Concat({a, b, c}))). Output exp_a2 = ops::Exp(s.WithOpName("exp_a2").WithControlDependencies(ctrl1), sin_a); Output exp_b2 = ops::Exp(s.WithOpName("exp_b2"), b); Output exp_c2 = ops::Exp(s.WithOpName("exp_c2").WithControlDependencies(ctrl2), c); Output cos_exp_a2 = ops::Cos( s.WithOpName("cos_exp_a2").WithControlDependencies(ctrl1), exp_a2); Output cos_exp_b2 = ops::Cos( s.WithOpName("cos_exp_b2").WithControlDependencies(ctrl3), exp_b2); Output cos_exp_c2 = ops::Cos(s.WithOpName("cos_exp_c2"), exp_c2); Output concat2 = ops::Concat(s.WithOpName("concat2"), {cos_exp_a2, cos_exp_b2, cos_exp_c2}, axis); Output id2 = ops::Identity(s.WithOpName("id2"), concat2); GrapplerItem item; item.fetch = {"id", "id2"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyHoistCWiseUnaryChains(&optimizer); OptimizeTwiceAndPrune(&optimizer, &item, &output); int found = 0; for (const NodeDef& node : output.node()) { if (node.name() == "concat") { EXPECT_EQ(6, node.input_size()); EXPECT_EQ("sin_a", node.input(0)); EXPECT_EQ("b", node.input(1)); EXPECT_EQ("c", node.input(2)); EXPECT_EQ("axis", node.input(3)); EXPECT_EQ("^ctrl1", node.input(4)); EXPECT_EQ("^ctrl2", node.input(5)); found++; } if (node.name() == "exp_a") { EXPECT_EQ(2, node.input_size()); EXPECT_EQ("concat", node.input(0)); EXPECT_EQ("^ctrl1", node.input(1)); found++; } if (node.name() == "id") { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("exp_a", node.input(0)); found++; } if (node.name() == "concat2") { EXPECT_EQ(7, node.input_size()); EXPECT_EQ("sin_a", node.input(0)); EXPECT_EQ("b", node.input(1)); EXPECT_EQ("c", node.input(2)); EXPECT_EQ("axis", node.input(3)); EXPECT_EQ("^ctrl1", node.input(4)); EXPECT_EQ("^ctrl2", node.input(5)); EXPECT_EQ("^ctrl3", node.input(6)); found++; } if (node.name() == "exp_a2") { EXPECT_EQ(2, node.input_size()); EXPECT_EQ("concat2", node.input(0)); EXPECT_EQ("^ctrl1", node.input(1)); found++; } if (node.name() == "cos_exp_a2") { EXPECT_EQ(2, node.input_size()); EXPECT_EQ("exp_a2", node.input(0)); EXPECT_EQ("^ctrl1", node.input(1)); found++; } if (node.name() == "id2") { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("cos_exp_a2", node.input(0)); found++; } } EXPECT_EQ(7, found); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(tensors.size(), tensors_expected.size()); EXPECT_EQ(tensors.size(), item.fetch.size()); for (int i = 0; i < item.fetch.size(); ++i) { test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6); } } TEST_F(ArithmeticOptimizerTest, HoistCWiseUnaryIntoSplit) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output x = ops::Const(s.WithOpName("x"), 3.1415f, {32}); Output axis = ops::Const(s.WithOpName("axis"), 0, {}); Output ctrl1 = ops::Const(s.WithOpName("ctrl1"), 1, {}); Output ctrl2 = ops::Const(s.WithOpName("ctrl2"), 2, 
{}); Output ctrl3 = ops::Const(s.WithOpName("ctrl3"), 3, {}); // Test case with chains of length 1. // Rewrites // [Sin(y) for y in Split(x)] // into // [y for y in Split(Sin(x))]. ops::Split split1(s.WithOpName("split1"), axis, x, 2); Output sin_a = ops::Sin(s.WithOpName("sin_a").WithControlDependencies(ctrl1), split1[0]); Output id_a = ops::Identity(s.WithOpName("id_a"), sin_a); Output sin_b = ops::Sin(s.WithOpName("sin_b"), split1[1]); Output exp_b = ops::Exp(s.WithOpName("exp_b"), sin_b); Output id_b = ops::Identity(s.WithOpName("id_b"), exp_b); // Test case with SplitV and chains of length 2. // Rewrites // [Cos(Exp(y)) for y in Split(x)] // into // [y for y in Split(Cos(Exp(x)))]. Output size_splits2 = ops::Const(s.WithOpName("size_splits2"), {20, 12}, {2}); ops::SplitV split2(s.WithOpName("split2"), x, size_splits2, axis, 2); Output exp_a2 = ops::Exp( s.WithOpName("exp_a2").WithControlDependencies(ctrl1), split2[0]); Output exp_b2 = ops::Exp(s.WithOpName("exp_b2"), split2[1]); Output cos_exp_a2 = ops::Cos( s.WithOpName("cos_exp_a2").WithControlDependencies(ctrl2), exp_a2); Output cos_exp_b2 = ops::Cos( s.WithOpName("cos_exp_b2").WithControlDependencies(ctrl3), exp_b2); Output id_a2 = ops::Identity(s.WithOpName("id_a2"), cos_exp_a2); Output id_b2 = ops::Identity(s.WithOpName("id_b2"), cos_exp_b2); GrapplerItem item; item.fetch = {"id_a", "id_b", "id_a2", "id_b2"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyHoistCWiseUnaryChains(&optimizer); OptimizeTwiceAndPrune(&optimizer, &item, &output); int found = 0; for (const NodeDef& node : output.node()) { // The following 6 nodes should be pruned. EXPECT_NE(node.name(), "sin_a"); EXPECT_NE(node.name(), "sin_b"); EXPECT_NE(node.name(), "exp_a2"); EXPECT_NE(node.name(), "exp_b2"); EXPECT_NE(node.name(), "cos_exp_a2"); EXPECT_NE(node.name(), "cos_exp_b2"); if (node.name() == "split1") { EXPECT_EQ(2, node.input_size()); EXPECT_EQ("axis", node.input(0)); EXPECT_EQ("ArithmeticOptimizer/_sin_a_split1", node.input(1)); found++; } if (node.name() == "ArithmeticOptimizer/_sin_a_split1") { EXPECT_EQ("Sin", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("^ctrl1", node.input(1)); found++; } if (node.name() == "id_a") { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("split1", node.input(0)); found++; } if (node.name() == "exp_b") { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("split1:1", node.input(0)); found++; } if (node.name() == "id_b") { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("exp_b", node.input(0)); found++; } if (node.name() == "ArithmeticOptimizer/_exp_a2_split2") { EXPECT_EQ("Exp", node.op()); EXPECT_EQ(4, node.input_size()); EXPECT_EQ("x", node.input(0)); EXPECT_EQ("^ctrl1", node.input(1)); EXPECT_EQ("^ctrl2", node.input(2)); EXPECT_EQ("^ctrl3", node.input(3)); found++; } if (node.name() == "ArithmeticOptimizer/_cos_exp_a2_split2") { EXPECT_EQ("Cos", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("ArithmeticOptimizer/_exp_a2_split2", node.input(0)); found++; } if (node.name() == "split2") { EXPECT_EQ(3, node.input_size()); EXPECT_EQ("ArithmeticOptimizer/_cos_exp_a2_split2", node.input(0)); EXPECT_EQ("size_splits2", node.input(1)); EXPECT_EQ("axis", node.input(2)); found++; } if (node.name() == "id_a2") { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("split2", node.input(0)); found++; } if (node.name() == "id_b2") { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("split2:1", node.input(0)); 
found++; } } EXPECT_EQ(10, found); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(tensors.size(), tensors_expected.size()); EXPECT_EQ(tensors.size(), item.fetch.size()); for (int i = 0; i < item.fetch.size(); ++i) { test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6); } } TEST_F(ArithmeticOptimizerTest, RemoveIdempotent) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output a = ops::Const(s.WithOpName("a"), 3.14f, {32}); Output sn1 = ops::Snapshot(s.WithOpName("sn1"), a); Output sn2 = ops::Snapshot(s.WithOpName("sn2"), sn1); Output out1 = ops::Identity(s.WithOpName("out1"), sn2); Output id1 = ops::Identity(s.WithOpName("id1"), a); Output id2 = ops::Identity(s.WithOpName("id2"), id1); Output out2 = ops::Identity(s.WithOpName("out2"), id2); GrapplerItem item; item.fetch = {"out1", "out2"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveIdempotent(&optimizer); OptimizeTwice(&optimizer, &item, &output); EXPECT_EQ(7, output.node_size()); int found = 0; for (const NodeDef& node : output.node()) { if (node.name() == "out1") { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("sn1", node.input(0)); found++; } else if (node.name() == "out2") { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("id1", node.input(0)); found++; } else if (node.name() == "sn1") { EXPECT_EQ(1, node.input_size()); EXPECT_EQ("a", node.input(0)); found++; } } EXPECT_EQ(3, found); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(tensors.size(), tensors_expected.size()); EXPECT_EQ(tensors.size(), item.fetch.size()); for (int i = 0; i < item.fetch.size(); ++i) { test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6); } } TEST_F(ArithmeticOptimizerTest, RemoveLogicalNot) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output a = ops::Const(s.WithOpName("a"), 3.14f, {32}); Output b = ops::Const(s.WithOpName("b"), -3.14f, {32}); Output eq = ops::Equal(s.WithOpName("eq"), a, b); Output neq = ops::NotEqual(s.WithOpName("neq"), a, b); Output lt = ops::Less(s.WithOpName("lt"), a, b); Output le = ops::LessEqual(s.WithOpName("le"), a, b); Output gt = ops::Greater(s.WithOpName("gt"), a, b); Output ge = ops::GreaterEqual(s.WithOpName("ge"), a, b); // not_eq is reserved Output not_eq1 = ops::LogicalNot(s.WithOpName("not_eq1"), eq); Output not_neq = ops::LogicalNot(s.WithOpName("not_neq"), neq); Output not_lt = ops::LogicalNot(s.WithOpName("not_lt"), lt); Output not_le = ops::LogicalNot(s.WithOpName("not_le"), le); Output not_gt = ops::LogicalNot(s.WithOpName("not_gt"), gt); Output not_ge = ops::LogicalNot(s.WithOpName("not_ge"), ge); Output id_not_eq = ops::Identity(s.WithOpName("id_not_eq"), not_eq1); Output id_not_neq = ops::Identity(s.WithOpName("id_not_neq"), not_neq); Output id_not_lt = ops::Identity(s.WithOpName("id_not_lt"), not_lt); Output id_not_le = ops::Identity(s.WithOpName("id_not_le"), not_le); Output id_not_gt = ops::Identity(s.WithOpName("id_not_gt"), not_gt); Output id_not_ge = ops::Identity(s.WithOpName("id_not_ge"), not_ge); GrapplerItem item; item.fetch = {"id_not_eq", "id_not_neq", "id_not_lt", "id_not_le", "id_not_gt", "id_not_ge"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyRemoveLogicalNot(&optimizer); OptimizeTwice(&optimizer, &item, &output); int found = 0; for (const NodeDef& node : output.node()) { if (node.name() 
== "id_not_eq") { EXPECT_EQ("eq", node.input(0)); ++found; } if (node.name() == "id_not_neq") { EXPECT_EQ("neq", node.input(0)); ++found; } if (node.name() == "id_not_lt") { EXPECT_EQ("lt", node.input(0)); ++found; } if (node.name() == "id_not_le") { EXPECT_EQ("le", node.input(0)); ++found; } if (node.name() == "id_not_gt") { EXPECT_EQ("gt", node.input(0)); ++found; } if (node.name() == "id_not_ge") { EXPECT_EQ("ge", node.input(0)); ++found; } if (node.name() == "eq") { EXPECT_EQ("NotEqual", node.op()); ++found; } if (node.name() == "neq") { EXPECT_EQ("Equal", node.op()); ++found; } if (node.name() == "lt") { EXPECT_EQ("GreaterEqual", node.op()); ++found; } if (node.name() == "le") { EXPECT_EQ("Greater", node.op()); ++found; } if (node.name() == "gt") { EXPECT_EQ("LessEqual", node.op()); ++found; } if (node.name() == "ge") { EXPECT_EQ("Less", node.op()); ++found; } } EXPECT_EQ(12, found); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(tensors.size(), tensors_expected.size()); EXPECT_EQ(tensors.size(), item.fetch.size()); for (int i = 0; i < item.fetch.size(); ++i) { test::ExpectTensorEqual<bool>(tensors_expected[i], tensors[i]); } } TEST_F(ArithmeticOptimizerTest, OptimizeMaxOrMinOfMonotonicElementWise) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2}); Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x); Output reduce_max = ops::Max(s.WithOpName("reduce_max"), sqrt, {0}); Output final_out = ops::Identity(s.WithOpName("final_out"), reduce_max); GrapplerItem item; item.fetch = {"final_out"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); EXPECT_EQ(item.graph.node_size(), output.node_size()); // Check if the inputs are switched int required_node_count = 0; for (int i = 0; i < output.node_size(); ++i) { const NodeDef& node = output.node(i); if (node.name() == "sqrt") { EXPECT_EQ("Sqrt", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("reduce_max", node.input(0)); ++required_node_count; } else if (node.name() == "reduce_max") { EXPECT_EQ("Max", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("x", node.input(0)); ++required_node_count; } } EXPECT_EQ(2, required_node_count); } TEST_F(ArithmeticOptimizerTest, OptimizeMaxOrMinOfMonotonicElementWise_DoNotChangeFetchNode) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2}); Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x); Output reduce_max = ops::Max(s.WithOpName("reduce_max"), sqrt, {0}); Output final_out = ops::Identity(s.WithOpName("final_out"), reduce_max); GrapplerItem item; item.fetch = {"sqrt", "final_out"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(2, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer); OptimizeTwice(&optimizer, &item, &output); // Should be a NoOp since we are not allowed to change the output of fetch // nodes. 
VerifyGraphsMatch(item.graph, output, __LINE__); } TEST_F(ArithmeticOptimizerTest, OptimizeMaxOrMinOfMonotonicElementWiseNonIncreasing) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2}); Output neg = ops::Neg(s.WithOpName("neg"), x); Output reduce_max = ops::Max(s.WithOpName("reduce_max"), neg, {0}); Output final_out = ops::Identity(s.WithOpName("final_out"), reduce_max); GrapplerItem item; item.fetch = {"final_out"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); EXPECT_EQ(item.graph.node_size(), output.node_size()); // Check if the inputs are switched int required_node_count = 0; for (int i = 0; i < output.node_size(); ++i) { const NodeDef& node = output.node(i); if (node.name() == "neg") { EXPECT_EQ("Neg", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("reduce_max", node.input(0)); ++required_node_count; } else if (node.name() == "reduce_max") { EXPECT_EQ("Min", node.op()); EXPECT_EQ(2, node.input_size()); EXPECT_EQ("x", node.input(0)); ++required_node_count; } } EXPECT_EQ(2, required_node_count); } TEST_F(ArithmeticOptimizerTest, UnaryOpsComposition) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2}); Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x); Output log = ops::Log(s.WithOpName("log"), sqrt); Output relu = ops::Relu(s.WithOpName("relu"), log); Output final_out = ops::Identity(s.WithOpName("final_out"), relu); GrapplerItem item; item.fetch = {"final_out"}; TF_CHECK_OK(s.ToGraphDef(&item.graph)); // Place all nodes on CPU. for (int i = 0; i < item.graph.node_size(); ++i) { item.graph.mutable_node(i)->set_device("/device:CPU:0"); } auto tensors_expected = EvaluateNodes(item.graph, item.fetch); EXPECT_EQ(1, tensors_expected.size()); GraphDef output; ArithmeticOptimizer optimizer; EnableOnlyUnaryOpsComposition(&optimizer); OptimizeAndPrune(&optimizer, &item, &output); EXPECT_EQ(3, output.node_size()); // Check that Sqrt/Log/Relu were replaced with a single op. int required_node_count = 0; for (int i = 0; i < output.node_size(); ++i) { const NodeDef& node = output.node(i); if (node.name() == "final_out") { EXPECT_EQ("Identity", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("relu/unary_ops_composition", node.input(0)); ++required_node_count; } else if (node.name() == "relu/unary_ops_composition") { EXPECT_EQ("_UnaryOpsComposition", node.op()); EXPECT_EQ(1, node.input_size()); EXPECT_EQ("x", node.input(0)); auto op_names = node.attr().at("op_names").list().s(); EXPECT_EQ(3, op_names.size()); EXPECT_EQ("Sqrt", op_names[0]); EXPECT_EQ("Log", op_names[1]); EXPECT_EQ("Relu", op_names[2]); ++required_node_count; } } EXPECT_EQ(2, required_node_count); auto tensors = EvaluateNodes(output, item.fetch); EXPECT_EQ(1, tensors.size()); test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6); } } // namespace grappler } // namespace tensorflow
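// Note (not from the original file): the EnableOnlyXxx helpers invoked in the
// tests above are fixture methods; they presumably follow the usual pattern of
// disabling every rewrite stage and re-enabling exactly one, along these lines
// (the option field name is an assumption):
//
//   void EnableOnlyAddToAddNCombining(ArithmeticOptimizer* optimizer) {
//     DisableAllStages(optimizer);
//     optimizer->options_.combine_add_to_addn = true;
//   }
//
// Each test therefore exercises a single arithmetic rewrite in isolation.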
C++
require 'spec_helper'

describe Puppet::Type.type(:openldap_access) do
  describe 'namevar title patterns' do
    it 'handles composite name' do
      access = described_class.new(name: 'to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:name]).to eq('to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:what]).to eq('attrs=userPassword,shadowLastChange')
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth']])
    end
    it 'handles composite name with position' do
      access = described_class.new(name: '{0}to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:name]).to eq('{0}to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:position]).to eq('0')
      expect(access[:what]).to eq('attrs=userPassword,shadowLastChange')
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth']])
    end
    it 'handles composite name with position and suffix' do
      access = described_class.new(name: '{0}to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth on dc=example,dc=com')
      expect(access[:name]).to eq('{0}to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth on dc=example,dc=com')
      expect(access[:position]).to eq('0')
      expect(access[:what]).to eq('attrs=userPassword,shadowLastChange')
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth']])
      expect(access[:suffix]).to eq('dc=example,dc=com')
    end
    it 'handles specific value of attr' do
      access = described_class.new(name: 'to attrs=objectClass val=posixAccount by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:name]).to eq('to attrs=objectClass val=posixAccount by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:what]).to eq('attrs=objectClass val=posixAccount')
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth']])
    end
  end
  describe 'access' do
    it 'handles array of values' do
      access = described_class.new(name: 'foo', access: ['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth'])
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write'], ['by anonymous auth']])
    end
    it 'handles string' do
      access = described_class.new(name: 'foo', access: 'by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth']])
    end
  end
end
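# Illustrative only (not part of the original spec): a composite title of the
# kind parsed above would typically come from a manifest declaration such as
# the following (the parameter list is an assumption, not taken from this
# module's documentation):
#
#   openldap_access { 'to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth':
#     ensure => present,
#   }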
Ruby
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include <aws/forecast/ForecastService_EXPORTS.h> #include <aws/core/utils/memory/stl/AWSString.h> #include <utility> namespace Aws { template<typename RESULT_TYPE> class AmazonWebServiceResult; namespace Utils { namespace Json { class JsonValue; } // namespace Json } // namespace Utils namespace ForecastService { namespace Model { class AWS_FORECASTSERVICE_API CreateForecastExportJobResult { public: CreateForecastExportJobResult(); CreateForecastExportJobResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result); CreateForecastExportJobResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result); /** * <p>The Amazon Resource Name (ARN) of the export job.</p> */ inline const Aws::String& GetForecastExportJobArn() const{ return m_forecastExportJobArn; } /** * <p>The Amazon Resource Name (ARN) of the export job.</p> */ inline void SetForecastExportJobArn(const Aws::String& value) { m_forecastExportJobArn = value; } /** * <p>The Amazon Resource Name (ARN) of the export job.</p> */ inline void SetForecastExportJobArn(Aws::String&& value) { m_forecastExportJobArn = std::move(value); } /** * <p>The Amazon Resource Name (ARN) of the export job.</p> */ inline void SetForecastExportJobArn(const char* value) { m_forecastExportJobArn.assign(value); } /** * <p>The Amazon Resource Name (ARN) of the export job.</p> */ inline CreateForecastExportJobResult& WithForecastExportJobArn(const Aws::String& value) { SetForecastExportJobArn(value); return *this;} /** * <p>The Amazon Resource Name (ARN) of the export job.</p> */ inline CreateForecastExportJobResult& WithForecastExportJobArn(Aws::String&& value) { SetForecastExportJobArn(std::move(value)); return *this;} /** * <p>The Amazon Resource Name (ARN) of the export job.</p> */ inline CreateForecastExportJobResult& WithForecastExportJobArn(const char* value) { SetForecastExportJobArn(value); return *this;} private: Aws::String m_forecastExportJobArn; }; } // namespace Model } // namespace ForecastService } // namespace Aws
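// Illustrative usage only (not part of the generated header). The client and
// request types below follow the standard AWS SDK for C++ conventions; SDK
// initialization via Aws::InitAPI is omitted for brevity:
//
//   Aws::ForecastService::ForecastServiceClient client;
//   Aws::ForecastService::Model::CreateForecastExportJobRequest request;
//   request.SetForecastExportJobName("my-export-job");
//   auto outcome = client.CreateForecastExportJob(request);
//   if (outcome.IsSuccess()) {
//     const Aws::String& arn = outcome.GetResult().GetForecastExportJobArn();
//   }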
C++
/* Copyright 2020 The Knative Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package generators import ( "io" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" "k8s.io/klog" ) // reconcilerControllerStubGenerator produces a file of the stub of the // controller for a custom impl with injection. type reconcilerControllerStubGenerator struct { generator.DefaultGen outputPackage string imports namer.ImportTracker typeToGenerate *types.Type reconcilerPkg string informerPackagePath string reconcilerClass string hasReconcilerClass bool } var _ generator.Generator = (*reconcilerControllerStubGenerator)(nil) func (g *reconcilerControllerStubGenerator) Filter(c *generator.Context, t *types.Type) bool { // Only process the type for this generator. return t == g.typeToGenerate } func (g *reconcilerControllerStubGenerator) Namers(c *generator.Context) namer.NameSystems { return namer.NameSystems{ "raw": namer.NewRawNamer(g.outputPackage, g.imports), } } func (g *reconcilerControllerStubGenerator) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) return } func (g *reconcilerControllerStubGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "{{", "}}") klog.V(5).Info("processing type ", t) m := map[string]interface{}{ "type": t, "class": g.reconcilerClass, "hasClass": g.hasReconcilerClass, "informerGet": c.Universe.Function(types.Name{ Package: g.informerPackagePath, Name: "Get", }), "controllerImpl": c.Universe.Type(types.Name{Package: "knative.dev/pkg/controller", Name: "Impl"}), "reconcilerNewImpl": c.Universe.Type(types.Name{ Package: g.reconcilerPkg, Name: "NewImpl", }), "loggingFromContext": c.Universe.Function(types.Name{ Package: "knative.dev/pkg/logging", Name: "FromContext", }), "contextContext": c.Universe.Type(types.Name{ Package: "context", Name: "Context", }), "configmapWatcher": c.Universe.Type(types.Name{ Package: "knative.dev/pkg/configmap", Name: "Watcher", }), "classAnnotationKey": c.Universe.Variable(types.Name{ Package: g.reconcilerPkg, Name: "ClassAnnotationKey", }), "annotationFilterFunc": c.Universe.Function(types.Name{ Package: "knative.dev/pkg/reconciler", Name: "AnnotationFilterFunc", }), "filterHandler": c.Universe.Type(types.Name{ Package: "k8s.io/client-go/tools/cache", Name: "FilteringResourceEventHandler", }), } sw.Do(reconcilerControllerStub, m) return sw.Error() } var reconcilerControllerStub = ` // TODO: PLEASE COPY AND MODIFY THIS FILE AS A STARTING POINT // NewController creates a Reconciler for {{.type|public}} and returns the result of NewImpl. func NewController( ctx {{.contextContext|raw}}, cmw {{.configmapWatcher|raw}}, ) *{{.controllerImpl|raw}} { logger := {{.loggingFromContext|raw}}(ctx) {{.type|lowercaseSingular}}Informer := {{.informerGet|raw}}(ctx) {{if .hasClass}} classValue := "default" // TODO: update this to the appropriate value. 
classFilter := {{.annotationFilterFunc|raw}}({{.classAnnotationKey|raw}}, classValue, false /*allowUnset*/) {{end}} // TODO: setup additional informers here. {{if .hasClass}}// TODO: remember to use the classFilter from above to filter appropriately.{{end}} r := &Reconciler{} impl := {{.reconcilerNewImpl|raw}}(ctx, r{{if .hasClass}}, classValue{{end}}) logger.Info("Setting up event handlers.") {{if .hasClass}} {{.type|lowercaseSingular}}Informer.Informer().AddEventHandler({{.filterHandler|raw}}{ FilterFunc: classFilter, Handler: controller.HandleAll(impl.Enqueue), }) {{else}} {{.type|lowercaseSingular}}Informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) {{end}} // TODO: add additional informer event handlers here. return impl } `
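// Illustrative only (not emitted by this generator): for a hypothetical type
// Foo with no reconciler class annotation, the template above renders roughly
// to the following (the fooinformer and fooreconciler package aliases are
// assumptions standing in for the generated injection packages):
//
//	func NewController(
//		ctx context.Context,
//		cmw configmap.Watcher,
//	) *controller.Impl {
//		logger := logging.FromContext(ctx)
//		fooInformer := fooinformer.Get(ctx)
//		r := &Reconciler{}
//		impl := fooreconciler.NewImpl(ctx, r)
//		logger.Info("Setting up event handlers.")
//		fooInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))
//		return impl
//	}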
Go
/* * Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * WSO2 Inc. licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except * in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.wso2.carbon.mss.examples.petstore.util.fe.view; import java.io.Serializable; import javax.faces.bean.ManagedBean; import javax.faces.bean.SessionScoped; /** * Bean classes used for JSF model. */ @ManagedBean @SessionScoped public class NavigationBean implements Serializable { private static final long serialVersionUID = -8628674465932953415L; public String redirectToStoreWelcome() { return "pet/list.xhtml?faces-redirect=true"; } public String redirectToAdminWelcome() { return "pet/index.xhtml?faces-redirect=true"; } public String toLogin() { return "/login.xhtml"; } public String backtoList() { return "list"; } }
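// Illustrative only (not part of the original bean): these action methods are
// intended to be bound from facelets through EL, for example:
//
//   <h:commandButton value="Login" action="#{navigationBean.toLogin}" />
//   <h:commandLink value="Back" action="#{navigationBean.backtoList}" />
//
// The returned outcome strings are resolved by the JSF navigation handler;
// the "?faces-redirect=true" suffix requests a client-side redirect after POST.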
Java
/* * Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * WSO2 Inc. licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except * in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.wso2.carbon.identity.oauth2.validators; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; import org.wso2.carbon.identity.oauth2.dto.OAuth2TokenValidationRequestDTO; import org.wso2.carbon.identity.oauth2.dto.OAuth2TokenValidationResponseDTO; import static org.testng.Assert.assertEquals; public class OAuth2TokenValidationMessageContextTest { private OAuth2TokenValidationMessageContext oAuth2TokenValidationMessageContext; private OAuth2TokenValidationRequestDTO requestDTO; private OAuth2TokenValidationResponseDTO responseDTO; @BeforeMethod public void setUp() throws Exception { requestDTO = new OAuth2TokenValidationRequestDTO(); responseDTO = new OAuth2TokenValidationResponseDTO(); oAuth2TokenValidationMessageContext = new OAuth2TokenValidationMessageContext(requestDTO, responseDTO); } @Test public void testGetRequestDTO() throws Exception { assertEquals(oAuth2TokenValidationMessageContext.getRequestDTO(), requestDTO); } @Test public void testGetResponseDTO() throws Exception { assertEquals(oAuth2TokenValidationMessageContext.getResponseDTO(), responseDTO); } @Test public void testAddProperty() throws Exception { oAuth2TokenValidationMessageContext.addProperty("testProperty", "testValue"); assertEquals(oAuth2TokenValidationMessageContext.getProperty("testProperty"), "testValue"); } @Test public void testGetProperty() throws Exception { oAuth2TokenValidationMessageContext.addProperty("testProperty", "testValue"); assertEquals(oAuth2TokenValidationMessageContext.getProperty("testProperty"), "testValue"); } }
Java
/* * XDD - a data movement and benchmarking toolkit * * Copyright (C) 1992-2013 I/O Performance, Inc. * Copyright (C) 2009-2013 UT-Battelle, LLC * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License version 2, as published by the Free Software * Foundation. See file COPYING. * */ /* worker_thread.c */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <strings.h> #include <string.h> #include <errno.h> #include <pthread.h> #define THIS_IS_A_SUBROUTINE #include "bx_data_structures.h" #define DEBUG 0 /************************************************************************** * The Worker Thread **************************************************************************/ // There are one or more Worker Threads per target. // A Worker Thread will indicate that it is WAITING for something to do and // place itself on the Worker Thread Data Structure queue of its target. // Eventually the target will wake up a Worker Thread which is indicated // by a non-zero value in the bx_wd_released variable in the Worker Thread // Data Structure for this Worker Thread. // Upon waking up, the Worker Thread will perform an INPUT or OUTPUT // operation depending on its target's designation. The Worker Thread // will have a buffer header structure that contains the location in the // file to perform the operation, the number of bytes to transfer, and the // I/O memory buffer. // Upon completing the requested I/O operation, the Worker Thread will // stuff its Buffer Header on to the "next buffer queue". In other words, // if this Worker Thread is an INPUT Worker Thread then it just read data // into its buffer. // Therefore, upon completion of the read, this Worker Thread will stuff // its Buffer Header on the Buffer Header Queue of the OUTPUT target and // wake up the OUTPUT target if it is waiting for a buffer. // Likewise, if this Worker Thread is an OUTPUT Worker Thread then it just // wrote data from its buffer. Therefore, upon completion of the write, this // Worker Thread will stuff its Buffer Header on the Buffer Header Queue // of the INPUT target and wake up the INPUT target if it is waiting // for a buffer. // // After putting its Buffer Header on the appropriate queue this Worker // Thread will stuff itself back onto its Worker Thread Data Structure // queue and wait for something to do. // // At some point the Worker Thread will wake up and find the "TERMINATE" // flag set in its Worker Thread Data Structure . At this point the Worker // Thread will break the loop and terminate. 
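// Illustrative sketch (not part of this file): the release that
// worker_thread_main() below waits for is presumably performed by the target
// thread using the same mutex/condition-variable pair, along these lines:
//
//	pthread_mutex_lock(&bx_wdp->bx_wd_mutex);
//	bx_wdp->bx_wd_bufhdrp = bufhdrp;	/* hand the Worker Thread a buffer */
//	bx_wdp->bx_wd_released = 1;		/* the predicate the waiter checks */
//	pthread_cond_signal(&bx_wdp->bx_wd_conditional);
//	pthread_mutex_unlock(&bx_wdp->bx_wd_mutex);
//
// The field names match those used below; the target-side code itself lives
// elsewhere in the source tree.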
void *worker_thread_main(void *pin) {
	ssize_t			status;
	struct bx_td		*bx_tdp;
	struct bx_wd		*bx_wdp;
	struct bx_buffer_queue	*qp;
	struct bx_buffer_header	*bufhdrp;
	nclk_t			nclk;

	bx_wdp = (struct bx_wd *)pin;
	bx_tdp = bx_wdp->bx_wd_my_bx_tdp;
	nclk_now(&nclk);
	if (DEBUG) fprintf(stderr,"%llu: worker_thread_main: ENTER: bx_wdp=%p, bx_tdp=%p\n",
		(unsigned long long int)nclk, bx_wdp, bx_tdp);

	status = 0;
	nclk_now(&nclk);
	if (DEBUG) fprintf(stderr,"%llu worker_thread_main: [%s] worker_thread_number=%d - ENTER\n",
		(unsigned long long int)nclk,
		(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT",
		bx_wdp->bx_wd_my_worker_thread_number);

	while (1) {
		// Set flag to indicate that this worker_thread is WAITING
		bx_wdp->bx_wd_flags |= BX_WD_WAITING;
		// Enqueue this Worker Thread Data Structure on the bx_wd_queue for this target
		bx_wd_enqueue(bx_wdp, bx_wdp->bx_wd_my_queue);

		// Wait for the target thread to release me
		pthread_mutex_lock(&bx_wdp->bx_wd_mutex);
		nclk_now(&nclk);
		if (DEBUG) fprintf(stderr,"%llu worker_thread_main: [%s] worker_thread_number=%d - got the bx_wd_mutex lock - waiting for something to do\n",
			(unsigned long long int)nclk,
			(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT",
			bx_wdp->bx_wd_my_worker_thread_number);
		// Re-assert the WAITING flag now that the mutex is held
		bx_wdp->bx_wd_flags |= BX_WD_WAITING;
		while (1 != bx_wdp->bx_wd_released) {
			pthread_cond_wait(&bx_wdp->bx_wd_conditional, &bx_wdp->bx_wd_mutex);
		}
		bx_wdp->bx_wd_flags &= ~BX_WD_WAITING;
		bx_wdp->bx_wd_released = 0;

		if (bx_wdp->bx_wd_flags & BX_WD_TERMINATE)
			break;

		nclk_now(&nclk);
		if (DEBUG) fprintf(stderr,"%llu worker_thread_main: [%s] worker_thread_number=%d - got the bx_wd_mutex lock - GOT something to do\n",
			(unsigned long long int)nclk,
			(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT",
			bx_wdp->bx_wd_my_worker_thread_number);
		bx_wd_show(bx_wdp);
		bufhdrp = bx_wdp->bx_wd_bufhdrp;
		if (bx_tdp->bx_td_flags & BX_TD_INPUT) {
			// Read the input file
			status = pread(bx_wdp->bx_wd_fd, bufhdrp->bh_startp, bufhdrp->bh_transfer_size, bufhdrp->bh_file_offset);
			if (status < 0) {
				perror("Read error");
				bufhdrp->bh_valid_size = 0;
			} else {
				bufhdrp->bh_valid_size = status;
			}
			nclk_now(&nclk);
			if (DEBUG) fprintf(stderr,"%llu worker_thread_main: [%s] worker_thread_number=%d - read %zd of %d bytes starting at offset %lld\n",
				(unsigned long long int)nclk,
				(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT",
				bx_wdp->bx_wd_my_worker_thread_number,
				status, (int)bufhdrp->bh_transfer_size, (long long int)bufhdrp->bh_file_offset);
			// Put this buffer on the output target queue
			qp = &bx_td[bx_wdp->bx_wd_next_buffer_queue].bx_td_buffer_queue;
			bh_enqueue(bx_wdp->bx_wd_bufhdrp, qp);
		} else {
			// Must be output
			status = pwrite(bx_wdp->bx_wd_fd, bufhdrp->bh_startp, bufhdrp->bh_transfer_size, bufhdrp->bh_file_offset);
			if (status < 0) {
				perror("Write error");
				bufhdrp->bh_valid_size = 0;
			} else {
				bufhdrp->bh_valid_size = status;
			}
			nclk_now(&nclk);
			if (DEBUG) fprintf(stderr,"%llu worker_thread_main: [%s] worker_thread_number=%d - wrote %zd of %d bytes starting at offset %lld - requeuing buffer %p\n",
				(unsigned long long int)nclk,
				(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT",
				bx_wdp->bx_wd_my_worker_thread_number,
				status, (int)bufhdrp->bh_transfer_size, (long long int)bufhdrp->bh_file_offset, (void *)bufhdrp);
			// Put this buffer on the input target queue
			qp = &bx_td[bx_wdp->bx_wd_next_buffer_queue].bx_td_buffer_queue;
			bh_enqueue(bufhdrp, qp);
			bufqueue_show(qp);
		}
		nclk_now(&nclk);
		if (DEBUG) fprintf(stderr,"%llu worker_thread_main: [%s] worker_thread_number=%d - transferred %zd bytes\n",
			(unsigned long long int)nclk,
			(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT",
			bx_wdp->bx_wd_my_worker_thread_number, status);
		if (DEBUG) fprintf(stderr,"%llu worker_thread_main: [%s] worker_thread_number=%d - releasing the bx_wd_mutex lock\n",
			(unsigned long long int)nclk,
			(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT",
			bx_wdp->bx_wd_my_worker_thread_number);
		pthread_mutex_unlock(&bx_wdp->bx_wd_mutex);
	}
	if (DEBUG) fprintf(stderr,"%llu worker_thread_main: [%s] worker_thread_number=%d - Exit\n",
		(unsigned long long int)nclk,
		(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT",
		bx_wdp->bx_wd_my_worker_thread_number);
	return 0;
} // End of worker_thread_main()
C
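The buffer handoff that the comment block in the file above describes is a classic two-queue producer/consumer rendezvous: the INPUT worker fills a buffer and passes it to the OUTPUT side's queue; the OUTPUT worker drains it and recycles the empty buffer back to the INPUT side. A minimal, self-contained Java sketch of the same shape, where BlockingQueue stands in for the bx_td buffer-header queues and their mutex/condition pairs (all names here are hypothetical):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public final class BufferHandoff {

    static final class Buffer {
        final byte[] data = new byte[64 * 1024];
        int validSize; // bytes actually transferred, mirrors bh_valid_size
    }

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Buffer> toOutput = new ArrayBlockingQueue<>(4); // filled buffers
        BlockingQueue<Buffer> toInput = new ArrayBlockingQueue<>(4);  // empty buffers

        for (int i = 0; i < 4; i++) toInput.put(new Buffer()); // prime the buffer pool

        Thread input = new Thread(() -> {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    Buffer b = toInput.take();    // wait for an empty buffer
                    b.validSize = b.data.length;  // stand-in for pread()
                    toOutput.put(b);              // wake the OUTPUT side
                }
            } catch (InterruptedException ignored) { }
        });
        Thread output = new Thread(() -> {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    Buffer b = toOutput.take();   // wait for a filled buffer
                    // stand-in for pwrite(b.data, b.validSize)
                    toInput.put(b);               // recycle the buffer
                }
            } catch (InterruptedException ignored) { }
        });
        input.start();
        output.start();
        Thread.sleep(100);   // let the pipeline run briefly
        input.interrupt();   // stand-in for the TERMINATE flag
        output.interrupt();
    }
}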
/* Copyright 2011-2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.google.security.zynamics.binnavi.disassembly; import com.google.security.zynamics.zylib.gui.zygraph.edges.IViewEdgeListener; /** * Interface for objects that want to be notified about changes in edges. */ public interface INaviEdgeListener extends IViewEdgeListener { /** * Invoked after the global comment of an edge changed. * * @param naviEdge The edge whose global comment changed. */ void changedGlobalComment(CNaviViewEdge naviEdge); /** * Invoked after the local comment of an edge changed. * * @param naviEdge The edge whose local comment changed. */ void changedLocalComment(CNaviViewEdge naviEdge); }
Java
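A sketch of what an implementation of the listener above might look like; the logging behavior is illustrative, and only the two callback signatures come from the interface itself. Because IViewEdgeListener declares further callbacks not shown in this file, the sketch is left abstract:

public abstract class LoggingEdgeListener implements INaviEdgeListener {
    @Override
    public void changedGlobalComment(CNaviViewEdge naviEdge) {
        System.out.println("Global comment changed on edge: " + naviEdge);
    }

    @Override
    public void changedLocalComment(CNaviViewEdge naviEdge) {
        System.out.println("Local comment changed on edge: " + naviEdge);
    }
    // The inherited IViewEdgeListener callbacks (not shown in this file) still
    // need implementations, hence the class stays abstract in this sketch.
}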
package com.bazaarvoice.emodb.common.dropwizard.leader; import com.bazaarvoice.curator.recipes.leader.LeaderService; import com.bazaarvoice.emodb.common.dropwizard.task.TaskRegistry; import com.bazaarvoice.emodb.common.zookeeper.leader.PartitionedLeaderService; import com.google.common.collect.ImmutableMultimap; import com.google.common.collect.Maps; import com.google.common.util.concurrent.MoreExecutors; import com.google.common.util.concurrent.Service; import com.google.inject.Inject; import io.dropwizard.servlets.tasks.Task; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.PrintWriter; import java.util.Map; import java.util.TreeMap; import java.util.concurrent.ConcurrentMap; /** * Shows the current status of leadership processes managed by {@link LeaderService}. Allows terminating * individual leadership processes, but such that they can be restarted only by restarting the entire server. */ public class LeaderServiceTask extends Task { private static final Logger _log = LoggerFactory.getLogger(LeaderServiceTask.class); private final ConcurrentMap<String, LeaderService> _selectorMap = Maps.newConcurrentMap(); @Inject public LeaderServiceTask(TaskRegistry tasks) { super("leader"); tasks.addTask(this); } public void register(final String name, final LeaderService leaderService) { _selectorMap.put(name, leaderService); // Unregister automatically to avoid memory leaks. leaderService.addListener(new AbstractServiceListener() { @Override public void terminated(Service.State from) { unregister(name, leaderService); } @Override public void failed(Service.State from, Throwable failure) { unregister(name, leaderService); } }, MoreExecutors.sameThreadExecutor()); } public void register(final String name, final PartitionedLeaderService partitionedLeaderService) { int partition = 0; for (LeaderService leaderService : partitionedLeaderService.getPartitionLeaderServices()) { register(String.format("%s-%d", name, partition++), leaderService); } } public void unregister(String name, LeaderService leaderService) { _selectorMap.remove(name, leaderService); } @Override public void execute(ImmutableMultimap<String, String> parameters, PrintWriter out) throws Exception { // The 'release' argument tells a server to give up leadership and let a new leader be elected, possibly // re-electing the current server. This is useful for rebalancing leader-controlled activities. for (String name : parameters.get("release")) { LeaderService leaderService = _selectorMap.get(name); if (leaderService == null) { out.printf("Unknown leader process: %s%n", name); continue; } Service actualService = leaderService.getCurrentDelegateService().orNull(); if (actualService == null || !actualService.isRunning()) { out.printf("Process is not currently elected leader: %s%n", name); continue; } _log.warn("Temporarily releasing leadership for process: {}", name); out.printf("Temporarily releasing leadership for process: %s, cluster will elect a new leader.%n", name); actualService.stopAndWait(); } // The 'terminate' argument tells a server to give up leadership permanently (or until the server restarts). for (String name : parameters.get("terminate")) { LeaderService leaderService = _selectorMap.get(name); if (leaderService == null) { out.printf("Unknown leader process: %s%n", name); continue; } _log.warn("Terminating leader process for: {}", name); out.printf("Terminating leader process for: %s. Restart the server to restart the leader process.%n", name); leaderService.stopAndWait(); } // Print current status. 
for (Map.Entry<String, LeaderService> entry : new TreeMap<>(_selectorMap).entrySet()) { String name = entry.getKey(); LeaderService leaderService = entry.getValue(); out.printf("%s: %s (leader=%s)%n", name, describeState(leaderService.state(), leaderService.hasLeadership()), getLeaderId(leaderService)); } } private String describeState(Service.State state, boolean hasLeadership) { if (state == Service.State.RUNNING && !hasLeadership) { return "waiting to win leadership election"; } else { return state.name(); } } private String getLeaderId(LeaderService leaderService) { try { return leaderService.getLeader().getId(); } catch (Exception e) { return "<unknown>"; } } }
Java
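A sketch of wiring a LeaderService into the task above and then driving the 'release' action programmatically. The register() and execute() calls match the class shown; the taskRegistry and scannerLeaderService instances are assumed to be provided by the application, and the process name "scanner" is illustrative:

import com.google.common.collect.ImmutableMultimap;
import java.io.PrintWriter;
import java.io.StringWriter;

public final class LeaderTaskExample {
    static void demo(TaskRegistry taskRegistry, LeaderService scannerLeaderService) throws Exception {
        LeaderServiceTask task = new LeaderServiceTask(taskRegistry);
        task.register("scanner", scannerLeaderService);

        // Equivalent of POSTing ?release=scanner to the Dropwizard tasks endpoint:
        // the current delegate service is stopped and a new leader election runs.
        StringWriter buf = new StringWriter();
        task.execute(ImmutableMultimap.of("release", "scanner"), new PrintWriter(buf, true));
        System.out.print(buf);
    }
}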
<!DOCTYPE html> <html devsite> <head> <meta name="project_path" value="/web/tools/workbox/_project.yaml" /> <meta name="book_path" value="/web/tools/workbox/_book.yaml" /> <meta name="gtm_var" data-key="docType" data-value="reference"> <title>Source: workbox-webpack-plugin/src/inject-manifest.js</title> <link href="jsdoc.css" rel="stylesheet"> </head> <body> <div id="jsdoc-body-container"> <div id="jsdoc-content"> <div id="jsdoc-content-container"> <div id="jsdoc-banner" role="banner"> </div> <div id="jsdoc-main" role="main"> <header class="page-header"> <h1>Source: workbox-webpack-plugin/src/inject-manifest.js</h1> </header> <article> <pre class="prettyprint linenums"><code>/* Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the &quot;License&quot;); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an &quot;AS IS&quot; BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ const assert &#x3D; require(&#x27;assert&#x27;); const path &#x3D; require(&#x27;path&#x27;); const {getManifest} &#x3D; require(&#x27;workbox-build&#x27;); const convertStringToAsset &#x3D; require(&#x27;./lib/convert-string-to-asset&#x27;); const getAssetHash &#x3D; require(&#x27;./lib/get-asset-hash&#x27;); const getManifestEntriesFromCompilation &#x3D; require(&#x27;./lib/get-manifest-entries-from-compilation&#x27;); const getWorkboxSWImports &#x3D; require(&#x27;./lib/get-workbox-sw-imports&#x27;); const readFileWrapper &#x3D; require(&#x27;./lib/read-file-wrapper&#x27;); const sanitizeConfig &#x3D; require(&#x27;./lib/sanitize-config&#x27;); const stringifyManifest &#x3D; require(&#x27;./lib/stringify-manifest&#x27;); /** * This class supports taking an existing service worker file which already * uses Workbox, and injecting a reference to a [precache manifest]() into it, * allowing it to efficiently precache the assets created by a webpack build. * * Use an instance of &#x60;InjectManifest&#x60; in the * [&#x60;plugins&#x60; array](https://webpack.js.org/concepts/plugins/#usage) of a * webpack config. * * @module workbox-webpack-plugin */ class InjectManifest { /** * Creates an instance of InjectManifest. * * @param {Object} [config] See the * [configuration guide](/web/tools/workbox/modules/workbox-webpack-plugin#configuration) * for all supported options and defaults. */ constructor(config &#x3D; {}) { assert(typeof config.swSrc &#x3D;&#x3D;&#x3D; &#x27;string&#x27;, &#x60;swSrc must be set to the path &#x60; + &#x60;to an existing service worker file.&#x60;); this.config &#x3D; Object.assign({}, { chunks: [], exclude: [ // Exclude source maps. /\.map$/, // Exclude anything starting with manifest and ending .js or .json. /^manifest.*\.js(?:on)?$/, ], excludeChunks: [], importScripts: [], importWorkboxFrom: &#x27;cdn&#x27;, swDest: path.basename(config.swSrc), }, config); } /** * @param {Object} compilation The webpack compilation. * @param {Function} readFile The function to use when reading files, * derived from compiler.inputFileSystem. 
* @private */ async handleEmit(compilation, readFile) { if (this.config.importWorkboxFrom &#x3D;&#x3D;&#x3D; &#x27;local&#x27;) { throw new Error(&#x60;importWorkboxFrom can not be set to &#x27;local&#x27; when using&#x60; + &#x60; InjectManifest. Please use &#x27;cdn&#x27; or a chunk name instead.&#x60;); } const workboxSWImports &#x3D; await getWorkboxSWImports( compilation, this.config); let entries &#x3D; getManifestEntriesFromCompilation(compilation, this.config); const sanitizedConfig &#x3D; sanitizeConfig.forGetManifest(this.config); // If there are any &quot;extra&quot; config options remaining after we remove the // ones that are used natively by the plugin, then assume that they should // be passed on to workbox-build.getManifest() to generate extra entries. if (Object.keys(sanitizedConfig).length &gt; 0) { // If globPatterns isn&#x27;t explicitly set, then default to [], instead of // the workbox-build.getManifest() default. sanitizedConfig.globPatterns &#x3D; sanitizedConfig.globPatterns || []; const {manifestEntries} &#x3D; await getManifest(sanitizedConfig); entries &#x3D; entries.concat(manifestEntries); } const manifestString &#x3D; stringifyManifest(entries); const manifestAsset &#x3D; convertStringToAsset(manifestString); const manifestHash &#x3D; getAssetHash(manifestAsset); const manifestFilename &#x3D; &#x60;precache-manifest.${manifestHash}.js&#x60;; compilation.assets[manifestFilename] &#x3D; manifestAsset; this.config.importScripts.push( (compilation.options.output.publicPath || &#x27;&#x27;) + manifestFilename); // workboxSWImports might be null if importWorkboxFrom is &#x27;disabled&#x27;. if (workboxSWImports) { // workboxSWImport is an array, so use concat() rather than push(). this.config.importScripts &#x3D; this.config.importScripts.concat( workboxSWImports); } const originalSWString &#x3D; await readFileWrapper(readFile, this.config.swSrc); const importScriptsString &#x3D; this.config.importScripts .map(JSON.stringify) .join(&#x27;, &#x27;); const postInjectionSWString &#x3D; &#x60;importScripts(${importScriptsString}); ${originalSWString} &#x60;; compilation.assets[this.config.swDest] &#x3D; convertStringToAsset(postInjectionSWString); } /** * @param {Object} [compiler] default compiler object passed from webpack * * @private */ apply(compiler) { compiler.plugin(&#x27;emit&#x27;, (compilation, callback) &#x3D;&gt; { this.handleEmit(compilation, compiler.inputFileSystem._readFile) .then(callback) .catch(callback); }); } } module.exports &#x3D; InjectManifest; </code></pre> </article> </div> </div> <nav id="jsdoc-toc-nav" role="navigation"></nav> </div> </div> </body> </html>
HTML
/** * Copyright 2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. **/ var should = require("should"); var request = require('supertest'); var express = require('express'); var when = require('when'); var app = express(); var RED = require("../../../red/red.js"); var storage = require("../../../red/storage"); var library = require("../../../red/api/library"); describe("library api", function() { function initStorage(_flows,_libraryEntries) { var flows = _flows; var libraryEntries = _libraryEntries; storage.init({ storageModule: { init: function() { return when.resolve(); }, getAllFlows: function() { return when.resolve(flows); }, getFlow: function(fn) { if (flows[fn]) { return when.resolve(flows[fn]); } else { return when.reject(); } }, saveFlow: function(fn,data) { flows[fn] = data; return when.resolve(); }, getLibraryEntry: function(type,path) { if (libraryEntries[type] && libraryEntries[type][path]) { return when.resolve(libraryEntries[type][path]); } else { return when.reject(); } }, saveLibraryEntry: function(type,path,meta,body) { libraryEntries[type][path] = body; return when.resolve(); } } }); } describe("flows", function() { var app; before(function() { app = express(); app.use(express.json()); app.get("/library/flows",library.getAll); app.post(new RegExp("/library/flows\/(.*)"),library.post); app.get(new RegExp("/library/flows\/(.*)"),library.get); }); it('returns empty result', function(done) { initStorage({}); request(app) .get('/library/flows') .expect(200) .end(function(err,res) { if (err) { throw err; } res.body.should.not.have.property('f'); res.body.should.not.have.property('d'); done(); }); }); it('returns 404 for non-existent entry', function(done) { initStorage({}); request(app) .get('/library/flows/foo') .expect(404) .end(done); }); it('can store and retrieve item', function(done) { initStorage({}); var flow = '[]'; request(app) .post('/library/flows/foo') .set('Content-Type', 'application/json') .send(flow) .expect(204).end(function (err, res) { if (err) { throw err; } request(app) .get('/library/flows/foo') .expect(200) .end(function(err,res) { if (err) { throw err; } res.text.should.equal(flow); done(); }); }); }); it('lists a stored item', function(done) { initStorage({f:["bar"]}); request(app) .get('/library/flows') .expect(200) .end(function(err,res) { if (err) { throw err; } res.body.should.have.property('f'); should.deepEqual(res.body.f,['bar']); done(); }); }); it('returns 403 for malicious get attempt', function(done) { initStorage({}); // without the userDir override the malicious url would be // http://127.0.0.1:1880/library/flows/../../package to // obtain package.json from the node-red root. request(app) .get('/library/flows/../../../../../package') .expect(403) .end(done); }); it('returns 403 for malicious post attempt', function(done) { initStorage({}); // without the userDir override the malicious url would be // http://127.0.0.1:1880/library/flows/../../package to // obtain package.json from the node-red root. 
request(app) .post('/library/flows/../../../../../package') .expect(403) .end(done); }); }); describe("type", function() { var app; before(function() { app = express(); app.use(express.json()); library.init(app); RED.library.register("test"); }); it('returns empty result', function(done) { initStorage({},{'test':{"":[]}}); request(app) .get('/library/test') .expect(200) .end(function(err,res) { if (err) { throw err; } res.body.should.not.have.property('f'); done(); }); }); it('returns 404 for non-existent entry', function(done) { initStorage({},{}); request(app) .get('/library/test/foo') .expect(404) .end(done); }); it('can store and retrieve item', function(done) { initStorage({},{'test':{}}); var flow = '[]'; request(app) .post('/library/test/foo') .set('Content-Type', 'text/plain') .send(flow) .expect(204).end(function (err, res) { if (err) { throw err; } request(app) .get('/library/test/foo') .expect(200) .end(function(err,res) { if (err) { throw err; } res.text.should.equal(flow); done(); }); }); }); it('lists a stored item', function(done) { initStorage({},{'test':{'':['abc','def']}}); request(app) .get('/library/test') .expect(200) .end(function(err,res) { if (err) { throw err; } // This response isn't strictly accurate - but it // verifies the api returns what storage gave it should.deepEqual(res.body,['abc','def']); done(); }); }); it('returns 403 for malicious access attempt', function(done) { request(app) .get('/library/test/../../../../../../../../../../etc/passwd') .expect(403) .end(done); }); it('returns 403 for malicious access attempt', function(done) { request(app) .get('/library/test/..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\etc\\passwd') .expect(403) .end(done); }); it('returns 403 for malicious access attempt', function(done) { request(app) .post('/library/test/../../../../../../../../../../etc/passwd') .set('Content-Type', 'text/plain') .send('root:x:0:0:root:/root:/usr/bin/tclsh') .expect(403) .end(done); }); }); });
JavaScript
#!/bin/bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

source "${KUBE_ROOT}/cluster/gce/util.sh"

detect-project &> /dev/null
export PROJECT

RETRIES=3

# Runs a 'gcloud compute' command with the given parameters. Up to $RETRIES
# attempts will be made to execute the command.
# arguments:
#   $@: all stuff that goes after 'gcloud compute'
function run-gcloud-compute-with-retries {
  for attempt in $(seq 1 ${RETRIES}); do
    # Note: plain 'local', not 'local -r' - a readonly local cannot be
    # re-assigned on the next loop iteration.
    local gcloud_cmd_hash=$(echo "gcloud compute $@" | md5sum | cut -f1 -d" ")
    local gcloud_logfile="/tmp/gcloud_${gcloud_cmd_hash}.log"
    echo "" > ${gcloud_logfile}
    if ! gcloud compute "$@" |& tee ${gcloud_logfile}; then
      if [[ $(grep -c "already exists" ${gcloud_logfile}) -gt 0 ]]; then
        if [[ "${attempt}" == 1 ]]; then
          echo -e "${color_red}Failed to $1 $2 $3 as the resource hasn't been deleted from a previous run.${color_norm}" >& 2
          exit 1
        fi
        echo -e "${color_yellow}The previous attempt to $1 $2 $3 actually succeeded, but the status response wasn't received.${color_norm}"
        return 0
      fi
      echo -e "${color_yellow}Attempt ${attempt} to $1 $2 $3 failed. Retrying.${color_norm}" >& 2
      sleep $((attempt * 5))
    else
      echo -e "${color_green}'gcloud compute $1 $2 $3' succeeded.${color_norm}"
      return 0
    fi
  done
  echo -e "${color_red}Failed to $1 $2 $3.${color_norm}" >& 2
  exit 1
}

function create-master-instance-with-resources {
  GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE}"

  run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
    ${GCLOUD_COMMON_ARGS} \
    --type "${MASTER_DISK_TYPE}" \
    --size "${MASTER_DISK_SIZE}"

  if [ "${EVENT_PD:-false}" == "true" ]; then
    run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \
      ${GCLOUD_COMMON_ARGS} \
      --type "${MASTER_DISK_TYPE}" \
      --size "${MASTER_DISK_SIZE}"
  fi

  run-gcloud-compute-with-retries addresses create "${MASTER_NAME}-ip" \
    --project "${PROJECT}" \
    --region "${REGION}" -q

  MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
    --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')

  run-gcloud-compute-with-retries instances create "${MASTER_NAME}" \
    ${GCLOUD_COMMON_ARGS} \
    --address "${MASTER_IP}" \
    --machine-type "${MASTER_SIZE}" \
    --image-project="${MASTER_IMAGE_PROJECT}" \
    --image "${MASTER_IMAGE}" \
    --tags "${MASTER_TAG}" \
    --network "${NETWORK}" \
    --scopes "storage-ro,compute-rw,logging-write" \
    --boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
    --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"

  if [ "${EVENT_PD:-false}" == "true" ]; then
    echo "Attaching ${MASTER_NAME}-event-pd to ${MASTER_NAME}"
    run-gcloud-compute-with-retries instances attach-disk "${MASTER_NAME}" \
      ${GCLOUD_COMMON_ARGS} \
      --disk "${MASTER_NAME}-event-pd" \
      --device-name="master-event-pd"
  fi

  run-gcloud-compute-with-retries firewall-rules create "${MASTER_NAME}-https" \
    --project "${PROJECT}" \
    --network "${NETWORK}" \
    --source-ranges "0.0.0.0/0" \
    --target-tags "${MASTER_TAG}" \
    --allow "tcp:443"
}

# Command to be executed is '$1'.
# No. of retries is '$2' (if provided) or 1 (default).
function execute-cmd-on-master-with-retries() {
  RETRIES="${2:-1}" run-gcloud-compute-with-retries ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" --command="$1"
}

function copy-files() {
  run-gcloud-compute-with-retries copy-files --zone="${ZONE}" --project="${PROJECT}" "$@"
}

function delete-master-instance-and-resources {
  GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE} --quiet"

  gcloud compute instances delete "${MASTER_NAME}" \
    ${GCLOUD_COMMON_ARGS} || true

  gcloud compute disks delete "${MASTER_NAME}-pd" \
    ${GCLOUD_COMMON_ARGS} || true

  gcloud compute disks delete "${MASTER_NAME}-event-pd" \
    ${GCLOUD_COMMON_ARGS} &> /dev/null || true

  gcloud compute addresses delete "${MASTER_NAME}-ip" \
    --project "${PROJECT}" \
    --region "${REGION}" \
    --quiet || true

  gcloud compute firewall-rules delete "${MASTER_NAME}-https" \
    --project "${PROJECT}" \
    --quiet || true

  if [ "${SEPARATE_EVENT_MACHINE:-false}" == "true" ]; then
    gcloud compute instances delete "${EVENT_STORE_NAME}" \
      ${GCLOUD_COMMON_ARGS} || true

    gcloud compute disks delete "${EVENT_STORE_NAME}-pd" \
      ${GCLOUD_COMMON_ARGS} || true
  fi
}
Shell
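The retry wrapper in the script above follows a common shape: retry the operation, back off linearly between attempts, and treat an "already exists" error on a retry as evidence that an earlier attempt actually succeeded. A language-neutral Java sketch of the same backoff shape, purely illustrative (the shell function above is the real implementation, and the error-classification step is elided here):

import java.util.concurrent.Callable;

public final class Retry {
    // Retries op up to 'retries' times with linear backoff,
    // like `sleep $(($attempt * 5))` in the script above.
    public static <T> T withRetries(int retries, Callable<T> op) throws Exception {
        Exception last = null;
        for (int attempt = 1; attempt <= retries; attempt++) {
            try {
                return op.call();
            } catch (Exception e) {
                last = e;
                Thread.sleep(attempt * 5_000L);
            }
        }
        throw last; // retries must be >= 1, otherwise last is null
    }
}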
"""Support for OpenWRT (ubus) routers.""" import logging import re from openwrt.ubus import Ubus import voluptuous as vol from homeassistant.components.device_tracker import ( DOMAIN, PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA, DeviceScanner, ) from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_DHCP_SOFTWARE = "dhcp_software" DEFAULT_DHCP_SOFTWARE = "dnsmasq" DHCP_SOFTWARES = ["dnsmasq", "odhcpd", "none"] PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_USERNAME): cv.string, vol.Optional(CONF_DHCP_SOFTWARE, default=DEFAULT_DHCP_SOFTWARE): vol.In( DHCP_SOFTWARES ), } ) def get_scanner(hass, config): """Validate the configuration and return an ubus scanner.""" dhcp_sw = config[DOMAIN][CONF_DHCP_SOFTWARE] if dhcp_sw == "dnsmasq": scanner = DnsmasqUbusDeviceScanner(config[DOMAIN]) elif dhcp_sw == "odhcpd": scanner = OdhcpdUbusDeviceScanner(config[DOMAIN]) else: scanner = UbusDeviceScanner(config[DOMAIN]) return scanner if scanner.success_init else None def _refresh_on_access_denied(func): """If remove rebooted, it lost our session so rebuild one and try again.""" def decorator(self, *args, **kwargs): """Wrap the function to refresh session_id on PermissionError.""" try: return func(self, *args, **kwargs) except PermissionError: _LOGGER.warning( "Invalid session detected." " Trying to refresh session_id and re-run RPC" ) self.ubus.connect() return func(self, *args, **kwargs) return decorator class UbusDeviceScanner(DeviceScanner): """ This class queries a wireless router running OpenWrt firmware. Adapted from Tomato scanner. """ def __init__(self, config): """Initialize the scanner.""" host = config[CONF_HOST] self.username = config[CONF_USERNAME] self.password = config[CONF_PASSWORD] self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);") self.last_results = {} self.url = f"http://{host}/ubus" self.ubus = Ubus(self.url, self.username, self.password) self.hostapd = [] self.mac2name = None self.success_init = self.ubus.connect() is not None def scan_devices(self): """Scan for new devices and return a list with found device IDs.""" self._update_info() return self.last_results def _generate_mac2name(self): """Return empty MAC to name dict. Overridden if DHCP server is set.""" self.mac2name = {} @_refresh_on_access_denied def get_device_name(self, device): """Return the name of the given device or None if we don't know.""" if self.mac2name is None: self._generate_mac2name() if self.mac2name is None: # Generation of mac2name dictionary failed return None name = self.mac2name.get(device.upper(), None) return name @_refresh_on_access_denied def _update_info(self): """Ensure the information from the router is up to date. Returns boolean if scanning successful. 
""" if not self.success_init: return False _LOGGER.info("Checking hostapd") if not self.hostapd: hostapd = self.ubus.get_hostapd() self.hostapd.extend(hostapd.keys()) self.last_results = [] results = 0 # for each access point for hostapd in self.hostapd: if result := self.ubus.get_hostapd_clients(hostapd): results = results + 1 # Check for each device is authorized (valid wpa key) for key in result["clients"].keys(): device = result["clients"][key] if device["authorized"]: self.last_results.append(key) return bool(results) class DnsmasqUbusDeviceScanner(UbusDeviceScanner): """Implement the Ubus device scanning for the dnsmasq DHCP server.""" def __init__(self, config): """Initialize the scanner.""" super().__init__(config) self.leasefile = None def _generate_mac2name(self): if self.leasefile is None: if result := self.ubus.get_uci_config("dhcp", "dnsmasq"): values = result["values"].values() self.leasefile = next(iter(values))["leasefile"] else: return result = self.ubus.file_read(self.leasefile) if result: self.mac2name = {} for line in result["data"].splitlines(): hosts = line.split(" ") self.mac2name[hosts[1].upper()] = hosts[3] else: # Error, handled in the ubus.file_read() return class OdhcpdUbusDeviceScanner(UbusDeviceScanner): """Implement the Ubus device scanning for the odhcp DHCP server.""" def _generate_mac2name(self): if result := self.ubus.get_dhcp_method("ipv4leases"): self.mac2name = {} for device in result["device"].values(): for lease in device["leases"]: mac = lease["mac"] # mac = aabbccddeeff # Convert it to expected format with colon mac = ":".join(mac[i : i + 2] for i in range(0, len(mac), 2)) self.mac2name[mac.upper()] = lease["hostname"] else: # Error, handled in the ubus.get_dhcp_method() return
Python
/* IBM_PROLOG_BEGIN_TAG */ /* This is an automatically generated prolog. */ /* */ /* $Source: src/usr/hwpf/hwp/core_activate/proc_post_winkle/proc_post_winkle.C $ */ /* */ /* OpenPOWER HostBoot Project */ /* */ /* Contributors Listed Below - COPYRIGHT 2013,2014 */ /* [+] International Business Machines Corp. */ /* */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); */ /* you may not use this file except in compliance with the License. */ /* You may obtain a copy of the License at */ /* */ /* http://www.apache.org/licenses/LICENSE-2.0 */ /* */ /* Unless required by applicable law or agreed to in writing, software */ /* distributed under the License is distributed on an "AS IS" BASIS, */ /* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ /* implied. See the License for the specific language governing */ /* permissions and limitations under the License. */ /* */ /* IBM_PROLOG_END_TAG */ // $Id: proc_post_winkle.C,v 1.2 2013/07/18 00:45:00 stillgs Exp $ // $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ipl/fapi/proc_post_winkle.C,v $ //------------------------------------------------------------------------------ // *! (C) Copyright International Business Machines Corp. 2011 // *! All Rights Reserved -- Property of IBM // *! *** *** //------------------------------------------------------------------------------ // *! OWNER NAME : Greg Still Email: [email protected] // *! BACKUP NAME : Michael Olsen Email: [email protected] /// \file proc_post_winkle.C /// \brief Re-enables the standard product idle mode configuration after /// an IPL Winkle action /// /// \verbatim /// /// For the passed EX target, /// - Remove disable of DISABLE_FORCE_DEEP_TO_FAST_WINKLE that was /// set on the master core. 
Removing on the non_master cores
///       is not harmful
///
/// Procedure Prereq:
///    - System clocks are running
///
/// \endverbatim
///
//------------------------------------------------------------------------------

// ----------------------------------------------------------------------
// Includes
// ----------------------------------------------------------------------
#include "proc_post_winkle.H"

extern "C" {

using namespace fapi;

// ----------------------------------------------------------------------
// Constant definitions
// ----------------------------------------------------------------------

// ----------------------------------------------------------------------
// Global variables
// ----------------------------------------------------------------------

// ----------------------------------------------------------------------
// Function prototypes
// ----------------------------------------------------------------------

// ----------------------------------------------------------------------
// Function definitions
// ----------------------------------------------------------------------

/**
 * proc_post_winkle
 *
 * @param[in] i_target EX target
 *
 * @retval ECMD_SUCCESS
 * @retval ERROR only those from called functions or MACROs
 */
fapi::ReturnCode proc_post_winkle(const Target& i_ex_target)
{
    fapi::ReturnCode    l_rc;
    uint32_t            rc = 0;
    ecmdDataBufferBase  data(64);

    uint64_t            address = 0;
    uint64_t            ex_offset = 0;
    uint8_t             l_ex_number = 0;
    fapi::Target        l_parentTarget;

    do
    {
        FAPI_INF("Beginning proc_post_winkle...");

        // Get the parent chip to target the PCBS registers
        l_rc = fapiGetParentChip(i_ex_target, l_parentTarget);
        if (l_rc)
        {
            FAPI_ERR("fapiGetParentChip access");
            break;
        }

        // Get the core number
        l_rc = FAPI_ATTR_GET(ATTR_CHIP_UNIT_POS, &i_ex_target, l_ex_number);
        if (l_rc)
        {
            FAPI_ERR("fapiGetAttribute of ATTR_CHIP_UNIT_POS with rc = 0x%x", (uint32_t)l_rc);
            break;
        }

        FAPI_INF("Processing core %d on %s", l_ex_number, l_parentTarget.toEcmdString());

        ex_offset = l_ex_number * 0x01000000;

        // Debug
        address = EX_PMGP1_0x100F0103 + ex_offset;
        l_rc = fapiGetScom(l_parentTarget, address, data);
        if (!l_rc.ok())
        {
            FAPI_ERR("Scom error reading PMGP1\n");
            break;
        }
        FAPI_DBG("\tBefore PMGP1: 0x%016llX", data.getDoubleWord(0));

        // Enable movement to Fast Winkle if errors are present. This is
        // turned off during the IPL process.
        rc |= data.flushTo1();
        rc |= data.clearBit(20);
        if (rc)
        {
            FAPI_ERR("Error (0x%x) setting up ecmdDataBufferBase", rc);
            l_rc.setEcmdError(rc);
            break;
        }

        address = EX_PMGP1_AND_0x100F0104 + ex_offset;
        l_rc = fapiPutScom(l_parentTarget, address, data);
        if (!l_rc.ok())
        {
            FAPI_ERR("Scom error updating PMGP1\n");
            break;
        }

        FAPI_INF("Enabled the conversion of Deep Winkle operations to Fast Winkle if errors are present upon Winkle entry");

        // Debug
        address = EX_PMGP1_0x100F0103 + ex_offset;
        l_rc = fapiGetScom(l_parentTarget, address, data);
        if (!l_rc.ok())
        {
            FAPI_ERR("Scom error reading PMGP1\n");
            break;
        }
        FAPI_DBG("\tAfter PMGP1: 0x%016llX", data.getDoubleWord(0));

    } while(0);

    FAPI_INF("Exiting proc_post_winkle...");

    return l_rc;
}

} //end extern C
C++
package org.elasticsearch.action.get; import com.bazaarvoice.elasticsearch.client.core.spi.RestExecutor; import com.bazaarvoice.elasticsearch.client.core.spi.RestResponse; import com.bazaarvoice.elasticsearch.client.core.util.UrlBuilder; import org.elasticsearch.action.AbstractRestClientAction; import org.elasticsearch.common.base.Function; import org.elasticsearch.common.util.concurrent.Futures; import org.elasticsearch.common.util.concurrent.ListenableFuture; import static com.bazaarvoice.elasticsearch.client.core.util.StringFunctions.booleanToString; import static com.bazaarvoice.elasticsearch.client.core.util.StringFunctions.commaDelimitedToString; import static com.bazaarvoice.elasticsearch.client.core.util.UrlBuilder.urlEncode; import static com.bazaarvoice.elasticsearch.client.core.util.Validation.notNull; import static org.elasticsearch.common.base.Optional.fromNullable; /** * The inverse of {@link org.elasticsearch.rest.action.get.RestGetAction} * * @param <ResponseType> */ public class GetRest<ResponseType> extends AbstractRestClientAction<GetRequest, ResponseType> { public GetRest(final String protocol, final String host, final int port, final RestExecutor executor, final Function<RestResponse, ResponseType> responseTransform) { super(protocol, host, port, executor, responseTransform); } @Override public ListenableFuture<ResponseType> act(GetRequest request) { UrlBuilder url = UrlBuilder.create() .protocol(protocol).host(host).port(port) .path(urlEncode(notNull(request.index()))) .seg(urlEncode(notNull(request.type()))) .seg(urlEncode(notNull(request.id()))) .paramIfPresent("refresh", fromNullable(request.refresh()).transform(booleanToString)) .paramIfPresent("routing", fromNullable(request.routing())) // note parent(string) seems just to set the routing, so we don't need to provide it here .paramIfPresent("preference", fromNullable(request.preference())) .paramIfPresent("realtime", fromNullable(request.realtime()).transform(booleanToString)) .paramIfPresent("fields", fromNullable(request.fields()).transform(commaDelimitedToString)); return Futures.transform(executor.get(url.url()), responseTransform); } }
Java
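A sketch of how the action above might be wired up and invoked. The RestExecutor and response transform are application-provided (RestExecutor is the SPI shown in the imports above); the GetRequest fluent setters are assumed from the Elasticsearch client API of the same era, and names like "tweets" are illustrative:

import com.bazaarvoice.elasticsearch.client.core.spi.RestExecutor;
import com.bazaarvoice.elasticsearch.client.core.spi.RestResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.GetRest;
import org.elasticsearch.common.base.Function;
import org.elasticsearch.common.util.concurrent.ListenableFuture;

public final class GetRestExample {
    // executor and transform are assumed to be provided by the client bootstrap.
    static ListenableFuture<GetResponse> fetch(RestExecutor executor,
                                               Function<RestResponse, GetResponse> transform) {
        GetRest<GetResponse> get =
                new GetRest<GetResponse>("http", "localhost", 9200, executor, transform);
        return get.act(new GetRequest("tweets").type("tweet").id("1").fields("user", "message"));
    }
}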
package internalversion import ( "github.com/openshift/origin/pkg/template/api" "k8s.io/apimachinery/pkg/api/errors" ) // TemplateListerExpansion allows custom methods to be added to // TemplateLister. type TemplateListerExpansion interface { GetByUID(uid string) (*api.Template, error) } // TemplateNamespaceListerExpansion allows custom methods to be added to // TemplateNamespaceLister. type TemplateNamespaceListerExpansion interface{} func (s templateLister) GetByUID(uid string) (*api.Template, error) { templates, err := s.indexer.ByIndex(api.TemplateUIDIndex, uid) if err != nil { return nil, err } if len(templates) == 0 { return nil, errors.NewNotFound(api.Resource("template"), uid) } return templates[0].(*api.Template), nil }
Go
dir = File.expand_path(File.dirname(__FILE__)) $LOAD_PATH.unshift File.join(dir, "../lib") # Maybe puppetlabs_spec_helper is in a directory next to puppetdb. If not, we # don't fail any worse than we already would. $LOAD_PATH.push File.join(dir, "../../../puppetlabs_spec_helper") require 'rspec' require 'puppetlabs_spec_helper/puppet_spec_helper' require 'tmpdir' require 'fileutils' require 'puppet' require 'puppet/util/log' require 'puppet/util/puppetdb/command' RSpec.configure do |config| config.before :each do @logs = [] Puppet::Util::Log.level = :info Puppet::Util::Log.newdestination(Puppet::Test::LogCollector.new(@logs)) def test_logs @logs.map(&:message) end end end
Ruby
/************************************************************************************************************
	DHTML Suite for Applications
	(C) www.dhtmlgoodies.com, January 2007

	CSS for the DHTMLSuite.imageSelection class.

	www.dhtmlgoodies.com
	Alf Magne Kalleland

************************************************************************************************************/

/* Main selection div - the draggable rectangle */
#DHTMLSuite_imageSelectionSel{
	position:absolute;
	overflow:hidden;
	border:1px solid #222;	/* Dark gray border */
	z-index:5000000;
}

/* Transparent div inside the image selection div */
#DHTMLSuite_imageSelection_transparentDiv{	/* This is the transparent div placed inside #DHTMLSuite_imageSelection */
	filter:alpha(opacity=50);
	opacity:0.5;
	-moz-opacity:0.5;
	background-color:#666;	/* Dark gray background color */
	position:absolute;
	left:-5px;
	top:-5px;
	width:200%;
	height:200%;
}

/* Div for the drag process - dragging images */
#DHTMLSuite_imageSelectionDrag{
	position:absolute;
	border:1px solid #222;
	z-index:5000000;
	overflow:hidden;
	width:107px;
}

#DHTMLSuite_imageSelectionDrag #DHTMLSuite_imageSelectionDragContent{
	padding:0px;
	z-index:5;
	position:relative;
}

/* Div for the drag process - small boxes for each image. These divs are placed inside #DHTMLSuite_imageSelectionDragContent */
#DHTMLSuite_imageSelectionDrag .DHTMLSuite_imageSelectionDragBox{	/* Individual box for each image */
	width:35px;
	height:35px;
	float:left;
	background-repeat:no-repeat;
	background-position:center center;
}

/* Div inside the div with id #DHTMLSuite_imageSelectionDrag - it could be transparent or not, that's your choice */
#DHTMLSuite_imageSelectionDrag .DHTMLSuite_imageSelectionDrag_transparentDiv{
	background-color:#666;	/* Dark gray background color */
	position:absolute;
	left:-5px;
	top:-5px;
	width:200%;
	height:200%;
	z-index:4;
}
CSS
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.pdfbox.tools;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.HashMap;
import java.util.Map;

import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;
import org.apache.pdfbox.pdmodel.PDPageContentStream;
import org.apache.pdfbox.pdmodel.font.PDFont;
import org.apache.pdfbox.pdmodel.font.PDType0Font;
import org.apache.pdfbox.pdmodel.font.PDType1Font;

/**
 * This will take a text file and output a PDF with that text.
 *
 * @author Ben Litchfield
 */
public class TextToPDF
{
    /**
     * The scaling factor for font units to PDF units
     */
    private static final int FONTSCALE = 1000;

    /**
     * The default font
     */
    private static final PDType1Font DEFAULT_FONT = PDType1Font.HELVETICA;

    /**
     * The default font size
     */
    private static final int DEFAULT_FONT_SIZE = 10;

    /**
     * The line height as a factor of the font size
     */
    private static final float LINE_HEIGHT_FACTOR = 1.05f;

    private int fontSize = DEFAULT_FONT_SIZE;
    private PDFont font = DEFAULT_FONT;

    private static final Map<String, PDType1Font> STANDARD_14 = new HashMap<String, PDType1Font>();
    static
    {
        STANDARD_14.put(PDType1Font.TIMES_ROMAN.getBaseFont(), PDType1Font.TIMES_ROMAN);
        STANDARD_14.put(PDType1Font.TIMES_BOLD.getBaseFont(), PDType1Font.TIMES_BOLD);
        STANDARD_14.put(PDType1Font.TIMES_ITALIC.getBaseFont(), PDType1Font.TIMES_ITALIC);
        STANDARD_14.put(PDType1Font.TIMES_BOLD_ITALIC.getBaseFont(), PDType1Font.TIMES_BOLD_ITALIC);
        STANDARD_14.put(PDType1Font.HELVETICA.getBaseFont(), PDType1Font.HELVETICA);
        STANDARD_14.put(PDType1Font.HELVETICA_BOLD.getBaseFont(), PDType1Font.HELVETICA_BOLD);
        STANDARD_14.put(PDType1Font.HELVETICA_OBLIQUE.getBaseFont(), PDType1Font.HELVETICA_OBLIQUE);
        STANDARD_14.put(PDType1Font.HELVETICA_BOLD_OBLIQUE.getBaseFont(), PDType1Font.HELVETICA_BOLD_OBLIQUE);
        STANDARD_14.put(PDType1Font.COURIER.getBaseFont(), PDType1Font.COURIER);
        STANDARD_14.put(PDType1Font.COURIER_BOLD.getBaseFont(), PDType1Font.COURIER_BOLD);
        STANDARD_14.put(PDType1Font.COURIER_OBLIQUE.getBaseFont(), PDType1Font.COURIER_OBLIQUE);
        STANDARD_14.put(PDType1Font.COURIER_BOLD_OBLIQUE.getBaseFont(), PDType1Font.COURIER_BOLD_OBLIQUE);
        STANDARD_14.put(PDType1Font.SYMBOL.getBaseFont(), PDType1Font.SYMBOL);
        STANDARD_14.put(PDType1Font.ZAPF_DINGBATS.getBaseFont(), PDType1Font.ZAPF_DINGBATS);
    }

    /**
     * Create a PDF document with some text.
     *
     * @param text The stream of text data.
     *
     * @return The document with the text in it.
     *
     * @throws IOException If there is an error writing the data.
     */
    public PDDocument createPDFFromText( Reader text ) throws IOException
    {
        PDDocument doc = new PDDocument();
        createPDFFromText(doc, text);
        return doc;
    }

    /**
     * Create a PDF document with some text.
     *
     * @param doc The document to add the text to.
     * @param text The stream of text data.
     *
     * @throws IOException If there is an error writing the data.
     */
    public void createPDFFromText( PDDocument doc, Reader text ) throws IOException
    {
        try
        {
            final int margin = 40;
            float height = font.getBoundingBox().getHeight() / FONTSCALE;

            //calculate font height and increase by a factor.
            height = height*fontSize*LINE_HEIGHT_FACTOR;
            BufferedReader data = new BufferedReader( text );
            String nextLine = null;
            PDPage page = new PDPage();
            PDPageContentStream contentStream = null;
            float y = -1;
            float maxStringLength = page.getMediaBox().getWidth() - 2*margin;

            // There is a special case of creating a PDF document from an empty string.
            boolean textIsEmpty = true;

            while( (nextLine = data.readLine()) != null )
            {
                // The input text is nonEmpty. New pages will be created and added
                // to the PDF document as they are needed, depending on the length of
                // the text.
                textIsEmpty = false;

                String[] lineWords = nextLine.trim().split( " " );
                int lineIndex = 0;
                while( lineIndex < lineWords.length )
                {
                    StringBuilder nextLineToDraw = new StringBuilder();
                    float lengthIfUsingNextWord = 0;
                    do
                    {
                        nextLineToDraw.append( lineWords[lineIndex] );
                        nextLineToDraw.append( " " );
                        lineIndex++;
                        if( lineIndex < lineWords.length )
                        {
                            String lineWithNextWord = nextLineToDraw.toString() + lineWords[lineIndex];
                            lengthIfUsingNextWord =
                                (font.getStringWidth( lineWithNextWord )/FONTSCALE) * fontSize;
                        }
                    }
                    while( lineIndex < lineWords.length &&
                           lengthIfUsingNextWord < maxStringLength );
                    if( y < margin )
                    {
                        // We have crossed the end-of-page boundary and need to extend the
                        // document by another page.
                        page = new PDPage();
                        doc.addPage( page );
                        if( contentStream != null )
                        {
                            contentStream.endText();
                            contentStream.close();
                        }
                        contentStream = new PDPageContentStream(doc, page);
                        contentStream.setFont( font, fontSize );
                        contentStream.beginText();
                        y = page.getMediaBox().getHeight() - margin + height;
                        contentStream.newLineAtOffset( margin, y);
                    }

                    if( contentStream == null )
                    {
                        throw new IOException( "Error:Expected non-null content stream." );
                    }
                    contentStream.newLineAtOffset(0, -height);
                    y -= height;
                    contentStream.showText(nextLineToDraw.toString());
                }
            }

            // If the input text was the empty string, then the above while loop will have short-circuited
            // and we will not have added any PDPages to the document.
            // So in order to make the resultant PDF document readable by Adobe Reader etc, we'll add an empty page.
            if (textIsEmpty)
            {
                doc.addPage(page);
            }

            if( contentStream != null )
            {
                contentStream.endText();
                contentStream.close();
            }
        }
        catch( IOException io )
        {
            if( doc != null )
            {
                doc.close();
            }
            throw io;
        }
    }

    /**
     * This will create a PDF document with some text in it.
     * <br />
     * see usage() for commandline
     *
     * @param args Command line arguments.
     *
     * @throws IOException If there is an error with the PDF.
     */
    public static void main(String[] args) throws IOException
    {
        // suppress the Dock icon on OS X
        System.setProperty("apple.awt.UIElement", "true");

        TextToPDF app = new TextToPDF();
        PDDocument doc = new PDDocument();
        try
        {
            if( args.length < 2 )
            {
                app.usage();
            }
            else
            {
                for( int i=0; i<args.length-2; i++ )
                {
                    if( args[i].equals( "-standardFont" ))
                    {
                        i++;
                        app.setFont( getStandardFont( args[i] ));
                    }
                    else if( args[i].equals( "-ttf" ))
                    {
                        i++;
                        PDFont font = PDType0Font.load( doc, new File( args[i]) );
                        app.setFont( font );
                    }
                    else if( args[i].equals( "-fontSize" ))
                    {
                        i++;
                        app.setFontSize( Integer.parseInt( args[i] ) );
                    }
                    else
                    {
                        throw new IOException( "Unknown argument:" + args[i] );
                    }
                }

                app.createPDFFromText( doc, new FileReader( args[args.length-1] ) );
                doc.save( args[args.length-2] );
            }
        }
        finally
        {
            doc.close();
        }
    }

    /**
     * This will print out a message telling how to use this example.
     */
    private void usage()
    {
        String[] std14 = getStandard14Names();

        StringBuilder message = new StringBuilder();
        message.append("Usage: java -jar pdfbox-app-x.y.z.jar TextToPDF [options] <outputfile> <textfile>\n");
        message.append("\nOptions:\n");
        message.append("  -standardFont <name> : " + DEFAULT_FONT.getBaseFont() + " (default)\n");

        for (String std14String : std14)
        {
            message.append("                         " + std14String + "\n");
        }
        message.append("  -ttf <ttf file>      : The TTF font to use.\n");
        message.append("  -fontSize <fontSize> : default: " + DEFAULT_FONT_SIZE );

        System.err.println(message.toString());
        System.exit(1);
    }

    /**
     * A convenience method to get one of the standard 14 font from name.
     *
     * @param name The name of the font to get.
     *
     * @return The font that matches the name or null if it does not exist.
     */
    private static PDType1Font getStandardFont(String name)
    {
        return STANDARD_14.get(name);
    }

    /**
     * This will get the names of the standard 14 fonts.
     *
     * @return An array of the names of the standard 14 fonts.
     */
    private static String[] getStandard14Names()
    {
        return STANDARD_14.keySet().toArray(new String[14]);
    }

    /**
     * @return Returns the font.
     */
    public PDFont getFont()
    {
        return font;
    }

    /**
     * @param aFont The font to set.
     */
    public void setFont(PDFont aFont)
    {
        this.font = aFont;
    }

    /**
     * @return Returns the fontSize.
     */
    public int getFontSize()
    {
        return fontSize;
    }

    /**
     * @param aFontSize The fontSize to set.
     */
    public void setFontSize(int aFontSize)
    {
        this.fontSize = aFontSize;
    }
}
Java
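Beyond the command line, the class above can be used programmatically: createPDFFromText(Reader) builds the document, after which the standard PDDocument save/close calls apply. A minimal sketch (file name and text are illustrative):

import java.io.StringReader;
import org.apache.pdfbox.pdmodel.PDDocument;

public final class TextToPDFExample {
    public static void main(String[] args) throws Exception {
        TextToPDF converter = new TextToPDF();
        converter.setFontSize(12); // defaults to 10 otherwise
        PDDocument doc = converter.createPDFFromText(new StringReader("Hello, PDF!"));
        try {
            doc.save("hello.pdf");
        } finally {
            doc.close(); // always release the document's resources
        }
    }
}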
/*
 * Copyright 2012 International Business Machines Corp.
 *
 * See the NOTICE file distributed with this work for additional information
 * regarding copyright ownership. Licensed under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package javax.batch.api.chunk.listener;

import java.util.List;

/**
 * The AbstractItemWriteListener provides default
 * implementations of less commonly implemented methods.
 */
public abstract class AbstractItemWriteListener implements ItemWriteListener {
    /**
     * Override this method if the ItemWriteListener
     * will do something before the items are written.
     * The default implementation does nothing.
     *
     * @param items specifies the items about to be
     * written.
     * @throws Exception (or subclass) if an error occurs.
     */
    @Override
    public void beforeWrite(List<Object> items) throws Exception {}

    /**
     * Override this method if the ItemWriteListener
     * will do something after the items are written.
     * The default implementation does nothing.
     *
     * @param items specifies the items that were just
     * written.
     * @throws Exception (or subclass) if an error occurs.
     */
    @Override
    public void afterWrite(List<Object> items) throws Exception {}

    /**
     * Override this method if the ItemWriteListener
     * will do something when the ItemWriter writeItems
     * method throws an exception.
     * The default implementation does nothing.
     *
     * @param items specifies the items whose write
     * failed.
     * @param ex specifies the exception thrown by the item
     * writer.
     * @throws Exception (or subclass) if an error occurs.
     */
    @Override
    public void onWriteError(List<Object> items, Exception ex) throws Exception {}
}
Java
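A concrete listener built on the adapter above: only onWriteError is overridden, while the beforeWrite/afterWrite no-ops are inherited. The logging behavior and class name are illustrative:

import java.util.List;

public class WriteErrorLoggingListener extends AbstractItemWriteListener {
    @Override
    public void onWriteError(List<Object> items, Exception ex) throws Exception {
        // Report which chunk failed; the batch runtime decides retry/skip semantics.
        System.err.println("Failed to write chunk of " + items.size() + " items: " + ex);
    }
}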
/*
 * #%L
 * SparkCommerce Framework Web
 * %%
 * Copyright (C) 2009 - 2013 Spark Commerce
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package org.sparkcommerce.core.payment.service;

import org.sparkcommerce.common.payment.PaymentGatewayType;
import org.springframework.stereotype.Service;

/**
 * In order to load this demo service, you will need to component scan
 * the package "com.mycompany.sample".
 *
 * This should NOT be used in production, and is meant solely for demonstration
 * purposes only.
 *
 * @author Elbert Bautista (elbertbautista)
 */
@Service("blNullPaymentGatewayConfiguration")
public class NullPaymentGatewayConfigurationImpl implements NullPaymentGatewayConfiguration {

    protected int failureReportingThreshold = 1;

    protected boolean performAuthorizeAndCapture = true;

    @Override
    public String getTransparentRedirectUrl() {
        return "/null-checkout/process";
    }

    @Override
    public String getTransparentRedirectReturnUrl() {
        return "/null-checkout/return";
    }

    @Override
    public boolean isPerformAuthorizeAndCapture() {
        // Return the configured field rather than a hard-coded value so that
        // setPerformAuthorizeAndCapture() actually takes effect.
        return performAuthorizeAndCapture;
    }

    @Override
    public void setPerformAuthorizeAndCapture(boolean performAuthorizeAndCapture) {
        this.performAuthorizeAndCapture = performAuthorizeAndCapture;
    }

    @Override
    public int getFailureReportingThreshold() {
        return failureReportingThreshold;
    }

    @Override
    public void setFailureReportingThreshold(int failureReportingThreshold) {
        this.failureReportingThreshold = failureReportingThreshold;
    }

    @Override
    public boolean handlesAuthorize() {
        return true;
    }

    @Override
    public boolean handlesCapture() {
        return false;
    }

    @Override
    public boolean handlesAuthorizeAndCapture() {
        return true;
    }

    @Override
    public boolean handlesReverseAuthorize() {
        return false;
    }

    @Override
    public boolean handlesVoid() {
        return false;
    }

    @Override
    public boolean handlesRefund() {
        return false;
    }

    @Override
    public boolean handlesPartialCapture() {
        return false;
    }

    @Override
    public boolean handlesMultipleShipment() {
        return false;
    }

    @Override
    public boolean handlesRecurringPayment() {
        return false;
    }

    @Override
    public boolean handlesSavedCustomerPayment() {
        return false;
    }

    @Override
    public boolean handlesMultiplePayments() {
        return false;
    }

    @Override
    public PaymentGatewayType getGatewayType() {
        return NullPaymentGatewayType.NULL_GATEWAY;
    }
}
Java
/* * Copyright (C) 2015 Stratio (http://stratio.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ (function () { 'use strict'; angular .module('webApp') .controller('DriversListCtrl', DriversListCtrl); DriversListCtrl.$inject = ['$scope', 'EntityFactory', 'ModalService', 'UtilsService', '$state']; function DriversListCtrl($scope, EntityFactory, ModalService, UtilsService, $state) { /*jshint validthis: true*/ var vm = this; vm.deleteDriver = deleteDriver; vm.getAllDrivers = getAllDrivers; vm.createDriver = createDriver; vm.sortDrivers = sortDrivers; vm.tableReverse = false; vm.sortField = 'fileName'; vm.errorMessage = { type: 'error', text: '', internalTrace: '' }; vm.successMessage = { type: 'success', text: '', internalTrace: '' }; init(); ///////////////////////////////// function init() { getAllDrivers(); } function getAllDrivers() { EntityFactory.getAllDrivers().then(function (drivers) { vm.driversData = drivers; }); } function createDriver() { var controller = 'CreateEntityModalCtrl'; var templateUrl = "templates/modal/entity-creation-modal.tpl.html"; var resolve = { type: function () { return "DRIVER"; }, title: function () { return "_ENTITY_._CREATE_DRIVER_TITLE_"; }, info: function () { return "_DRIVER_INFO_"; }, text: function () { return "_DRIVER_TEXT_"; }, }; var modalInstance = ModalService.openModal(controller, templateUrl, resolve, '', 'lg'); return modalInstance.result.then(function () { getAllDrivers(); vm.successMessage.text = '_DRIVER_CREATE_OK_'; }); } function deleteDriver(fileName) { return deleteDriverConfirm('lg', fileName); } function deleteDriverConfirm(size, fileName) { var controller = 'DeleteEntityModalCtrl'; var templateUrl = "templates/modal/entity-delete-modal.tpl.html"; var resolve = { item: function () { return fileName; }, type: function () { return "DRIVER"; }, title: function () { return "_ENTITY_._DELETE_DRIVER_TITLE_"; } }; var modalInstance = ModalService.openModal(controller, templateUrl, resolve, '', size); return modalInstance.result.then(function (fileName) { var index = UtilsService.getArrayElementPosition(vm.driversData, 'fileName', fileName); vm.driversData.splice(index, 1); vm.successMessage.text = '_DRIVER_DELETE_OK_'; }); } function sortDrivers(fieldName) { if (fieldName == vm.sortField) { vm.tableReverse = !vm.tableReverse; } else { vm.tableReverse = false; vm.sortField = fieldName; } } } })();
JavaScript
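The sortDrivers function above is a small toggle-sort state machine: re-clicking the active column flips the direction, while clicking a new column resets it to ascending. The same logic as a self-contained Java sketch (class and method names are illustrative):

// Sketch of the toggle-sort state kept by the controller above:
// same field clicked -> flip direction; new field -> ascending.
public class SortState {
    private String sortField = "fileName";
    private boolean reverse = false;

    public void sortBy(String fieldName) {
        if (fieldName.equals(sortField)) {
            reverse = !reverse;
        } else {
            reverse = false;
            sortField = fieldName;
        }
    }

    public String getSortField() { return sortField; }
    public boolean isReverse() { return reverse; }
}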
"""Support for Switchbot devices.""" from asyncio import Lock import switchbot # pylint: disable=import-error from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_SENSOR_TYPE, Platform from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from .const import ( ATTR_BOT, ATTR_CURTAIN, BTLE_LOCK, COMMON_OPTIONS, CONF_RETRY_COUNT, CONF_RETRY_TIMEOUT, CONF_SCAN_TIMEOUT, CONF_TIME_BETWEEN_UPDATE_COMMAND, DATA_COORDINATOR, DEFAULT_RETRY_COUNT, DEFAULT_RETRY_TIMEOUT, DEFAULT_SCAN_TIMEOUT, DEFAULT_TIME_BETWEEN_UPDATE_COMMAND, DOMAIN, ) from .coordinator import SwitchbotDataUpdateCoordinator PLATFORMS_BY_TYPE = { ATTR_BOT: [Platform.SWITCH, Platform.SENSOR], ATTR_CURTAIN: [Platform.COVER, Platform.BINARY_SENSOR, Platform.SENSOR], } async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up Switchbot from a config entry.""" hass.data.setdefault(DOMAIN, {}) if not entry.options: options = { CONF_TIME_BETWEEN_UPDATE_COMMAND: DEFAULT_TIME_BETWEEN_UPDATE_COMMAND, CONF_RETRY_COUNT: DEFAULT_RETRY_COUNT, CONF_RETRY_TIMEOUT: DEFAULT_RETRY_TIMEOUT, CONF_SCAN_TIMEOUT: DEFAULT_SCAN_TIMEOUT, } hass.config_entries.async_update_entry(entry, options=options) # Use same coordinator instance for all entities. # Uses BTLE advertisement data, all Switchbot devices in range is stored here. if DATA_COORDINATOR not in hass.data[DOMAIN]: # Check if asyncio.lock is stored in hass data. # BTLE has issues with multiple connections, # so we use a lock to ensure that only one API request is reaching it at a time: if BTLE_LOCK not in hass.data[DOMAIN]: hass.data[DOMAIN][BTLE_LOCK] = Lock() if COMMON_OPTIONS not in hass.data[DOMAIN]: hass.data[DOMAIN][COMMON_OPTIONS] = {**entry.options} switchbot.DEFAULT_RETRY_TIMEOUT = hass.data[DOMAIN][COMMON_OPTIONS][ CONF_RETRY_TIMEOUT ] # Store api in coordinator. coordinator = SwitchbotDataUpdateCoordinator( hass, update_interval=hass.data[DOMAIN][COMMON_OPTIONS][ CONF_TIME_BETWEEN_UPDATE_COMMAND ], api=switchbot, retry_count=hass.data[DOMAIN][COMMON_OPTIONS][CONF_RETRY_COUNT], scan_timeout=hass.data[DOMAIN][COMMON_OPTIONS][CONF_SCAN_TIMEOUT], api_lock=hass.data[DOMAIN][BTLE_LOCK], ) hass.data[DOMAIN][DATA_COORDINATOR] = coordinator else: coordinator = hass.data[DOMAIN][DATA_COORDINATOR] await coordinator.async_config_entry_first_refresh() if not coordinator.last_update_success: raise ConfigEntryNotReady entry.async_on_unload(entry.add_update_listener(_async_update_listener)) hass.data[DOMAIN][entry.entry_id] = {DATA_COORDINATOR: coordinator} sensor_type = entry.data[CONF_SENSOR_TYPE] hass.config_entries.async_setup_platforms(entry, PLATFORMS_BY_TYPE[sensor_type]) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload a config entry.""" sensor_type = entry.data[CONF_SENSOR_TYPE] unload_ok = await hass.config_entries.async_unload_platforms( entry, PLATFORMS_BY_TYPE[sensor_type] ) if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) if len(hass.config_entries.async_entries(DOMAIN)) == 0: hass.data.pop(DOMAIN) return unload_ok async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None: """Handle options update.""" # Update entity options stored in hass. if {**entry.options} != hass.data[DOMAIN][COMMON_OPTIONS]: hass.data[DOMAIN][COMMON_OPTIONS] = {**entry.options} hass.data[DOMAIN].pop(DATA_COORDINATOR) await hass.config_entries.async_reload(entry.entry_id)
Python
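The setup above deliberately stores one asyncio.Lock and one coordinator in hass.data so that every entity funnels BTLE traffic through a single point, because BTLE tolerates only one connection at a time. A rough Java analogue of that single-gate idea using a plain ReentrantLock; SharedBtleClient and transportSend are illustrative stand-ins, not Home Assistant or switchbot APIs:

import java.util.concurrent.locks.ReentrantLock;

// Illustrative analogue of the shared-lock pattern above: many callers,
// one lock, so only one request reaches the fragile transport at a time.
public class SharedBtleClient {
    private static final ReentrantLock API_LOCK = new ReentrantLock();

    public String sendCommand(String command) {
        API_LOCK.lock();
        try {
            // Only one thread talks to the BTLE device at a time.
            return transportSend(command);
        } finally {
            API_LOCK.unlock();
        }
    }

    private String transportSend(String command) {
        return "ack:" + command; // stand-in for the real transport call
    }
}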
package org.zstack.sdk.zwatch.thirdparty.api; public class QueryThirdpartyAlertResult { public java.util.List inventories; public void setInventories(java.util.List inventories) { this.inventories = inventories; } public java.util.List getInventories() { return this.inventories; } public java.lang.Long total; public void setTotal(java.lang.Long total) { this.total = total; } public java.lang.Long getTotal() { return this.total; } }
Java
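A short sketch of how a caller might consume this generated result type; the ZStack query that produces the instance is omitted, and the printer class is hypothetical:

import java.util.List;

// Hypothetical consumer of the generated result class above.
public class AlertResultPrinter {
    public static void print(QueryThirdpartyAlertResult result) {
        System.out.println("total alerts: " + result.getTotal());
        List inventories = result.getInventories();
        if (inventories != null) {
            for (Object inventory : inventories) {
                System.out.println(" - " + inventory);
            }
        }
    }
}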
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import React from 'react'; import { Link } from 'dva/router'; import Exception from '../../components/Exception'; export default () => ( <Exception type="500" style={{ minHeight: 500, height: '80%' }} linkElement={Link} /> );
JavaScript
+++
Talk_date = ""
Talk_start_time = ""
Talk_end_time = ""
Title = "Death Star postmortem"
Type = "talk"
Speakers = ["rafael-barbosa"]
youtube = ""
slideshare = ""
slides = ""
+++

A blameless postmortem, one of the most important rituals of the DevOps culture, applied to the failures of the Death Star project, with Darth Vader in command. A fun way to learn: if the worst Sith of all managed to apply it, you can too, with no blame.
Markdown
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.discovery.zen.publish; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesTransportRequest; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseOptions; import org.elasticsearch.transport.TransportService; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static 
org.hamcrest.Matchers.nullValue; @TestLogging("discovery.zen.publish:TRACE") public class PublishClusterStateActionTests extends ESTestCase { private static final ClusterName CLUSTER_NAME = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); protected ThreadPool threadPool; protected Map<String, MockNode> nodes = new HashMap<>(); public static class MockNode implements PublishClusterStateAction.NewPendingClusterStateListener, DiscoveryNodesProvider { public final DiscoveryNode discoveryNode; public final MockTransportService service; public MockPublishAction action; public final ClusterStateListener listener; public volatile ClusterState clusterState; private final ESLogger logger; public MockNode(DiscoveryNode discoveryNode, MockTransportService service, @Nullable ClusterStateListener listener, ESLogger logger) { this.discoveryNode = discoveryNode; this.service = service; this.listener = listener; this.logger = logger; this.clusterState = ClusterState.builder(CLUSTER_NAME).nodes(DiscoveryNodes.builder().put(discoveryNode).localNodeId(discoveryNode.getId()).build()).build(); } public MockNode setAsMaster() { this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(discoveryNode.getId())).build(); return this; } public MockNode resetMasterId() { this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(null)).build(); return this; } public void connectTo(DiscoveryNode node) { service.connectToNode(node); } @Override public void onNewClusterState(String reason) { ClusterState newClusterState = action.pendingStatesQueue().getNextClusterStateToProcess(); logger.debug("[{}] received version [{}], uuid [{}]", discoveryNode.getName(), newClusterState.version(), newClusterState.stateUUID()); if (listener != null) { ClusterChangedEvent event = new ClusterChangedEvent("", newClusterState, clusterState); listener.clusterChanged(event); } if (clusterState.nodes().getMasterNode() == null || newClusterState.supersedes(clusterState)) { clusterState = newClusterState; } action.pendingStatesQueue().markAsProcessed(newClusterState); } @Override public DiscoveryNodes nodes() { return clusterState.nodes(); } } public MockNode createMockNode(final String name) throws Exception { return createMockNode(name, Settings.EMPTY); } public MockNode createMockNode(String name, Settings settings) throws Exception { return createMockNode(name, settings, null); } public MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener) throws Exception { final Settings settings = Settings.builder() .put("name", name) .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") .put(basSettings) .build(); MockTransportService service = buildTransportService(settings); DiscoveryNode discoveryNode = DiscoveryNode.createLocal(settings, service.boundAddress().publishAddress(), NodeEnvironment.generateNodeId(settings)); MockNode node = new MockNode(discoveryNode, service, listener, logger); node.action = buildPublishClusterStateAction(settings, service, () -> node.clusterState, node); final CountDownLatch latch = new CountDownLatch(nodes.size() * 2 + 1); TransportConnectionListener waitForConnection = new TransportConnectionListener() { @Override public void onNodeConnected(DiscoveryNode node) { latch.countDown(); } @Override public void onNodeDisconnected(DiscoveryNode node) { fail("disconnect 
should not be called " + node); } }; node.service.addConnectionListener(waitForConnection); for (MockNode curNode : nodes.values()) { curNode.service.addConnectionListener(waitForConnection); curNode.connectTo(node.discoveryNode); node.connectTo(curNode.discoveryNode); } node.connectTo(node.discoveryNode); assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true)); for (MockNode curNode : nodes.values()) { curNode.service.removeConnectionListener(waitForConnection); } node.service.removeConnectionListener(waitForConnection); if (nodes.put(name, node) != null) { fail("Node with the name " + name + " already exist"); } return node; } public MockTransportService service(String name) { MockNode node = nodes.get(name); if (node != null) { return node.service; } return null; } public PublishClusterStateAction action(String name) { MockNode node = nodes.get(name); if (node != null) { return node.action; } return null; } @Override @Before public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool(getClass().getName()); } @Override @After public void tearDown() throws Exception { super.tearDown(); for (MockNode curNode : nodes.values()) { curNode.action.close(); curNode.service.close(); } terminate(threadPool); } protected MockTransportService buildTransportService(Settings settings) { MockTransportService transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool); transportService.start(); transportService.acceptIncomingRequests(); return transportService; } protected MockPublishAction buildPublishClusterStateAction( Settings settings, MockTransportService transportService, Supplier<ClusterState> clusterStateSupplier, PublishClusterStateAction.NewPendingClusterStateListener listener ) { DiscoverySettings discoverySettings = new DiscoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); return new MockPublishAction( settings, transportService, clusterStateSupplier, listener, discoverySettings, CLUSTER_NAME); } public void testSimpleClusterStatePublishing() throws Exception { MockNode nodeA = createMockNode("nodeA", Settings.EMPTY).setAsMaster(); MockNode nodeB = createMockNode("nodeB", Settings.EMPTY); // Initial cluster state ClusterState clusterState = nodeA.clusterState; // cluster state update - add nodeB DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(clusterState.nodes()).put(nodeB.discoveryNode).build(); ClusterState previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromFull(nodeB.clusterState, clusterState); // cluster state update - add block previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromDiff(nodeB.clusterState, clusterState); assertThat(nodeB.clusterState.blocks().global().size(), equalTo(1)); // cluster state update - remove block previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromDiff(nodeB.clusterState, clusterState); 
assertTrue(nodeB.clusterState.wasReadFromDiff()); // Adding new node - this node should get full cluster state while nodeB should still be getting diffs MockNode nodeC = createMockNode("nodeC", Settings.EMPTY); // cluster state update 3 - register node C previousClusterState = clusterState; discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeC.discoveryNode).build(); clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromDiff(nodeB.clusterState, clusterState); // First state assertSameStateFromFull(nodeC.clusterState, clusterState); // cluster state update 4 - update settings previousClusterState = clusterState; MetaData metaData = MetaData.builder(clusterState.metaData()).transientSettings(Settings.builder().put("foo", "bar").build()).build(); clusterState = ClusterState.builder(clusterState).metaData(metaData).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromDiff(nodeB.clusterState, clusterState); assertThat(nodeB.clusterState.blocks().global().size(), equalTo(0)); assertSameStateFromDiff(nodeC.clusterState, clusterState); assertThat(nodeC.clusterState.blocks().global().size(), equalTo(0)); // cluster state update - skipping one version change - should request full cluster state previousClusterState = ClusterState.builder(clusterState).incrementVersion().build(); clusterState = ClusterState.builder(clusterState).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromFull(nodeB.clusterState, clusterState); assertSameStateFromFull(nodeC.clusterState, clusterState); assertFalse(nodeC.clusterState.wasReadFromDiff()); // node A steps down from being master nodeA.resetMasterId(); nodeB.resetMasterId(); nodeC.resetMasterId(); // node B becomes the master and sends a version of the cluster state that goes back discoveryNodes = DiscoveryNodes.builder(discoveryNodes) .put(nodeA.discoveryNode) .put(nodeB.discoveryNode) .put(nodeC.discoveryNode) .masterNodeId(nodeB.discoveryNode.getId()) .localNodeId(nodeB.discoveryNode.getId()) .build(); previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); publishStateAndWait(nodeB.action, clusterState, previousClusterState); assertSameStateFromFull(nodeA.clusterState, clusterState); assertSameStateFromFull(nodeC.clusterState, clusterState); } public void testUnexpectedDiffPublishing() throws Exception { MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, event -> { fail("Shouldn't send cluster state to myself"); }).setAsMaster(); MockNode nodeB = createMockNode("nodeB", Settings.EMPTY); // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).put(nodeB.discoveryNode).build(); ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromFull(nodeB.clusterState, clusterState); // cluster state update - add block previousClusterState = clusterState; clusterState = 
ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromDiff(nodeB.clusterState, clusterState); } public void testDisablingDiffPublishing() throws Exception { Settings noDiffPublishingSettings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build(); MockNode nodeA = createMockNode("nodeA", noDiffPublishingSettings, new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { fail("Shouldn't send cluster state to myself"); } }); MockNode nodeB = createMockNode("nodeB", noDiffPublishingSettings, new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { assertFalse(event.state().wasReadFromDiff()); } }); // Initial cluster state DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.getId()).masterNodeId(nodeA.discoveryNode.getId()).build(); ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); // cluster state update - add nodeB discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build(); ClusterState previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); // cluster state update - add block previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); } /** * Test not waiting on publishing works correctly (i.e., publishing times out) */ public void testSimultaneousClusterStatePublishing() throws Exception { int numberOfNodes = randomIntBetween(2, 10); int numberOfIterations = scaledRandomIntBetween(5, 50); Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), randomBoolean()).build(); MockNode master = createMockNode("node0", settings, new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { assertProperMetaDataForVersion(event.state().metaData(), event.state().version()); } }).setAsMaster(); DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(master.nodes()); for (int i = 1; i < numberOfNodes; i++) { final String name = "node" + i; final MockNode node = createMockNode(name, settings, new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { assertProperMetaDataForVersion(event.state().metaData(), event.state().version()); } }); discoveryNodesBuilder.put(node.discoveryNode); } AssertingAckListener[] listeners = new AssertingAckListener[numberOfIterations]; DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); MetaData metaData = MetaData.EMPTY_META_DATA; ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).metaData(metaData).build(); ClusterState previousState; for (int i = 0; i < numberOfIterations; i++) { previousState = clusterState; metaData = buildMetaDataForVersion(metaData, i + 1); clusterState = ClusterState.builder(clusterState).incrementVersion().metaData(metaData).nodes(discoveryNodes).build(); listeners[i] = publishState(master.action, clusterState, previousState); 
} for (int i = 0; i < numberOfIterations; i++) { listeners[i].await(1, TimeUnit.SECONDS); } // set the master cs master.clusterState = clusterState; for (MockNode node : nodes.values()) { assertSameState(node.clusterState, clusterState); assertThat(node.clusterState.nodes().getLocalNode(), equalTo(node.discoveryNode)); } } public void testSerializationFailureDuringDiffPublishing() throws Exception { MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { fail("Shouldn't send cluster state to myself"); } }).setAsMaster(); MockNode nodeB = createMockNode("nodeB", Settings.EMPTY); // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).put(nodeB.discoveryNode).build(); ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromFull(nodeB.clusterState, clusterState); // cluster state update - add block previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.stateUUID(), clusterState) { @Override public Diff<ClusterState> diff(ClusterState previousState) { return new Diff<ClusterState>() { @Override public ClusterState apply(ClusterState part) { fail("this diff shouldn't be applied"); return part; } @Override public void writeTo(StreamOutput out) throws IOException { throw new IOException("Simulated failure of diff serialization"); } }; } }; try { publishStateAndWait(nodeA.action, unserializableClusterState, previousClusterState); fail("cluster state published despite of diff errors"); } catch (Discovery.FailedToCommitClusterStateException e) { assertThat(e.getCause(), notNullValue()); assertThat(e.getCause().getMessage(), containsString("failed to serialize")); } } public void testFailToPublishWithLessThanMinMasterNodes() throws Exception { final int masterNodes = randomIntBetween(1, 10); MockNode master = createMockNode("master"); DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().put(master.discoveryNode); for (int i = 1; i < masterNodes; i++) { discoveryNodesBuilder.put(createMockNode("node" + i).discoveryNode); } final int dataNodes = randomIntBetween(0, 5); final Settings dataSettings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build(); for (int i = 0; i < dataNodes; i++) { discoveryNodesBuilder.put(createMockNode("data_" + i, dataSettings).discoveryNode); } discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId()); DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); MetaData metaData = MetaData.EMPTY_META_DATA; ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).metaData(metaData).nodes(discoveryNodes).build(); ClusterState previousState = master.clusterState; try { publishState(master.action, clusterState, previousState, masterNodes + randomIntBetween(1, 5)); fail("cluster state publishing didn't fail despite of not having enough nodes"); } catch 
(Discovery.FailedToCommitClusterStateException expected) { logger.debug("failed to publish as expected", expected); } } public void testPublishingWithSendingErrors() throws Exception { int goodNodes = randomIntBetween(2, 5); int errorNodes = randomIntBetween(1, 5); int timeOutNodes = randomBoolean() ? 0 : randomIntBetween(1, 5); // adding timeout nodes will force timeout errors final int numberOfMasterNodes = goodNodes + errorNodes + timeOutNodes + 1; // master final boolean expectingToCommit = randomBoolean(); Settings.Builder settings = Settings.builder(); // make sure we have a reasonable timeout if we expect to timeout, o.w. one that will make the test "hang" settings.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), expectingToCommit == false && timeOutNodes > 0 ? "100ms" : "1h") .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "5ms"); // test is about committing MockNode master = createMockNode("master", settings.build()); // randomize things a bit int[] nodeTypes = new int[goodNodes + errorNodes + timeOutNodes]; for (int i = 0; i < goodNodes; i++) { nodeTypes[i] = 0; } for (int i = goodNodes; i < goodNodes + errorNodes; i++) { nodeTypes[i] = 1; } for (int i = goodNodes + errorNodes; i < nodeTypes.length; i++) { nodeTypes[i] = 2; } Collections.shuffle(Arrays.asList(nodeTypes), random()); DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().put(master.discoveryNode); for (int i = 0; i < nodeTypes.length; i++) { final MockNode mockNode = createMockNode("node" + i); discoveryNodesBuilder.put(mockNode.discoveryNode); switch (nodeTypes[i]) { case 1: mockNode.action.errorOnSend.set(true); break; case 2: mockNode.action.timeoutOnSend.set(true); break; } } final int dataNodes = randomIntBetween(0, 3); // data nodes don't matter for (int i = 0; i < dataNodes; i++) { final MockNode mockNode = createMockNode("data_" + i, Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build()); discoveryNodesBuilder.put(mockNode.discoveryNode); if (randomBoolean()) { // we really don't care - just chaos monkey mockNode.action.errorOnCommit.set(randomBoolean()); mockNode.action.errorOnSend.set(randomBoolean()); mockNode.action.timeoutOnCommit.set(randomBoolean()); mockNode.action.timeoutOnSend.set(randomBoolean()); } } final int minMasterNodes; final String expectedBehavior; if (expectingToCommit) { minMasterNodes = randomIntBetween(0, goodNodes + 1); // count master expectedBehavior = "succeed"; } else { minMasterNodes = randomIntBetween(goodNodes + 2, numberOfMasterNodes); // +2 because of master expectedBehavior = timeOutNodes > 0 ? "timeout" : "fail"; } logger.info("--> expecting commit to {}. good nodes [{}], errors [{}], timeouts [{}]. 
min_master_nodes [{}]", expectedBehavior, goodNodes + 1, errorNodes, timeOutNodes, minMasterNodes); discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId()); DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); MetaData metaData = MetaData.EMPTY_META_DATA; ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).metaData(metaData).nodes(discoveryNodes).build(); ClusterState previousState = master.clusterState; try { publishState(master.action, clusterState, previousState, minMasterNodes); if (expectingToCommit == false) { fail("cluster state publishing didn't fail despite of not have enough nodes"); } } catch (Discovery.FailedToCommitClusterStateException exception) { logger.debug("failed to publish as expected", exception); if (expectingToCommit) { throw exception; } assertThat(exception.getMessage(), containsString(timeOutNodes > 0 ? "timed out" : "failed")); } } public void testIncomingClusterStateValidation() throws Exception { MockNode node = createMockNode("node"); logger.info("--> testing acceptances of any master when having no master"); ClusterState state = ClusterState.builder(node.clusterState) .nodes(DiscoveryNodes.builder(node.nodes()).masterNodeId(randomAsciiOfLength(10))).incrementVersion().build(); node.action.validateIncomingState(state, null); // now set a master node node.clusterState = ClusterState.builder(node.clusterState).nodes(DiscoveryNodes.builder(node.nodes()).masterNodeId("master")).build(); logger.info("--> testing rejection of another master"); try { node.action.validateIncomingState(state, node.clusterState); fail("node accepted state from another master"); } catch (IllegalStateException OK) { assertThat(OK.toString(), containsString("cluster state from a different master than the current one, rejecting")); } logger.info("--> test state from the current master is accepted"); node.action.validateIncomingState(ClusterState.builder(node.clusterState) .nodes(DiscoveryNodes.builder(node.nodes()).masterNodeId("master")).incrementVersion().build(), node.clusterState); logger.info("--> testing rejection of another cluster name"); try { node.action.validateIncomingState(ClusterState.builder(new ClusterName(randomAsciiOfLength(10))).nodes(node.nodes()).build(), node.clusterState); fail("node accepted state with another cluster name"); } catch (IllegalStateException OK) { assertThat(OK.toString(), containsString("received state from a node that is not part of the cluster")); } logger.info("--> testing rejection of a cluster state with wrong local node"); try { state = ClusterState.builder(node.clusterState) .nodes(DiscoveryNodes.builder(node.nodes()).localNodeId("_non_existing_").build()) .incrementVersion().build(); node.action.validateIncomingState(state, node.clusterState); fail("node accepted state with non-existence local node"); } catch (IllegalStateException OK) { assertThat(OK.toString(), containsString("received state with a local node that does not match the current local node")); } try { MockNode otherNode = createMockNode("otherNode"); state = ClusterState.builder(node.clusterState).nodes( DiscoveryNodes.builder(node.nodes()).put(otherNode.discoveryNode).localNodeId(otherNode.discoveryNode.getId()).build() ).incrementVersion().build(); node.action.validateIncomingState(state, node.clusterState); fail("node accepted state with existent but wrong local node"); } catch (IllegalStateException OK) { assertThat(OK.toString(), containsString("received state with a local node that does not match the 
current local node")); } logger.info("--> testing acceptance of an old cluster state"); final ClusterState incomingState = node.clusterState; node.clusterState = ClusterState.builder(node.clusterState).incrementVersion().build(); final IllegalStateException e = expectThrows(IllegalStateException.class, () -> node.action.validateIncomingState(incomingState, node.clusterState)); final String message = String.format( Locale.ROOT, "rejecting cluster state version [%d] uuid [%s] received from [%s]", incomingState.version(), incomingState.stateUUID(), incomingState.nodes().getMasterNodeId() ); assertThat(e, hasToString("java.lang.IllegalStateException: " + message)); // an older version from a *new* master is also OK! ClusterState previousState = ClusterState.builder(node.clusterState).incrementVersion().build(); state = ClusterState.builder(node.clusterState) .nodes(DiscoveryNodes.builder(node.clusterState.nodes()).masterNodeId("_new_master_").build()) .build(); // remove the master of the node (but still have a previous cluster state with it)! node.resetMasterId(); node.action.validateIncomingState(state, previousState); } public void testOutOfOrderCommitMessages() throws Throwable { MockNode node = createMockNode("node").setAsMaster(); final CapturingTransportChannel channel = new CapturingTransportChannel(); List<ClusterState> states = new ArrayList<>(); final int numOfStates = scaledRandomIntBetween(3, 25); for (int i = 1; i <= numOfStates; i++) { states.add(ClusterState.builder(node.clusterState).version(i).stateUUID(ClusterState.UNKNOWN_UUID).build()); } final ClusterState finalState = states.get(numOfStates - 1); logger.info("--> publishing states"); for (ClusterState state : states) { node.action.handleIncomingClusterStateRequest( new BytesTransportRequest(PublishClusterStateAction.serializeFullClusterState(state, Version.CURRENT), Version.CURRENT), channel); assertThat(channel.response.get(), equalTo((TransportResponse) TransportResponse.Empty.INSTANCE)); assertThat(channel.error.get(), nullValue()); channel.clear(); } logger.info("--> committing states"); long largestVersionSeen = Long.MIN_VALUE; Randomness.shuffle(states); for (ClusterState state : states) { node.action.handleCommitRequest(new PublishClusterStateAction.CommitClusterStateRequest(state.stateUUID()), channel); if (largestVersionSeen < state.getVersion()) { assertThat(channel.response.get(), equalTo((TransportResponse) TransportResponse.Empty.INSTANCE)); if (channel.error.get() != null) { throw channel.error.get(); } largestVersionSeen = state.getVersion(); } else { // older cluster states will be rejected assertNotNull(channel.error.get()); assertThat(channel.error.get(), instanceOf(IllegalStateException.class)); } channel.clear(); } //now check the last state held assertSameState(node.clusterState, finalState); } /** * Tests that cluster is committed or times out. 
It should never be the case that we fail * an update due to a commit timeout, but it ends up being committed anyway */ public void testTimeoutOrCommit() throws Exception { Settings settings = Settings.builder() .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "1ms").build(); // short but so we will sometime commit sometime timeout MockNode master = createMockNode("master", settings); MockNode node = createMockNode("node", settings); ClusterState state = ClusterState.builder(master.clusterState) .nodes(DiscoveryNodes.builder(master.clusterState.nodes()).put(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build(); for (int i = 0; i < 10; i++) { state = ClusterState.builder(state).incrementVersion().build(); logger.debug("--> publishing version [{}], UUID [{}]", state.version(), state.stateUUID()); boolean success; try { publishState(master.action, state, master.clusterState, 2).await(1, TimeUnit.HOURS); success = true; } catch (Discovery.FailedToCommitClusterStateException OK) { success = false; } logger.debug("--> publishing [{}], verifying...", success ? "succeeded" : "failed"); if (success) { assertSameState(node.clusterState, state); } else { assertThat(node.clusterState.stateUUID(), not(equalTo(state.stateUUID()))); } } } private MetaData buildMetaDataForVersion(MetaData metaData, long version) { ImmutableOpenMap.Builder<String, IndexMetaData> indices = ImmutableOpenMap.builder(metaData.indices()); indices.put("test" + version, IndexMetaData.builder("test" + version).settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) .numberOfShards((int) version).numberOfReplicas(0).build()); return MetaData.builder(metaData) .transientSettings(Settings.builder().put("test", version).build()) .indices(indices.build()) .build(); } private void assertProperMetaDataForVersion(MetaData metaData, long version) { for (long i = 1; i <= version; i++) { assertThat(metaData.index("test" + i), notNullValue()); assertThat(metaData.index("test" + i).getNumberOfShards(), equalTo((int) i)); } assertThat(metaData.index("test" + (version + 1)), nullValue()); assertThat(metaData.transientSettings().get("test"), equalTo(Long.toString(version))); } public void publishStateAndWait(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException { publishState(action, state, previousState).await(1, TimeUnit.SECONDS); } public AssertingAckListener publishState(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException { final int minimumMasterNodes = randomIntBetween(-1, state.nodes().getMasterNodes().size()); return publishState(action, state, previousState, minimumMasterNodes); } public AssertingAckListener publishState(PublishClusterStateAction action, ClusterState state, ClusterState previousState, int minMasterNodes) throws InterruptedException { AssertingAckListener assertingAckListener = new AssertingAckListener(state.nodes().getSize() - 1); ClusterChangedEvent changedEvent = new ClusterChangedEvent("test update", state, previousState); action.publish(changedEvent, minMasterNodes, assertingAckListener); return assertingAckListener; } public static class AssertingAckListener implements Discovery.AckListener { private final List<Tuple<DiscoveryNode, Throwable>> errors = new CopyOnWriteArrayList<>(); private final AtomicBoolean timeoutOccurred = new AtomicBoolean(); private final CountDownLatch countDown; public AssertingAckListener(int nodeCount) { countDown = new 
CountDownLatch(nodeCount); } @Override public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { if (e != null) { errors.add(new Tuple<>(node, e)); } countDown.countDown(); } @Override public void onTimeout() { timeoutOccurred.set(true); // Fast forward the counter - no reason to wait here long currentCount = countDown.getCount(); for (long i = 0; i < currentCount; i++) { countDown.countDown(); } } public void await(long timeout, TimeUnit unit) throws InterruptedException { assertThat(awaitErrors(timeout, unit), emptyIterable()); } public List<Tuple<DiscoveryNode, Throwable>> awaitErrors(long timeout, TimeUnit unit) throws InterruptedException { countDown.await(timeout, unit); assertFalse(timeoutOccurred.get()); return errors; } } void assertSameState(ClusterState actual, ClusterState expected) { assertThat(actual, notNullValue()); final String reason = "\n--> actual ClusterState: " + actual.prettyPrint() + "\n--> expected ClusterState:" + expected.prettyPrint(); assertThat("unequal UUIDs" + reason, actual.stateUUID(), equalTo(expected.stateUUID())); assertThat("unequal versions" + reason, actual.version(), equalTo(expected.version())); } void assertSameStateFromDiff(ClusterState actual, ClusterState expected) { assertSameState(actual, expected); assertTrue(actual.wasReadFromDiff()); } void assertSameStateFromFull(ClusterState actual, ClusterState expected) { assertSameState(actual, expected); assertFalse(actual.wasReadFromDiff()); } static class MockPublishAction extends PublishClusterStateAction { AtomicBoolean timeoutOnSend = new AtomicBoolean(); AtomicBoolean errorOnSend = new AtomicBoolean(); AtomicBoolean timeoutOnCommit = new AtomicBoolean(); AtomicBoolean errorOnCommit = new AtomicBoolean(); public MockPublishAction(Settings settings, TransportService transportService, Supplier<ClusterState> clusterStateSupplier, NewPendingClusterStateListener listener, DiscoverySettings discoverySettings, ClusterName clusterName) { super(settings, transportService, clusterStateSupplier, listener, discoverySettings, clusterName); } @Override protected void handleIncomingClusterStateRequest(BytesTransportRequest request, TransportChannel channel) throws IOException { if (errorOnSend.get()) { throw new ElasticsearchException("forced error on incoming cluster state"); } if (timeoutOnSend.get()) { return; } super.handleIncomingClusterStateRequest(request, channel); } @Override protected void handleCommitRequest(PublishClusterStateAction.CommitClusterStateRequest request, TransportChannel channel) { if (errorOnCommit.get()) { throw new ElasticsearchException("forced error on incoming commit"); } if (timeoutOnCommit.get()) { return; } super.handleCommitRequest(request, channel); } } static class CapturingTransportChannel implements TransportChannel { AtomicReference<TransportResponse> response = new AtomicReference<>(); AtomicReference<Throwable> error = new AtomicReference<>(); public void clear() { response.set(null); error.set(null); } @Override public String action() { return "_noop_"; } @Override public String getProfileName() { return "_noop_"; } @Override public void sendResponse(TransportResponse response) throws IOException { this.response.set(response); assertThat(error.get(), nullValue()); } @Override public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException { this.response.set(response); assertThat(error.get(), nullValue()); } @Override public void sendResponse(Exception exception) throws IOException { this.error.set(exception); 
assertThat(response.get(), nullValue()); } @Override public long getRequestId() { return 0; } @Override public String getChannelType() { return "capturing"; } } }
Java
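The AssertingAckListener in the test above is a reusable testing idiom: count a latch down once per expected ack, collect errors on the side, and fast-forward the latch on timeout so waiters never hang. A distilled, framework-free sketch of the same idiom:

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Distilled version of the ack-listener idiom used in the test above:
// one latch count per expected ack, errors collected separately.
public class AckLatch {
    private final CountDownLatch latch;
    private final List<Throwable> errors = new CopyOnWriteArrayList<>();

    public AckLatch(int expectedAcks) {
        this.latch = new CountDownLatch(expectedAcks);
    }

    public void onAck(Throwable error) {
        if (error != null) {
            errors.add(error);
        }
        latch.countDown();
    }

    public void onTimeout() {
        // Fast-forward: release every waiter instead of letting them hang.
        while (latch.getCount() > 0) {
            latch.countDown();
        }
    }

    public List<Throwable> await(long timeout, TimeUnit unit) throws InterruptedException {
        latch.await(timeout, unit);
        return errors;
    }
}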
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include <aws/kinesisanalytics/model/LambdaOutputUpdate.h> #include <aws/core/utils/json/JsonSerializer.h> #include <utility> using namespace Aws::Utils::Json; using namespace Aws::Utils; namespace Aws { namespace KinesisAnalytics { namespace Model { LambdaOutputUpdate::LambdaOutputUpdate() : m_resourceARNUpdateHasBeenSet(false), m_roleARNUpdateHasBeenSet(false) { } LambdaOutputUpdate::LambdaOutputUpdate(JsonView jsonValue) : m_resourceARNUpdateHasBeenSet(false), m_roleARNUpdateHasBeenSet(false) { *this = jsonValue; } LambdaOutputUpdate& LambdaOutputUpdate::operator =(JsonView jsonValue) { if(jsonValue.ValueExists("ResourceARNUpdate")) { m_resourceARNUpdate = jsonValue.GetString("ResourceARNUpdate"); m_resourceARNUpdateHasBeenSet = true; } if(jsonValue.ValueExists("RoleARNUpdate")) { m_roleARNUpdate = jsonValue.GetString("RoleARNUpdate"); m_roleARNUpdateHasBeenSet = true; } return *this; } JsonValue LambdaOutputUpdate::Jsonize() const { JsonValue payload; if(m_resourceARNUpdateHasBeenSet) { payload.WithString("ResourceARNUpdate", m_resourceARNUpdate); } if(m_roleARNUpdateHasBeenSet) { payload.WithString("RoleARNUpdate", m_roleARNUpdate); } return payload; } } // namespace Model } // namespace KinesisAnalytics } // namespace Aws
C++
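The generated C++ model above writes a field into the JSON payload only when its HasBeenSet flag is true, so unset optional fields are omitted rather than serialized as empty values. The same guard pattern sketched in Java; the class and the Map-based payload are stand-ins, not the AWS SDK:

import java.util.LinkedHashMap;
import java.util.Map;

// Java sketch of the "serialize only what was explicitly set" pattern
// used by the generated C++ model above.
public class LambdaOutputUpdateModel {
    private String resourceArnUpdate;
    private boolean resourceArnUpdateSet;
    private String roleArnUpdate;
    private boolean roleArnUpdateSet;

    public void setResourceArnUpdate(String value) {
        this.resourceArnUpdate = value;
        this.resourceArnUpdateSet = true;
    }

    public void setRoleArnUpdate(String value) {
        this.roleArnUpdate = value;
        this.roleArnUpdateSet = true;
    }

    public Map<String, String> jsonize() {
        // Unset fields are omitted entirely rather than sent as null.
        Map<String, String> payload = new LinkedHashMap<>();
        if (resourceArnUpdateSet) {
            payload.put("ResourceARNUpdate", resourceArnUpdate);
        }
        if (roleArnUpdateSet) {
            payload.put("RoleARNUpdate", roleArnUpdate);
        }
        return payload;
    }
}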
package com.orientechnologies.orient.core.index; import java.util.Arrays; import java.util.Collections; import java.util.List; import org.testng.Assert; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; import com.orientechnologies.common.collection.OCompositeKey; import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx; import com.orientechnologies.orient.core.metadata.schema.OType; import com.orientechnologies.orient.core.record.impl.ODocument; @Test @SuppressWarnings("unchecked") public class OSimpleKeyIndexDefinitionTest { private OSimpleKeyIndexDefinition simpleKeyIndexDefinition; @BeforeMethod public void beforeMethod() { simpleKeyIndexDefinition = new OSimpleKeyIndexDefinition(OType.INTEGER, OType.STRING); } @Test public void testGetFields() { Assert.assertTrue(simpleKeyIndexDefinition.getFields().isEmpty()); } @Test public void testGetClassName() { Assert.assertNull(simpleKeyIndexDefinition.getClassName()); } @Test public void testCreateValueSimpleKey() { final OSimpleKeyIndexDefinition keyIndexDefinition = new OSimpleKeyIndexDefinition(OType.INTEGER); final Object result = keyIndexDefinition.createValue("2"); Assert.assertEquals(result, 2); } @Test public void testCreateValueCompositeKeyListParam() { final Object result = simpleKeyIndexDefinition.createValue(Arrays.asList("2", "3")); final OCompositeKey compositeKey = new OCompositeKey(Arrays.asList(2, "3")); Assert.assertEquals(result, compositeKey); } @Test public void testCreateValueCompositeKeyNullListParam() { final Object result = simpleKeyIndexDefinition.createValue(Arrays.asList((Object) null)); Assert.assertNull(result); } @Test public void testNullParamListItem() { final Object result = simpleKeyIndexDefinition.createValue(Arrays.asList("2", null)); Assert.assertNull(result); } @Test public void testWrongParamTypeListItem() { final Object result = simpleKeyIndexDefinition.createValue(Arrays.asList("a", "3")); Assert.assertNull(result); } @Test public void testCreateValueCompositeKey() { final Object result = simpleKeyIndexDefinition.createValue("2", "3"); final OCompositeKey compositeKey = new OCompositeKey(Arrays.asList(2, "3")); Assert.assertEquals(result, compositeKey); } @Test public void testCreateValueCompositeKeyNullParamList() { final Object result = simpleKeyIndexDefinition.createValue((List<?>) null); Assert.assertNull(result); } @Test public void testCreateValueCompositeKeyNullParam() { final Object result = simpleKeyIndexDefinition.createValue((Object) null); Assert.assertNull(result); } @Test public void testCreateValueCompositeKeyEmptyList() { final Object result = simpleKeyIndexDefinition.createValue(Collections.<Object> emptyList()); Assert.assertNull(result); } @Test public void testNullParamItem() { final Object result = simpleKeyIndexDefinition.createValue("2", null); Assert.assertNull(result); } @Test public void testWrongParamType() { final Object result = simpleKeyIndexDefinition.createValue("a", "3"); Assert.assertNull(result); } @Test public void testParamCount() { Assert.assertEquals(simpleKeyIndexDefinition.getParamCount(), 2); } @Test public void testParamCountOneItem() { final OSimpleKeyIndexDefinition keyIndexDefinition = new OSimpleKeyIndexDefinition(OType.INTEGER); Assert.assertEquals(keyIndexDefinition.getParamCount(), 1); } @Test public void testGetKeyTypes() { Assert.assertEquals(simpleKeyIndexDefinition.getTypes(), new OType[] { OType.INTEGER, OType.STRING }); } @Test public void testGetKeyTypesOneType() { final OSimpleKeyIndexDefinition 
keyIndexDefinition = new OSimpleKeyIndexDefinition(OType.BOOLEAN); Assert.assertEquals(keyIndexDefinition.getTypes(), new OType[] { OType.BOOLEAN }); } @Test public void testReload() { final ODatabaseDocumentTx databaseDocumentTx = new ODatabaseDocumentTx("memory:osimplekeyindexdefinitiontest"); databaseDocumentTx.create(); final ODocument storeDocument = simpleKeyIndexDefinition.toStream(); storeDocument.save(); final ODocument loadDocument = databaseDocumentTx.load(storeDocument.getIdentity()); final OSimpleKeyIndexDefinition loadedKeyIndexDefinition = new OSimpleKeyIndexDefinition(); loadedKeyIndexDefinition.fromStream(loadDocument); databaseDocumentTx.drop(); Assert.assertEquals(loadedKeyIndexDefinition, simpleKeyIndexDefinition); } @Test(expectedExceptions = OIndexException.class) public void testGetDocumentValueToIndex() { simpleKeyIndexDefinition.getDocumentValueToIndex(new ODocument()); } }
Java
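The assertions above pin down a useful contract: composite key parts are coerced to their declared types, and any null or uncoercible part collapses the entire key to null. A simplified sketch of that contract, not the OrientDB implementation:

import java.util.ArrayList;
import java.util.List;

// Simplified sketch of the createValue contract exercised above:
// coerce each part to its declared type, and return null if any
// part is null or cannot be coerced.
public class SimpleCompositeKeyFactory {

    private final Class<?>[] types;

    public SimpleCompositeKeyFactory(Class<?>... types) {
        this.types = types;
    }

    public List<Object> createValue(Object... params) {
        List<Object> key = new ArrayList<>();
        for (int i = 0; i < params.length; i++) {
            Object coerced = coerce(params[i], types[i]);
            if (coerced == null) {
                return null; // one bad part invalidates the whole key
            }
            key.add(coerced);
        }
        return key;
    }

    private static Object coerce(Object param, Class<?> type) {
        if (param == null) {
            return null;
        }
        if (type == Integer.class) {
            try {
                return Integer.valueOf(param.toString());
            } catch (NumberFormatException e) {
                return null; // mirrors the wrong-param-type tests above
            }
        }
        return param.toString();
    }
}

With new SimpleCompositeKeyFactory(Integer.class, String.class), createValue("2", "3") yields [2, "3"] while createValue("2", null) yields null, matching the assertions above.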
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class systemglobal_authenticationpolicy_binding(base_resource) : """ Binding class showing the authenticationpolicy that can be bound to systemglobal. """ def __init__(self) : self._policyname = "" self._priority = 0 self._builtin = [] self.___count = 0 @property def priority(self) : ur"""The priority of the command policy. """ try : return self._priority except Exception as e: raise e @priority.setter def priority(self, priority) : ur"""The priority of the command policy. """ try : self._priority = priority except Exception as e: raise e @property def builtin(self) : ur"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE. """ try : return self._builtin except Exception as e: raise e @builtin.setter def builtin(self, builtin) : ur"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE """ try : self._builtin = builtin except Exception as e: raise e @property def policyname(self) : ur"""The name of the command policy. """ try : return self._policyname except Exception as e: raise e @policyname.setter def policyname(self, policyname) : ur"""The name of the command policy. """ try : self._policyname = policyname except Exception as e: raise e def _get_nitro_response(self, service, response) : ur""" converts nitro response into object and returns the object array in case of get request. 
""" try : result = service.payload_formatter.string_to_resource(systemglobal_authenticationpolicy_binding_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.systemglobal_authenticationpolicy_binding except Exception as e : raise e def _get_object_name(self) : ur""" Returns the value of object identifier argument """ try : return 0 except Exception as e : raise e @classmethod def add(cls, client, resource) : try : if resource and type(resource) is not list : updateresource = systemglobal_authenticationpolicy_binding() updateresource.policyname = resource.policyname updateresource.priority = resource.priority return updateresource.update_resource(client) else : if resource and len(resource) > 0 : updateresources = [systemglobal_authenticationpolicy_binding() for _ in range(len(resource))] for i in range(len(resource)) : updateresources[i].policyname = resource[i].policyname updateresources[i].priority = resource[i].priority return cls.update_bulk_request(client, updateresources) except Exception as e : raise e @classmethod def delete(cls, client, resource) : try : if resource and type(resource) is not list : deleteresource = systemglobal_authenticationpolicy_binding() deleteresource.policyname = resource.policyname return deleteresource.delete_resource(client) else : if resource and len(resource) > 0 : deleteresources = [systemglobal_authenticationpolicy_binding() for _ in range(len(resource))] for i in range(len(resource)) : deleteresources[i].policyname = resource[i].policyname return cls.delete_bulk_request(client, deleteresources) except Exception as e : raise e @classmethod def get(cls, service) : ur""" Use this API to fetch a systemglobal_authenticationpolicy_binding resources. """ try : obj = systemglobal_authenticationpolicy_binding() response = obj.get_resources(service) return response except Exception as e: raise e @classmethod def get_filtered(cls, service, filter_) : ur""" Use this API to fetch filtered set of systemglobal_authenticationpolicy_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". """ try : obj = systemglobal_authenticationpolicy_binding() option_ = options() option_.filter = filter_ response = obj.getfiltered(service, option_) return response except Exception as e: raise e @classmethod def count(cls, service) : ur""" Use this API to count systemglobal_authenticationpolicy_binding resources configued on NetScaler. """ try : obj = systemglobal_authenticationpolicy_binding() option_ = options() option_.count = True response = obj.get_resources(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e @classmethod def count_filtered(cls, service, filter_) : ur""" Use this API to count the filtered set of systemglobal_authenticationpolicy_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". 
""" try : obj = systemglobal_authenticationpolicy_binding() option_ = options() option_.count = True option_.filter = filter_ response = obj.getfiltered(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e class Builtin: MODIFIABLE = "MODIFIABLE" DELETABLE = "DELETABLE" IMMUTABLE = "IMMUTABLE" class systemglobal_authenticationpolicy_binding_response(base_response) : def __init__(self, length=1) : self.systemglobal_authenticationpolicy_binding = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.systemglobal_authenticationpolicy_binding = [systemglobal_authenticationpolicy_binding() for _ in range(length)]
Python
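Both count and count_filtered above issue an ordinary fetch with a count option set and read the ___count field off the first returned element. A hedged Java rendering of that client-side pattern; NitroClient and BindingRow are hypothetical stand-ins for the SDK types, not a real NITRO Java API:

import java.util.List;

// Illustrative Java rendering of the count/count_filtered pattern above.
public class BindingCounter {

    public interface NitroClient {
        // countOnly asks the server to return just a count row;
        // filter is a "key:value,key:value" expression, or null for none.
        List<BindingRow> fetch(boolean countOnly, String filter);
    }

    public static class BindingRow {
        public long count;
    }

    public static long count(NitroClient client, String filter) {
        List<BindingRow> rows = client.fetch(true, filter);
        return (rows == null || rows.isEmpty()) ? 0 : rows.get(0).count;
    }
}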
/* * Copyright 2000-2010 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.lang.ant.refactoring; import com.intellij.codeInsight.TargetElementUtilBase; import com.intellij.lang.ant.dom.AntDomFileDescription; import com.intellij.openapi.actionSystem.CommonDataKeys; import com.intellij.openapi.actionSystem.DataContext; import com.intellij.openapi.actionSystem.LangDataKeys; import com.intellij.openapi.editor.Editor; import com.intellij.openapi.project.IndexNotReadyException; import com.intellij.openapi.project.Project; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiFile; import com.intellij.psi.PsiReference; import com.intellij.psi.xml.XmlFile; import com.intellij.refactoring.rename.PsiElementRenameHandler; import com.intellij.util.containers.ContainerUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.util.Collection; /** * @author Eugene Zhuravlev * Date: Mar 19, 2007 */ public final class AntRenameHandler extends PsiElementRenameHandler { public boolean isAvailableOnDataContext(final DataContext dataContext) { final PsiElement[] elements = getElements(dataContext); return elements != null && elements.length > 1; } public void invoke(@NotNull final Project project, final Editor editor, final PsiFile file, final DataContext dataContext) { final PsiElement[] elements = getElements(dataContext); if (elements != null && elements.length > 0) { invoke(project, new PsiElement[]{elements[0]}, dataContext); } } public void invoke(@NotNull final Project project, @NotNull final PsiElement[] elements, final DataContext dataContext) { super.invoke(project, elements, dataContext); } @Nullable private static PsiElement[] getElements(DataContext dataContext) { final PsiFile psiFile = CommonDataKeys.PSI_FILE.getData(dataContext); if (!(psiFile instanceof XmlFile && AntDomFileDescription.isAntFile((XmlFile)psiFile))) { return null; } final Editor editor = LangDataKeys.EDITOR.getData(dataContext); if (editor == null) { return null; } return getPsiElementsIn(editor, psiFile); } @Nullable private static PsiElement[] getPsiElementsIn(final Editor editor, final PsiFile psiFile) { try { final PsiReference reference = TargetElementUtilBase.findReference(editor, editor.getCaretModel().getOffset()); if (reference == null) { return null; } final Collection<PsiElement> candidates = TargetElementUtilBase.getInstance().getTargetCandidates(reference); return ContainerUtil.toArray(candidates, new PsiElement[candidates.size()]); } catch (IndexNotReadyException e) { return null; } } }
Java
"""
Drone.io badge generator.

Currently set up to work on Mac. Requires Pillow.
"""
import os

from PIL import Image, ImageDraw, ImageFont

SIZE = (95, 18)


def hex_colour(hex_str):
    # Accept colours with or without a leading '#'.
    if hex_str[0] == '#':
        hex_str = hex_str[1:]
    return (
        int(hex_str[:2], 16),
        int(hex_str[2:4], 16),
        int(hex_str[4:6], 16),
    )


BACKGROUND = hex_colour('#4A4A4A')
SUCCESS = hex_colour('#94B944')
WARNING = hex_colour('#E4A83C')
ERROR = hex_colour('#B10610')
SUCCESS_CUTOFF = 85
WARNING_CUTOFF = 45

FONT = ImageFont.truetype(size=10, filename="/Library/Fonts/Arial.ttf")
FONT_SHADOW = hex_colour('#525252')
PADDING_TOP = 3


def build_image(percentage, colour):
    image = Image.new('RGB', SIZE, color=BACKGROUND)
    drawing = ImageDraw.Draw(image)
    drawing.rectangle([(55, 0), SIZE], colour, colour)
    # Each label is drawn twice: a darker copy offset by one pixel
    # acts as a drop shadow behind the main text.
    drawing.text((8, PADDING_TOP + 1), 'coverage', font=FONT, fill=FONT_SHADOW)
    drawing.text((7, PADDING_TOP), 'coverage', font=FONT)
    drawing.text((63, PADDING_TOP + 1), '%s%%' % percentage, font=FONT, fill=FONT_SHADOW)
    drawing.text((62, PADDING_TOP), '%s%%' % percentage, font=FONT)
    return image


os.chdir('_build')
for i in range(101):
    filename = '%i.png' % i
    if i < WARNING_CUTOFF:
        colour = ERROR
    elif i < SUCCESS_CUTOFF:
        colour = WARNING
    else:
        colour = SUCCESS
    # Use a context manager so each badge file is closed after writing
    # (the original opened the file and never closed it).
    with open(filename, 'wb') as badge_file:
        build_image(i, colour).save(badge_file)
Python
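The loop above buckets the percentage with two cutoffs to choose the badge colour. The same selection as a tiny Java helper, reusing the script's thresholds and hex values:

// Same threshold logic as the script above: red below 45,
// amber below 85, green otherwise.
public class BadgeColour {
    static final int SUCCESS_CUTOFF = 85;
    static final int WARNING_CUTOFF = 45;

    public static String pick(int percentage) {
        if (percentage < WARNING_CUTOFF) {
            return "#B10610"; // error red
        }
        if (percentage < SUCCESS_CUTOFF) {
            return "#E4A83C"; // warning amber
        }
        return "#94B944"; // success green
    }
}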