Diff (string, lengths 5–2k) | FaultInducingLabel (int64: 0 or 1) |
---|---|
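Each row below pairs one diff hunk (a string of 5–2k characters) with a binary label marking whether the change was fault-inducing (1) or not (0). As a usage illustration only — this page does not name the hosting repository — here is a minimal Python sketch of how a two-column dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library. The dataset identifier is a placeholder assumption, not taken from this page.

```python
# Minimal sketch of loading and inspecting a (Diff, FaultInducingLabel) dataset.
# The dataset identifier below is a hypothetical placeholder.
from collections import Counter

from datasets import load_dataset  # pip install datasets

ds = load_dataset("user/fault-inducing-diffs", split="train")  # hypothetical ID

# Each row pairs a diff hunk (string, 5-2k chars) with a binary label:
# 1 = the change was fault-inducing, 0 = it was not.
print(ds.features)                        # column names and dtypes
print(Counter(ds["FaultInducingLabel"]))  # class balance across the split

# Keep only the fault-inducing examples, e.g. for error analysis.
faulty = ds.filter(lambda row: row["FaultInducingLabel"] == 1)
print(faulty[0]["Diff"][:200])
```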
import org.apache.ambari.server.state.DesiredConfig;
public ServiceComponentHostResponse convertToResponse(Map<String, DesiredConfig> desiredConfigs) {
r.setStaleConfig(helper.isStaleConfigs(this, desiredConfigs)); | 0 |
import org.apache.accumulo.core.client.AccumuloClient;
AccumuloClient c = getAccumuloClient();
private long query(AccumuloClient c, String table, int depth, long start, long end, int num,
int step) throws Exception {
private void write(AccumuloClient c, String table, int depth, long start, long end, int step) | 0 |
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at | 0 |
* $Header: /home/jerenkrantz/tmp/commons/commons-convert/cvs/home/cvs/jakarta-commons//fileupload/src/java/org/apache/commons/fileupload/FileItem.java,v 1.10 2002/12/20 04:09:07 dion Exp $
* $Revision: 1.10 $
* $Date: 2002/12/20 04:09:07 $
import java.io.Serializable;
* @version $Id: FileItem.java,v 1.10 2002/12/20 04:09:07 dion Exp $
public interface FileItem extends Serializable | 0 |
import org.apache.commons.lang.StringUtils; | 0 |
private static final Logger LOG = LoggerFactory.getLogger(UnboundedSourceSystem.class);
LOG.info("System " + systemName + " does not have producer.");
return null; | 0 |
import java.util.HashSet;
import java.util.Set;
Set<RequestEntity> requestEntities = new HashSet<>();
Set<StageEntity> stageEntities = new HashSet<>();
StageEntity stage = task.getStage();
stage.setStatus(HostRoleStatus.PENDING);
stageEntities.add(stage);
RequestEntity request = stage.getRequest();
request.setStatus(HostRoleStatus.IN_PROGRESS);
requestEntities.add(request);
for (StageEntity stageEntity : stageEntities) {
stageDAO.merge(stageEntity);
}
for (RequestEntity requestEntity : requestEntities) {
requestDAO.merge(requestEntity);
}
| 1 |
import java.io.File;
import org.apache.accumulo.core.conf.Property;
import org.apache.commons.io.FileUtils;
@Test(timeout = 30000)
public void testPerTableClasspath() throws Exception {
Connector conn = new ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZookeepers()).getConnector("root", "superSecret");
conn.tableOperations().create("table2");
File jarFile = File.createTempFile("iterator", ".jar");
FileUtils.copyURLToFile(this.getClass().getResource("/FooFilter.jar"), jarFile);
jarFile.deleteOnExit();
conn.instanceOperations().setProperty(Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "cx1", jarFile.toURI().toString());
conn.tableOperations().setProperty("table2", Property.TABLE_CLASSPATH.getKey(), "cx1");
conn.tableOperations().attachIterator("table2", new IteratorSetting(100, "foocensor", "org.apache.accumulo.test.FooFilter"));
BatchWriter bw = conn.createBatchWriter("table2", new BatchWriterConfig());
Mutation m1 = new Mutation("foo");
m1.put("cf1", "cq1", "v2");
m1.put("cf1", "cq2", "v3");
bw.addMutation(m1);
Mutation m2 = new Mutation("bar");
m2.put("cf1", "cq1", "v6");
m2.put("cf1", "cq2", "v7");
bw.addMutation(m2);
bw.close();
Scanner scanner = conn.createScanner("table2", new Authorizations());
int count = 0;
for (Entry<Key,Value> entry : scanner) {
Assert.assertFalse(entry.getKey().getRowData().toString().toLowerCase().contains("foo"));
count++;
}
Assert.assertEquals(2, count);
conn.instanceOperations().removeProperty(Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "cx1");
conn.tableOperations().delete("table2");
}
| 0 |
addDependency(Role.HUE_SERVER, RoleCommand.START, Role.HCAT, | 0 |
import org.apache.commons.logging.Log;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.Map;
return getMap(c).findMethod(key);
return getMap(c).findField(c, key);
if (Modifier.isPublic(ictor.getModifiers()) && Permissions.allow(ictor)) {
l.add(ictor);
} | 0 |
FileSKVWriter writer = new RFileOperations().openWriter(newMapFile.toString(), ns, ns.getConf(), null, acuConf);
ns, ns.getConf(), null, acuConf);
ns, ns.getConf(), null, acuConf));
reader = FileOperations.getInstance().openReader(mapfile.toString(), false, ns, ns.getConf(), null, acuConf);
FileSKVIterator reader = FileOperations.getInstance().openReader(path.toString(), true, ns, ns.getConf(), null, acuConf); | 1 |
import static org.hamcrest.Matchers.startsWith;
import static org.junit.Assume.assumeTrue;
import com.google.common.collect.ImmutableList;
import java.net.Inet4Address;
import org.apache.beam.vendor.grpc.v1_13_1.io.netty.channel.epoll.Epoll;
Server server =
serverFactory.allocateAddressAndCreate(ImmutableList.of(service), descriptorBuilder);
server = serverFactory.allocateAddressAndCreate(ImmutableList.of(service), descriptorBuilder);
@Test
public void testCreatingEpollServer() throws Exception {
assumeTrue(Epoll.isAvailable());
// tcnative only supports the ipv4 address family
assumeTrue(InetAddress.getLoopbackAddress() instanceof Inet4Address);
Endpoints.ApiServiceDescriptor apiServiceDescriptor =
runTestUsing(ServerFactory.createEpollSocket(), ManagedChannelFactory.createEpoll());
HostAndPort hostAndPort = HostAndPort.fromString(apiServiceDescriptor.getUrl());
assertThat(
hostAndPort.getHost(),
anyOf(
equalTo(InetAddress.getLoopbackAddress().getHostName()),
equalTo(InetAddress.getLoopbackAddress().getHostAddress())));
assertThat(hostAndPort.getPort(), allOf(greaterThan(0), lessThan(65536)));
}
@Test
public void testCreatingUnixDomainSocketServer() throws Exception {
assumeTrue(Epoll.isAvailable());
Endpoints.ApiServiceDescriptor apiServiceDescriptor =
runTestUsing(ServerFactory.createEpollDomainSocket(), ManagedChannelFactory.createEpoll());
assertThat(
apiServiceDescriptor.getUrl(),
startsWith("unix://" + System.getProperty("java.io.tmpdir")));
}
Server server =
serverFactory.allocateAddressAndCreate(
ImmutableList.of(service), apiServiceDescriptorBuilder); | 0 |
package org.apache.commons.codec; | 1 |
import static com.google.common.base.Preconditions.checkArgument;
checkArgument(m != null, "m is null");
checkArgument(iterable != null, "iterable is null"); | 0 |
import static org.junit.jupiter.api.Assertions.fail;
@org.junit.jupiter.api.Test
@org.junit.jupiter.api.Test | 0 |
* <p>These positions roughly correspond to hashes of keys. In case of hash collisions, | 0 |
public MinorCompactor(TabletServer tabletServer, Tablet tablet, InMemoryMap imm,
FileRef mergeFile, DataFileValue dfv, FileRef outputFile, MinorCompactionReason mincReason,
TableConfiguration tableConfig) {
super(tabletServer, tablet, toFileMap(mergeFile, dfv), imm, outputFile, true,
new CompactionEnv() {
@Override
public boolean isCompactionEnabled() {
return true;
}
@Override
public IteratorScope getIteratorScope() {
return IteratorScope.minc;
}
@Override
public RateLimiter getReadLimiter() {
return null;
}
@Override
public RateLimiter getWriteLimiter() {
return null;
}
}, Collections.<IteratorSetting> emptyList(), mincReason.ordinal(), tableConfig);
return Tables.getTableState(tabletServer.getInstance(),
extent.getTableId()) == TableState.DELETING;
// log.debug(String.format("MinC %,d recs in | %,d recs out | %,d recs/sec | %6.3f secs |
// %,d bytes ",map.size(), entriesCompacted,
ProblemReports.getInstance(tabletServer).deleteProblemReport(getExtent().getTableId(),
ProblemType.FILE_WRITE, outputFileName);
ProblemReports.getInstance(tabletServer).report(new ProblemReport(
getExtent().getTableId(), ProblemType.FILE_WRITE, outputFileName, e));
// if this is coming from a user iterator, it is possible that the user could change the
// iterator config and that the
ProblemReports.getInstance(tabletServer).report(new ProblemReport(
getExtent().getTableId(), ProblemType.FILE_WRITE, outputFileName, e)); | 0 |
input.getPipeline(),
return PDone.in(input.getPipeline());
public PDone apply(PCollection<TableRow> input) {
input.apply(ParDo.of(new TagWithUniqueIds()));
return PDone.in(input.getPipeline()); | 0 |
} catch (final NullPointerException ex) {
} catch (final NullPointerException ex) {
} catch (final NullPointerException ex) {
} catch (final NullPointerException ex) {
} catch (final NullPointerException ex) { | 0 |
selectInterval, ioThreadCount,
connectTimeout != null ? connectTimeout : TimeValue.ZERO_MILLIS,
soReuseAddress,
soLinger != null ? soLinger : TimeValue.NEG_ONE_SECONDS,
soKeepAlive,
tcpNoDelay,
soTimeout != null ? soTimeout : TimeValue.ZERO_MILLIS,
sndBufSize, rcvBufSize, backlogSize); | 0 |
* @version $Id$ | 0 |
String tableName = ((org.apache.accumulo.core.client.mapreduce.RangeInputSplit) context
.getInputSplit()).getTableName();
job.setInputFormatClass(
org.apache.accumulo.core.client.mapreduce.AccumuloMultiTableInputFormat.class);
org.apache.accumulo.core.client.mapreduce.AccumuloMultiTableInputFormat
.setZooKeeperInstance(job, ci.getInstanceName(), ci.getZooKeepers());
org.apache.accumulo.core.client.mapreduce.AccumuloMultiTableInputFormat.setConnectorInfo(job,
ci.getPrincipal(), ci.getAuthenticationToken());
org.apache.accumulo.core.client.mapreduce.InputTableConfig tableConfig1 = new org.apache.accumulo.core.client.mapreduce.InputTableConfig();
org.apache.accumulo.core.client.mapreduce.InputTableConfig tableConfig2 = new org.apache.accumulo.core.client.mapreduce.InputTableConfig();
Map<String,org.apache.accumulo.core.client.mapreduce.InputTableConfig> configMap = new HashMap<>();
org.apache.accumulo.core.client.mapreduce.AccumuloMultiTableInputFormat
.setInputTableConfigs(job, configMap); | 0 |
import java.awt.geom.Ellipse2D; | 0 |
public class ClientRetry extends ClientBase {
try {
cdw1.waitForConnected(CONNECTION_TIMEOUT);
ZooKeeper zk2 = new ZooKeeper(hostPort, 10000, cdw2);
try {
States s1 = zk.getState();
States s2 = zk2.getState();
assertSame(s1,States.CONNECTED);
assertSame(s2,States.CONNECTING);
cdw1.reset();
cdw1.waitForDisconnected(CONNECTION_TIMEOUT);
cdw2.waitForConnected(CONNECTION_TIMEOUT);
assertSame(zk2.getState(),States.CONNECTED);
} finally {
zk2.close();
}
} finally {
zk.close();
} | 0 |
* @version $Revision$ | 1 |
return getGlyphLogicalBounds(glyphIndex).getBounds2D(); | 0 |
import java.io.Serializable; | 0 |
import java.util.HashMap;
import java.util.Map;
import org.apache.ambari.server.api.resources.ResourceInstance;
import org.apache.ambari.server.controller.spi.Resource; | 0 |
final StringBuilder buffer = new StringBuilder(""); | 1 |
package org.apache.felix.sigil.eclipse.ui.internal.refactor; | 0 |
import org.apache.beam.sdk.testing.DataflowPortabilityApiUnsupported;
@Category({
ValidatesRunner.class,
UsesBoundedSplittableParDo.class,
DataflowPortabilityApiUnsupported.class
}) | 0 |
Utils.unreserveTableNamespace(tableNamespaceInfo.namespaceId, tid, true); | 0 |
package aQute.lib.spring;
import java.util.*;
import aQute.lib.osgi.*;
/**
* This component is called when we find a resource in the META-INF/*.xml
* pattern. We parse the resource and and the imports to the builder.
*
* Parsing is done with XSLT (first time I see the use of having XML for the
* Spring configuration files!).
*
* @author aqute
*
*/
public class SpringXMLType extends XMLTypeProcessor {
protected List<XMLType> getTypes(Analyzer analyzer) throws Exception {
List<XMLType> types = new ArrayList<XMLType>();
String header = analyzer.getProperty("Bundle-Blueprint", "OSGI-INF/blueprint");
process(types,"extract.xsl", header, ".*\\.xml");
header = analyzer.getProperty("Spring-Context", "META-INF/spring");
process(types,"extract.xsl", header, ".*\\.xml");
return types;
}
} | 0 |
* Class to be extended by operator builders that want to make use of `applyIf` call.
*
* | 0 |
Class implementingClass = getImplementingClass(algorithmURI);
(CanonicalizerSpi) implementingClass.newInstance();
Class registeredClass = getImplementingClass(algorithmURI);
if (registeredClass != null) {
try {
_canonicalizerHash.put(algorithmURI, Class.forName(implementingClass));
} catch (ClassNotFoundException e) {
throw new RuntimeException("c14n class not found");
}
private static Class getImplementingClass(String URI) {
return (Class) _canonicalizerHash.get(URI); | 0 |
package org.apache.zookeeper.server;
import java.util.HashSet;
import java.util.Map;
import org.apache.zookeeper.server.DataTree;
DataTree dataTree;
this.dataTree = dataTree;
public long approximateDataSize() {
return dataTree.approximateDataSize();
public int countEphemerals() {
Map<Long, HashSet<String>> map = dataTree.getEphemeralsMap();
int result = 0;
for (HashSet<String> set : map.values()) {
result += set.size();
}
return result;
public String getLastZxid() {
return "0x" + Long.toHexString(dataTree.lastProcessedZxid); | 0 |
ResourceResolver.getInstance(uriAttr, "http://www.apache.org", true);
ResourceResolver res = ResourceResolver.getInstance(uriAttr, file, false); | 0 |
package org.apache.commons.vfs2.provider.ram;
import org.apache.commons.vfs2.FileName;
import org.apache.commons.vfs2.FileSystemException;
import org.apache.commons.vfs2.FileType; | 1 |
public void inform(ContentStoreEvent event) {
final Session session = ObjectModelHelper.getRequest(this.portalService.getProcessInfoProvider().getObjectModel()).getSession(); | 0 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.configuration.event;
import org.apache.commons.configuration.AbstractConfiguration;
import org.apache.commons.configuration.DatabaseConfigurationTestHelper;
/**
* A test class for the events generated by DatabaseConfiguration.
*
* @author hacker
* @version $Id$
*/
public class TestDatabaseConfigurationEvents extends
AbstractTestConfigurationEvents
{
/** The test helper. */
private DatabaseConfigurationTestHelper helper;
protected void setUp() throws Exception
{
helper = new DatabaseConfigurationTestHelper();
helper.setUp();
super.setUp();
}
protected void tearDown() throws Exception
{
helper.tearDown();
super.tearDown();
}
protected AbstractConfiguration createConfiguration()
{
return helper.setUpConfig();
}
} | 0 |
.filter(impl -> type.isAssignableFrom(Helpers.getClass(impl))) | 0 |
import org.apache.sshd.common.util.buffer.Buffer;
import org.apache.sshd.common.util.buffer.BufferUtils;
import org.apache.sshd.common.util.buffer.ByteArrayBuffer;
@Override
buffer = new ByteArrayBuffer();
buffer.putBytes(V_C);
buffer.putBytes(V_S);
buffer.putBytes(I_C);
buffer.putBytes(I_S);
buffer.putBytes(K_S);
buffer.putBytes(sig.sign());
buffer.putBytes(K_S);
buffer.putBytes(f);
buffer.putBytes(sigH); | 0 |
private static final String HTTP_REALM_NAME = "Apache Aurora Scheduler"; | 0 |
import java.nio.charset.Charset;
private static final Charset utf8 = Charset.forName("UTF8");
socket.getOutputStream().write(out.getBytes(utf8)); | 0 |
import java.io.ByteArrayInputStream;
/**
* @param source
* @param ctx
* @return the Node resulting from the parse of the source
* @throws XMLEncryptionException
*/
public Node deserialize(byte[] source, Node ctx) throws XMLEncryptionException {
byte[] fragment = createContext(source, ctx);
return deserialize(ctx, new StreamSource(new ByteArrayInputStream(fragment)));
}
return deserialize(ctx, new StreamSource(new StringReader(fragment)));
}
/**
* @param ctx
* @param source
* @return the Node resulting from the parse of the source
* @throws XMLEncryptionException
*/
private Node deserialize(Node ctx, Source source) throws XMLEncryptionException {
transformer.transform(source, res); | 0 |
* @author William Farner
@Ignore("// TODO(William Farner): Flakes when run on machines in the data center.")
@Ignore("// TODO(William Farner): Flakes when run on machines in the data center.") | 0 |
class JarRevision extends BundleArchiveRevision
// Parse the main attributes of the manifest of the given jarfile.
// The idea is to not open the jar file as a java.util.jarfile but
// than 64k or of the size of the manifest.
// The InputStream is already
// buffered and can handle up to 64K buffers in one go.
// Now parse the main attributes. The idea is to do that
// the manifest bytes inside the bytes array and write them back into
// That allows us to create the strings from the bytes array without the skipped
// skip \r and \n if it is follows by another \n
// if we are at the end of a line
// Otherwise, parse the value and add it to the map (we throw an
if (key == null) | 0 |
private final JavaDStream<WindowedValue<T>> dStream;
List<Integer> getStreamSources() { | 0 |
String stageName = BeamSqlRelUtils.getStageName(this) + "_";
upstream = upstream.apply(stageName + "assignEventTimestamp", WithTimestamps
.of(new BeamAggregationTransforms.WindowTimestampFn(windowFieldIdx)))
PCollection<BeamSqlRow> windowStream = upstream.apply(stageName + "window",
Window.into(windowFn)
stageName + "exCombineBy",
.setCoder(KvCoder.of(keyCoder, upstream.getCoder()));
stageName + "combineBy",
PCollection<BeamSqlRow> mergedStream = aggregatedStream.apply(stageName + "mergeRecord", | 0 |
this.log.debug( "Generating java annotation description for: " + className );
this.log.debug( "Generating qdox description for: " + className );
else
{
index++;
} | 0 |
private static final Configuration hadoopConf = new Configuration();
final Configuration actualConf = CredentialProviderFactoryShim.getConfiguration(hadoopConf,
path); | 0 |
/**
* A {@link BoundedSource} created from {@link BoundedDataSource}.
*/
// TODO
// TODO | 0 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ | 1 |
createAggregator("emptyLines", Sum.ofLongs()); | 0 |
import org.apache.batik.script.ImportInfo; | 0 |
public void testAddIfRoom() {
public void testAddIfRoomOverhead() {
public void testNegativeMemory() {
public void testZeroMemory() { | 0 |
} else { // should not happen because of pre-validation in UnitValidator
/**
* @return property value with removed unit
*/
@Override
public String updateForBlueprintExport(String propertyName, String origValue, Map<String, Map<String, String>> properties, ClusterTopology topology) {
PropertyUnit stackUnit = PropertyUnit.of(topology.getBlueprint().getStack(), serviceName, configType, propertyName);
PropertyValue value = PropertyValue.of(propertyName, origValue);
return value.withoutUnit(stackUnit);
}
public String withoutUnit(PropertyUnit unit) {
return hasUnit(unit)
? value.substring(0, value.length() - unit.toString().length())
: value;
}
| 0 |
* $Id$
* $Rev$
* $Date$
* Copyright 2001-2005 The Apache Software Foundation | 0 |
import com.twitter.common.quantity.Amount;
import com.twitter.common.quantity.Data;
private static final Resources DEFAULT_OFFER =
new Resources(DEFAULT_CPUS, Amount.of(DEFAULT_RAM, Data.MB), 0);
public void testSufficientPorts() throws Exception {
control.replay();
Resources twoPorts = new Resources(DEFAULT_CPUS, Amount.of(DEFAULT_RAM, Data.MB), 2);
Predicate<TwitterTaskInfo> filter = defaultFilter.staticFilter(twoPorts, HOST_A);
TwitterTaskInfo noPortTask = makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
.setStartCommand("%task_id%");
TwitterTaskInfo onePortTask = makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
.setStartCommand("%port:one% %port:one% %port:one% %port:one%");
TwitterTaskInfo twoPortTask = makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
.setStartCommand("%port:one% %port:two% %port:two%");
TwitterTaskInfo threePortTask = makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
.setStartCommand("%port:one% %port:two% %port:three%");
assertThat(filter.apply(noPortTask), is(true));
assertThat(filter.apply(onePortTask), is(true));
assertThat(filter.apply(twoPortTask), is(true));
assertThat(filter.apply(threePortTask), is(false));
}
@Test | 0 |
import org.apache.hc.core5.net.InetAddressUtils;
InetAddressUtils.formatAddress(buffer, localAddress);
InetAddressUtils.formatAddress(buffer, remoteAddress); | 0 |
import org.apache.hc.core5.http.ClassicHttpResponse;
final ClassicHttpResponse response, final HttpEntity entity) throws IOException { | 0 |
* @version CVS $Id: ValidationResult.java,v 1.3 2003/03/16 17:49:14 vgritsenko Exp $ | 0 |
result[i] = currArgument.isHandleQuoting() ? StringUtils.quoteArgument(expandedArgument) : expandedArgument; | 0 |
import java.util.Collection;
if (value instanceof String)
Collection<String> list = getListDelimiterHandler().split((String) value, !isTrimmingDisabled());
return list.size() > 1 ? list : list.iterator().next(); | 0 |
import static org.ops4j.pax.exam.CoreOptions.equinox;
import static org.ops4j.pax.exam.CoreOptions.felix;
import static org.ops4j.pax.exam.CoreOptions.knopflerfish;
public static Option[] configure() {
felix(),
equinox(),
knopflerfish(),
| 0 |
* @version CVS $Id: DefaultLayoutFactory.java,v 1.11 2003/06/14 16:58:02 cziegeler Exp $
// the renderers
final String defaultRenderer = layoutsConf[i].getChild("renderers").getAttribute("default");
desc.setDefaultRendererName(defaultRenderer);
final Configuration[] rendererConfs = layoutsConf[i].getChild("renderers").getChildren("renderer");
if ( rendererConfs != null ) {
boolean found = false;
for(int m=0; m < rendererConfs.length; m++) {
final String rName = rendererConfs[m].getAttribute("name");
desc.addRendererName(rName);
if ( defaultRenderer.equals(rName) ) {
found = true;
}
}
if ( !found ) {
throw new ConfigurationException("Default renderer '" + defaultRenderer + "' is not configured for layout '" + name + "'");
}
} else {
throw new ConfigurationException("Default renderer '" + defaultRenderer + "' is not configured for layout '" + name + "'");
}
throw new ProcessingException("Layout '"+layout.getId()+"' has no associated name.");
throw new ProcessingException("LayoutDescription with name '" + layoutName + "' not found.");
throw new ProcessingException("LayoutDescription with name '" + layoutName + "' not found."); | 0 |
package org.apache.beam.runners.apex.translation.utils;
* @return tuple
* @param <T> tuple type
* @param <T> tuple type
* @param <T> tuple type | 0 |
return Schema.Field.of(name, fieldType).withNullable(true); | 0 |
import org.apache.flink.api.common.functions.FlatCombineFunction;
* Flink {@link org.apache.flink.api.common.functions.FlatCombineFunction} for executing a
public class FlinkPartialReduceFunction<K, VI, VA> implements FlatCombineFunction<KV<K, VI>, KV<K, VA>> {
public void combine(Iterable<KV<K, VI>> elements, Collector<KV<K, VA>> out) throws Exception { | 0 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ | 0 |
import com.twitter.mesos.gen.Quota;
public void testQuotaStorage() {
assertNull(store.fetchQuota("jane"));
Quota quota = new Quota()
.setNumCpus(5)
.setRamMb(2)
.setDiskMb(10);
store.saveQuota("jane", quota);
assertEquals(quota, store.fetchQuota("jane"));
Quota quota2 = new Quota()
.setNumCpus(1)
.setRamMb(3)
.setDiskMb(5);
store.saveQuota("jane", quota2);
assertEquals(quota2, store.fetchQuota("jane"));
store.removeQuota("jane");
assertNull(store.fetchQuota("jane"));
}
@Test | 0 |
Assert.assertEquals(false, exp1.evaluate(record, null).getValue());
Assert.assertEquals(true, exp2.evaluate(record, null).getValue());
Assert.assertEquals(false, exp1.evaluate(record, null).getValue());
Assert.assertEquals(true, exp2.evaluate(record, null).getValue());
Assert.assertEquals(true, exp1.evaluate(record, null).getValue());
Assert.assertEquals(false, exp2.evaluate(record, null).getValue());
Assert.assertEquals(true, exp1.evaluate(record, null).getValue());
Assert.assertEquals(false, exp2.evaluate(record, null).getValue());
Assert.assertEquals(true, exp1.evaluate(record, null).getValue());
Assert.assertEquals(false, exp2.evaluate(record, null).getValue());
Assert.assertEquals(false, exp1.evaluate(record, null).getValue());
Assert.assertEquals(true, exp2.evaluate(record, null).getValue()); | 0 |
package org.apache.accumulo.master.tableOps.rename;
import org.apache.accumulo.master.tableOps.MasterRepo;
import org.apache.accumulo.master.tableOps.Utils;
Utils.getTableNameLock().lock();
zoo.mutate(tap, null, null, current -> {
final String currentName = new String(current, UTF_8);
if (currentName.equals(newName))
return null; // assume in this case the operation is running again, so we are done
if (!currentName.equals(oldName)) {
throw new AcceptableThriftTableOperationException(null, oldTableName,
TableOperation.RENAME, TableOperationExceptionType.NOTFOUND,
"Name changed while processing");
return newName.getBytes(UTF_8);
Utils.getTableNameLock().unlock(); | 0 |
tester = TriggerTester.nonCombining(
tester = TriggerTester.nonCombining(FixedWindows.of(Duration.millis(50)), | 0 |
import static org.apache.atlas.model.TypeCategory.*;
import static org.apache.atlas.repository.graph.GraphHelper.*;
deleteRelationships(Collections.singleton(edge), false);
* @param forceDelete
public void deleteRelationships(Collection<AtlasEdge> edges, final boolean forceDelete) throws AtlasBaseException {
deleteEdge(edge, isInternal || forceDelete); | 0 |
* Copyright (C) 2015 Google Inc. | 0 |
import org.w3c.dom.Document;
import org.w3c.dom.Element; | 1 |
public void testThrowsIOExceptionWhenMemcachedPutTimesOut() throws Exception {
public void testCachePutThrowsIOExceptionIfCannotSerializeEntry() throws Exception {
public void testSuccessfulCacheGet() throws Exception {
public void testTreatsNoneByteArrayFromMemcachedAsCacheMiss() throws Exception {
public void testTreatsNullFromMemcachedAsCacheMiss() throws Exception {
public void testTreatsAsCacheMissIfCannotReconstituteEntry() throws Exception {
public void testTreatsAsCacheMissIfCantHashStorageKey() throws Exception { | 0 |
* Provides thread-safe helpers for implementing dynamic work rebalancing in position-based bounded
* sources. | 1 |
* @author <a href="mailto:[email protected]">Felix Project Team</a> | 0 |
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. | 0 |
mockBackend = EasyMock.createNiceMock(HttpClient.class);
mockEntity = EasyMock.createNiceMock(HttpEntity.class);
mockCache = EasyMock.createNiceMock(HttpCache.class); | 0 |
import java.util.List;
import java.util.Set;
List<Request> shouldBeProcessed = new LinkedList<Request>();
Set<Request> shouldNotBeProcessed = new HashSet<Request>();
Set<Request> shouldBeInPending = new HashSet<Request>();
Set<Request> shouldBeProcessedAfterPending = new HashSet<Request>();
Set<Request> nonLocalCommits = new HashSet<Request>();
Set<Request> allReads = new HashSet<Request>();
Set<Request> waitingCommittedRequests = new HashSet<Request>(); | 0 |
package org.osgi.util.converter; | 0 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ | 0 |
public class ObjectArrayIterator<E>
implements Iterator<E>, ResettableIterator<E> {
protected E[] array = null;
public ObjectArrayIterator(E[] array) {
public ObjectArrayIterator(E array[], int start) {
public ObjectArrayIterator(E array[], int start, int end) {
public E next() {
public E[] getArray() {
public void setArray(E[] array) { | 0 |
(paintElement, SVG_GRADIENT_UNITS_ATTRIBUTE, s, ctx);
// A value of zero will cause the area to be painted as a single color
// using the color and opacity of the last gradient stop. | 1 |
package org.apache.beam.sdk.extensions.euphoria.core.time;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue; | 0 |
Throwable cause = e.getCause();
if (null != cause && TableNotFoundException.class.equals(cause.getClass())) {
throw new org.apache.accumulo.proxy.thrift.TableNotFoundException(cause.toString());
} | 0 |
*
@Parameter(names = "--shardTable")
@Parameter(names = "--doc2Term")
| 0 |
if (!canResponseHaveBody(request, response)) {
response.setEntity(null);
}
private boolean canResponseHaveBody(
final HttpRequest request, final HttpResponse response) {
if (request != null && "HEAD".equalsIgnoreCase(request.getRequestLine().getMethod())) {
return false;
}
int status = response.getStatusLine().getStatusCode();
return status >= HttpStatus.SC_OK
&& status != HttpStatus.SC_NO_CONTENT
&& status != HttpStatus.SC_NOT_MODIFIED
&& status != HttpStatus.SC_RESET_CONTENT;
}
| 0 |
* @deprecated since 2.0.0; use {@link #setClientProperties(Class, Configuration, Properties)} instead | 0 |
public final class Introspector {
public Introspector(Log log) {
/**
* Gets a method defined by a class, a name and a set of parameters.
* @param c the class
* @param name the method name
* @param params the method parameters
* @return the desired method object
* @throws MethodKey.AmbiguousException if no unambiguous method could be found through introspection
*/
public Method getMethod(Class<?> c, String name, Object[] params) {
return getMethod(c, new MethodKey(name, params));
} | 1 |
import org.apache.accumulo.core.iterators.user.RegExFilter;
IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
RegExFilter.setRegexs(regex, args[5], args[6], args[7], args[8], false); | 0 |
import org.apache.maven.shared.dependency.graph.DependencyNode;
public DependencyExcluder( DependencyNode dependencyGraph, Collection<Artifact> dependencyArtifacts )
super( dependencyGraph, dependencyArtifacts ); | 0 |
import javax.servlet.ServletException;
* @throws ServletException
public LoggerUtil(ServletConfig config, String knownFile) throws ServletException {
this.settings = CoreUtil.createSettings(config);
this.appContext = CoreUtil.createContext(this.config, this.settings, knownFile); | 0 |
* A processing node builder is used to create the processing statements
* for a sitemap.
* A node builder can either be implemented as a singleton, using
* the ThreadSafe marker interface, or each time a builder is required
* a new instance is created.
* All builders are managed by the {@link NodeBuilderSelector}.
* A node builder can implement the following marker interfaces from
* Avalon: LogEnabled, Contextualizable, Initializable
* and Configurable. Other marker interfaces, like Recyclable, Poolable
* or Disposable are not supported!
* If the builder needs a service manager it can fetch this one from
* the tree builder. | 0 |
public Object getParsedOptionValue(String opt) throws ParseException | 0 |
* @since 4.0 | 0 |
import org.apache.accumulo.core.security.Credentials;
private Credentials credentials;
public TabletServerBatchDeleter(Instance instance, Credentials credentials, String tableId, Authorizations authorizations, int numQueryThreads, | 0 |
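With rows of this shape, an obvious downstream use is predicting the label from the diff text. The following is a hedged baseline sketch, not anything described on this page: it assumes the `ds` object from the loading sketch above and pairs character n-gram TF-IDF features with logistic regression; every name in it is illustrative.

```python
# Hypothetical baseline: predict FaultInducingLabel from the diff text alone.
# Assumes `ds` from the loading sketch above; all names are illustrative.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

diffs = ds["Diff"]
labels = ds["FaultInducingLabel"]

X_train, X_test, y_train, y_test = train_test_split(
    diffs, labels, test_size=0.2, stratify=labels, random_state=0)

# Character n-grams handle identifiers and punctuation in diff hunks
# better than whitespace word tokens do.
vectorizer = TfidfVectorizer(analyzer="char_wb", ngram_range=(3, 5),
                             max_features=50_000)
clf = LogisticRegression(max_iter=1000)
clf.fit(vectorizer.fit_transform(X_train), y_train)

print(classification_report(y_test, clf.predict(vectorizer.transform(X_test))))
```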