Diff (string, length 5–2k) | FaultInducingLabel (int64, 0 or 1)
---|---|
Mockito.verify(managedConn, Mockito.times(1)).close(); | 0 |
BundleProcessor<PCollectionNode, CommittedBundle<?>, PTransformNode> bundleProcessor,
private final Map<PTransformNode, ConcurrentLinkedQueue<CommittedBundle<?>>> pendingRootBundles;
BundleProcessor<PCollectionNode, CommittedBundle<?>, PTransformNode> bundleProcessor,
Map<PTransformNode, ConcurrentLinkedQueue<CommittedBundle<?>>> pendingRootBundles) {
for (Map.Entry<PTransformNode, ConcurrentLinkedQueue<CommittedBundle<?>>> pendingRootEntry :
pendingRootBundles.entrySet()) { | 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ | 0 |
import org.apache.ambari.server.state.UpgradeContextFactory;
binder.bind(UpgradeContextFactory.class).toInstance(EasyMock.createNiceMock(UpgradeContextFactory.class)); | 0 |
Collection<Text> remainingSplits = this.getConnector().tableOperations().listSplits(table); | 0 |
// Force computation of DStream.
dStream.dstream().register(); | 0 |
public class DurationCoder extends CustomCoder<ReadableDuration> {
private static final VarLongCoder LONG_CODER = VarLongCoder.of();
LONG_CODER.encode(toLong(value), outStream, context);
return fromLong(LONG_CODER.decode(inStream, context));
}
@Override
public void verifyDeterministic() {
LONG_CODER.verifyDeterministic();
return LONG_CODER.isRegisterByteSizeObserverCheap(toLong(value), context);
LONG_CODER.registerByteSizeObserver(toLong(value), observer, context); | 0 |
* This annotation defines behavioral contract enforced at runtime by instances of annotated classes.
@Retention(RetentionPolicy.CLASS)
public @interface Contract {
ThreadingBehavior threading() default ThreadingBehavior.UNSAFE;
| 0 |
*
*
*
*
*
*
*
*
| 0 |
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at | 0 |
// @formatter:off
org.apache.accumulo.core.client.mapreduce.RangeInputSplit accSplit =
(org.apache.accumulo.core.client.mapreduce.RangeInputSplit) split;
// @formatter:on | 0 |
// To prevent extension outside of this package.
AppliedPTransform() {} | 0 |
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at | 0 |
/* Cassandra */ | 0 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.commons.bcel6.generic;
/**
* IASTORE - Store into int array
* <PRE>Stack: ..., arrayref, index, value -> ...</PRE>
*
* @version $Id$
* @author <A HREF="mailto:[email protected]">M. Dahm</A>
*/
public class IASTORE extends ArrayInstruction implements StackConsumer {
private static final long serialVersionUID = -3570157505504012648L;
/**
* Store into int array
*/
public IASTORE() {
super(org.apache.commons.bcel6.Constants.IASTORE);
}
/**
* Call corresponding visitor method(s). The order is:
* Call visitor methods of implemented interfaces first, then
* call methods according to the class hierarchy in descending order,
* i.e., the most specific visitXXX() call comes last.
*
* @param v Visitor object
*/
@Override
public void accept( Visitor v ) {
v.visitStackConsumer(this);
v.visitExceptionThrower(this);
v.visitTypedInstruction(this);
v.visitArrayInstruction(this);
v.visitIASTORE(this);
}
} | 1 |
import java.sql.Statement;
private Integer defaultQueryTimeout = null;
/**
* Obtain the default query timeout that will be used for {@link Statement}s
* created from this connection. <code>null</code> means that the driver
* default will be used.
*/
public Integer getDefaultQueryTimeout() {
return defaultQueryTimeout;
}
/**
* Set the default query timeout that will be used for {@link Statement}s
* created from this connection. <code>null</code> means that the driver
* default will be used.
*/
public void setDefaultQueryTimeout(Integer defaultQueryTimeout) {
this.defaultQueryTimeout = defaultQueryTimeout;
}
connectionFactory.setDefaultQueryTimeout(getDefaultQueryTimeout()); | 0 |
import org.apache.http.params.BasicHttpParams;
this.params = new BasicHttpParams(null); | 0 |
@SuppressWarnings("rawtypes")
@Override
public void addedService(ServiceReference ref, Object event) {
getComponentContext().handleEvent(this, EventType.ADDED,
new ServiceEventImpl(m_component, ref, m_serviceInstance));
@SuppressWarnings("rawtypes")
@Override
@SuppressWarnings("rawtypes")
@Override
public void removedService(ServiceReference ref, Object event) {
ServiceEventImpl eventImpl = (ServiceEventImpl) event;
// If we detect that the fwk is stopping, we behave as our superclass. That is:
m_component.handleEvent(this, EventType.REMOVED, new ServiceEventImpl(m_component, ref, m_serviceInstance));
eventImpl.close(); // will unget the service.
// if there is no available services, the next call to invoke() method will block until another service
// becomes available. Else the next call to invoke() will return that highest ranked available service.
ServiceEventImpl event = null;
event = (ServiceEventImpl) m_tracker.waitForService(m_timeout);
if (event == null) {
Object service = event.getEvent();
if (service == null) {
throw new IllegalStateException("Service unavailable: " + m_trackedServiceName.getName());
}
| 0 |
userpass = tpc.proxy().login("root", new TreeMap<String,String>() {
{
put("password", "");
}
});
// TODO: add back in as a test when Mock is improved - ACCUMULO-1306
@Test
tpc.proxy().deleteRows(userpass, testtable, ByteBuffer.wrap("51".getBytes()), ByteBuffer.wrap("99".getBytes())); | 0 |
* DefaultAppleResponse provides a default implementation for the | 0 |
response.getEntity(), ElasticsearchIO.getBackendVersion(connectionConfiguration));
JsonNode searchResult = ElasticsearchIO.parseResponse(response.getEntity());
JsonNode searchResult = parseResponse(response.getEntity()); | 0 |
/*
* Copyright 2001-2004 The Apache Software Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* @version $Revision: 1.6 $ $Date: 2004/02/18 01:15:42 $ | 0 |
for (Header header : headers) {
HEADERS_LOG.debug("<< " + header.toString());
for (Header header : headers) {
HEADERS_LOG.debug(">> " + header.toString()); | 0 |
HttpCacheEntry updated = new BasicHttpCacheEntry( | 0 |
import com.google.cloud.hadoop.util.AbstractGoogleAsyncWriteChannel;
@Test
public void testGcsUploadBufferSizeDefault() throws IOException {
DataflowPipelineOptions batchOptions = buildPipelineOptions();
DataflowRunner.fromOptions(batchOptions);
assertNull(batchOptions.getGcsUploadBufferSizeBytes());
DataflowPipelineOptions streamingOptions = buildPipelineOptions();
streamingOptions.setStreaming(true);
DataflowRunner.fromOptions(streamingOptions);
assertEquals(
AbstractGoogleAsyncWriteChannel.UPLOAD_PIPE_BUFFER_SIZE_DEFAULT,
streamingOptions.getGcsUploadBufferSizeBytes().intValue());
}
@Test
public void testGcsUploadBufferSize() throws IOException {
int gcsUploadBufferSizeBytes = 12345678;
DataflowPipelineOptions batchOptions = buildPipelineOptions();
batchOptions.setGcsUploadBufferSizeBytes(gcsUploadBufferSizeBytes);
DataflowRunner.fromOptions(batchOptions);
assertEquals(gcsUploadBufferSizeBytes, batchOptions.getGcsUploadBufferSizeBytes().intValue());
DataflowPipelineOptions streamingOptions = buildPipelineOptions();
streamingOptions.setStreaming(true);
streamingOptions.setGcsUploadBufferSizeBytes(gcsUploadBufferSizeBytes);
DataflowRunner.fromOptions(streamingOptions);
assertEquals(
gcsUploadBufferSizeBytes, streamingOptions.getGcsUploadBufferSizeBytes().intValue());
}
| 0 |
.add(
DisplayData.item("inferBeamSchema", getInferBeamSchema())
.withLabel("Infer Beam Schema"))
.addIfNotNull(DisplayData.item("schema", String.valueOf(getSchema())))
.addIfNotNull(DisplayData.item("recordClass", getRecordClass()).withLabel("Record Class"))
@Override
public void populateDisplayData(DisplayData.Builder builder) {
super.populateDisplayData(builder);
builder
.add(
DisplayData.item("inferBeamSchema", getInferBeamSchema())
.withLabel("Infer Beam Schema"))
.addIfNotNull(DisplayData.item("schema", String.valueOf(getSchema())))
.addIfNotNull(
DisplayData.item("recordClass", getRecordClass()).withLabel("Record Class"));
}
builder
.add(
DisplayData.item("inferBeamSchema", getInferBeamSchema())
.withLabel("Infer Beam Schema"))
.addIfNotNull(DisplayData.item("schema", String.valueOf(getSchema())))
.addIfNotNull(DisplayData.item("recordClass", getRecordClass()).withLabel("Record Class"))
.include("matchConfiguration", getMatchConfiguration()); | 0 |
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.sshd.client.ClientFactoryManager;
import org.apache.sshd.common.PropertyResolverUtils;
import org.apache.sshd.common.session.SessionHeartbeatController.HeartbeatType;
import org.apache.sshd.common.util.GenericUtils;
public static <C extends SshClient> C setupClientHeartbeat(C client, PropertyResolver props) {
if ((client == null) || (props == null)) {
return client;
}
long interval = PropertyResolverUtils.getLongProperty(
props, CLIENT_LIVECHECK_INTERVAL_PROP, DEFAULT_ALIVE_INTERVAL);
if (interval <= 0L) {
return client;
}
if (PropertyResolverUtils.getBooleanProperty(
props, CLIENT_LIVECHECK_USE_NULLS, DEFAULT_LIVECHECK_USE_NULLS)) {
client.setSessionHeartbeat(HeartbeatType.IGNORE, TimeUnit.SECONDS, interval);
} else {
PropertyResolverUtils.updateProperty(
client, ClientFactoryManager.HEARTBEAT_INTERVAL, TimeUnit.SECONDS.toMillis(interval));
interval = PropertyResolverUtils.getLongProperty(
props, CLIENT_LIVECHECK_REPLIES_WAIT, DEFAULT_LIVECHECK_REPLY_WAIT);
if (interval > 0L) {
PropertyResolverUtils.updateProperty(
client, ClientFactoryManager.HEARTBEAT_REPLY_WAIT, TimeUnit.SECONDS.toMillis(interval));
}
}
return client;
}
public static <C extends SshClient> C setupClientHeartbeat(C client, Map<String, ?> options) {
if ((client == null) || GenericUtils.isEmpty(options)) {
return client;
}
return setupClientHeartbeat(client, PropertyResolverUtils.toPropertyResolver(options));
}
setupClientHeartbeat(client, props); | 0 |
* @since 1.4
* @since 1.4
* @since 2.0
* @since 2.0
* @since 1.3
* @since 1.3 | 1 |
import org.apache.aurora.gen.MaintenanceMode;
MaintenanceMode mode,
Set<Veto> vetoes = delegate.filter(offer, slaveHost, mode, task, taskId, jobState); | 0 |
* @deprecated (4.3) use {@link org.apache.http.impl.client.HttpClientBuilder}. | 0 |
// SQL array indexing is 1 based
return BeamSqlPrimitive.of(outputType, array.get(index - 1)); | 0 |
import org.apache.http.annotation.Immutable;
@Immutable // provided injected dependencies are immutable | 0 |
/** Return only directories? */
private boolean iterateDirectories = false;
public FileIterator(Project project,
Iterator fileSetIterator) {
this( project, fileSetIterator, false);
}
public FileIterator(Project project,
Iterator fileSetIterator,
boolean iterateDirectories) {
this.iterateDirectories = iterateDirectories;
if (iterateDirectories) {
files = ds.getIncludedDirectories();
}
else {
files = ds.getIncludedFiles();
} | 0 |
* Deprecated since 1.9.
*
* @param right the Phoneme to join
* @return a new Phoneme | 0 |
import org.apache.ambari.server.stack.upgrade.UpgradePack; | 0 |
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
super(new BufferedInputStream(sock.getSocket().getInputStream(), bufferSize), new BufferedOutputStream(sock.getSocket().getOutputStream(), bufferSize));
client = sock.getSocket().getInetAddress().getHostAddress() + ":" + sock.getSocket().getPort();
| 0 |
DataSource newDataSource;
newDataSource = createDataSourceInstance();
newDataSource.setLogWriter(logWriter);
dataSource = newDataSource; | 0 |
Copyright 2000-2003 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/ | 0 |
package org.apache.felix.scr.impl.inject; | 0 |
private final Map resourceBundleCaches;
this.resourceBundleCaches = new HashMap();
* @return the resource bundle - if not bundle with the requested locale exists,
cache = ( ResourceBundleCache ) resourceBundleCaches.get( key ); | 0 |
import org.apache.http.params.HttpConnectionParams;
* <li>{@link org.apache.http.params.CoreConnectionPNames#SO_TIMEOUT}</li>
return new DefaultNHttpServerConnection(
int timeout = HttpConnectionParams.getSoTimeout(this.params);
session.setSocketTimeout(timeout);
this.handler.connected(conn); | 0 |
sb.append("<div class='left show'><dl>\n"); | 0 |
import org.apache.cocoon.components.flow.java.*;
ArrayList results = new ArrayList();
// Sort result
Collections.sort(results, new EmployeeComparator());
public class EmployeeComparator implements Comparator, Continuable {
public int compare(Object o1, Object o2) {
return ((Employee)o1).getId()-((Employee)o2).getId();
}
public boolean equals(Object obj) {
return true;
}
} | 0 |
package org.apache.felix.karaf.gshell.admin.internal.commands;
import org.apache.felix.karaf.gshell.admin.AdminService;
import org.apache.felix.karaf.gshell.admin.Instance;
import org.apache.felix.karaf.gshell.core.OsgiCommandSupport; | 0 |
public <R,P> R accept(NodeVisitor<? extends R, ? super P> visitor, P data)
{
return visitor.visit(this, data);
} | 0 |
// Used to specify the maximum # of versions of an Accumulo cell value to return
* Specify an Accumulo iterator type to manage the behavior of the underlying table scan this InputFormat's Record Reader will conduct, w/ priority dictating
* The Class RangeInputSplit. Encapsulates an Accumulo range for use in Map Reduce jobs.
* The Class AccumuloIteratorOption. Encapsulates specifics for an Accumulo iterator's optional configuration details - associated via the iteratorName. | 0 |
* @param jobKey Job key.
public boolean deleteJob(JobKey jobKey) { | 0 |
public void init(AbstractSession s, byte[] v_s, byte[] v_c, byte[] i_s, byte[] i_c) throws Exception {
super.init(s, v_s, v_c, i_s, i_c); | 1 |
* SSLNHttpServerConnectionFactory#SSLNHttpServerConnectionFactory(SSLContext,
* SSLSetupHandler, HttpRequestFactory, ByteBufferAllocator)}
* SSLNHttpServerConnectionFactory#SSLNHttpServerConnectionFactory(SSLContext,
* SSLSetupHandler)}
* SSLNHttpServerConnectionFactory#SSLNHttpServerConnectionFactory()} | 0 |
Collections.emptyList(), | 0 |
public NullInputStreamTest(final String name) {
final int size = 5;
final InputStream input = new TestNullInputStream(size);
final int result = input.read();
} catch (final IOException e) {
final byte[] bytes = new byte[10];
final InputStream input = new TestNullInputStream(15);
final int count1 = input.read(bytes);
final int count2 = input.read(bytes);
final int count3 = input.read(bytes);
final int count4 = input.read(bytes);
} catch (final IOException e) {
final int offset = 2;
final int lth = 4;
final int count5 = input.read(bytes, offset, lth);
final InputStream input = new TestNullInputStream(2, false, true);
final int result = input.read();
} catch (final EOFException e) {
final int readlimit = 10;
final InputStream input = new TestNullInputStream(100, true, false);
} catch (final IOException e) {
} catch (final IOException e) {
final InputStream input = new TestNullInputStream(100, false, true);
} catch (final UnsupportedOperationException e) {
} catch (final UnsupportedOperationException e) {
final InputStream input = new TestNullInputStream(10, true, false);
} catch (final IOException e) {
public TestNullInputStream(final int size) {
public TestNullInputStream(final int size, final boolean markSupported, final boolean throwEofException) {
protected void processBytes(final byte[] bytes, final int offset, final int length) {
final int startPos = (int)getPosition() - length; | 1 |
* Copyright (C) 2015 Google Inc. | 0 |
return attemptHiveConnection(pass,ldapEnabled);
return attemptHiveConnection(NO_PASSWORD,ldapEnabled);
private Response attemptHiveConnection(String pass, boolean ldapEnabled) {
if(isLoginError(e) && ldapEnabled) | 0 |
/*
* Copyright 2016-2018 Seznam.cz, a.s. | 0 |
private static final long serialVersionUID = -4604082205545049134L; | 0 |
package com.twitter.nexus.scheduler.configuration; | 0 |
package org.apache.hc.core5.http.nio.codecs; | 0 |
/** {@code ConverterRule} to replace {@code Intersect} with {@code BeamIntersectRel}. */
super(
LogicalIntersect.class,
Convention.NONE,
BeamLogicalConvention.INSTANCE,
"BeamIntersectRule");
@Override
public RelNode convert(RelNode rel) {
intersect.all); | 0 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.io;
import java.nio.ByteOrder;
import org.junit.Assert;
import org.junit.Test;
public class ByteOrderFactoryTest {
private ByteOrder parseByteOrder(final String value) {
return ByteOrderFactory.parseByteOrder(value);
}
@Test
public void testParseBig() {
Assert.assertEquals(ByteOrder.BIG_ENDIAN, parseByteOrder("big"));
Assert.assertEquals(ByteOrder.BIG_ENDIAN, parseByteOrder("Big"));
Assert.assertEquals(ByteOrder.BIG_ENDIAN, parseByteOrder("BIG"));
}
@Test
public void testParseLittle() {
Assert.assertEquals(ByteOrder.LITTLE_ENDIAN, parseByteOrder("little"));
Assert.assertEquals(ByteOrder.LITTLE_ENDIAN, parseByteOrder("Little"));
Assert.assertEquals(ByteOrder.LITTLE_ENDIAN, parseByteOrder("LITTLE"));
}
} | 0 |
/*
* Copyright 2013 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ | 0 |
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at | 0 |
* <code>TranscoderOutput</code>. This method does nothing if the output already | 0 |
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at | 0 |
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.hc.client5.http.cache.HttpCacheCASOperation;
import org.apache.hc.core5.util.Args;
@Override
public Map<String, HttpCacheEntry> getEntries(final Collection<String> keys) throws ResourceIOException {
Args.notNull(keys, "Key");
final Map<String, HttpCacheEntry> resultMap = new HashMap<>(keys.size());
for (final String key: keys) {
final HttpCacheEntry entry = getEntry(key);
if (entry != null) {
resultMap.put(key, entry);
}
}
return resultMap;
}
| 0 |
import org.apache.http.params.HttpParams;
public class TestHttpServer {
private final ListeningIOReactor ioReactor;
private final HttpParams params;
private final Object socketMutex;
private volatile IOReactorThread thread;
private volatile InetSocketAddress address;
public TestHttpServer(final HttpParams params) throws IOException {
this.ioReactor = new DefaultListeningIOReactor(2, params);
this.params = params;
this.socketMutex = new Object();
public HttpParams getParams() {
return this.params;
private void execute(final IOEventDispatch ioEventDispatch) throws IOException {
synchronized (this.socketMutex) {
this.address = (InetSocketAddress) this.ioReactor.listen(
new InetSocketAddress(0));
this.socketMutex.notifyAll();
public InetSocketAddress getSocketAddress() throws InterruptedException {
synchronized (this.socketMutex) {
this.socketMutex.wait();
public void start(final IOEventDispatch ioEventDispatch) {
this.thread = new IOReactorThread(ioEventDispatch);
this.thread.start();
}
public void shutdown() throws IOException {
this.ioReactor.shutdown();
try {
this.thread.join(500);
} catch (InterruptedException ignore) {
}
}
private class IOReactorThread extends Thread {
private final IOEventDispatch ioEventDispatch;
public IOReactorThread(final IOEventDispatch ioEventDispatch) {
super();
this.ioEventDispatch = ioEventDispatch;
}
public void run() {
try {
execute(this.ioEventDispatch);
} catch (IOException ex) {
ex.printStackTrace();
}
}
} | 0 |
import org.apache.sshd.client.subsystem.sftp.extensions.SftpClientExtension;
/**
* @param extensionType The extension type
* @return The extension instance - <B>Note:</B> it is up to the caller
* to invoke {@link SftpClientExtension#isSupported()} - {@code null} if
* this extension type is not implemented by the client
* @see #getServerExtensions()
*/
<E extends SftpClientExtension> E getExtension(Class<? extends E> extensionType);
/**
* @param extensionName The extension name
* @return The {@link SftpClientExtension} name - ignored if {@code null}/empty
* @return The extension instance - <B>Note:</B> it is up to the caller
* to invoke {@link SftpClientExtension#isSupported()} - {@code null} if
* this extension type is not implemented by the client
* @see #getServerExtensions()
*/
SftpClientExtension getExtension(String extensionName); | 0 |
} catch (final URISyntaxException e) { | 0 |
// nothing extra | 0 |
if (SideEffect.hasActions(result.getSideEffects().values())) {
throw new IllegalArgumentException(
"A terminal state should not specify actions: " + result);
}
LOG.info("Executing side-effects for update of " + job + ": " + result.getSideEffects());
for (Map.Entry<Integer, SideEffect> entry : result.getSideEffects().entrySet()) {
// TODO(wfarner): Persist SideEffect.getStatusChanges as JobInstanceUpdateEvents.
Optional<InstanceAction> action = entry.getValue().getAction();
if (action.isPresent()) {
Optional<InstanceActionHandler> handler = action.get().getHandler();
if (handler.isPresent()) {
Amount<Long, Time> reevaluateDelay = handler.get().getReevaluationDelay(
instance,
updateStore.fetchJobUpdateConfiguration(summary.getUpdateId()).get(),
taskStore,
stateManager,
updaterStatus);
executor.schedule(
getDeferredEvaluator(instance, summary.getUpdateId()),
reevaluateDelay.getValue(),
reevaluateDelay.getUnit().getTimeUnit());
} | 0 |
for (final String pattern : patterns) { | 0 |
* @version $Id$ | 0 |
import com.google.cloud.dataflow.sdk.util.TimeDomain;
if (c.timeDomain() != TimeDomain.PROCESSING_TIME) {
return TriggerResult.CONTINUE;
}
Instant delayedUntil = c.state().access(DELAYED_UNTIL_TAG).get().read();
if (delayedUntil == null || delayedUntil.isAfter(c.timestamp())) {
return TriggerResult.CONTINUE;
}
CombiningValueState<Instant, Instant> delayed = c.state().access(DELAYED_UNTIL_TAG);
Instant timestamp = delayed.get().read();
delayed.clear();
if (timestamp != null) {
c.timers().deleteTimer(timestamp, TimeDomain.PROCESSING_TIME);
} | 0 |
* $Header: /home/jerenkrantz/tmp/commons/commons-convert/cvs/home/cvs/jakarta-commons//beanutils/src/test/org/apache/commons/beanutils/BeanUtilsTestCase.java,v 1.9 2002/04/27 23:11:23 craigmcc Exp $
* $Revision: 1.9 $
* $Date: 2002/04/27 23:11:23 $
* @version $Revision: 1.9 $
* Test populate() on mapped properties.
*/
public void testPopulateMapped() {
try {
HashMap map = new HashMap();
map.put("mappedProperty(First Key)", "New First Value");
map.put("mappedProperty(Third Key)", "New Third Value");
BeanUtils.populate(bean, map);
assertEquals("mappedProperty(First Key)",
"New First Value",
bean.getMappedProperty("First Key"));
assertEquals("mappedProperty(Second Key)",
"Second Value",
bean.getMappedProperty("Second Key"));
assertEquals("mappedProperty(Third Key)",
"New Third Value",
bean.getMappedProperty("Third Key"));
assertNull("mappedProperty(Fourth Key",
bean.getMappedProperty("Fourth Key"));
} catch (IllegalAccessException e) {
fail("IllegalAccessException");
} catch (InvocationTargetException e) {
fail("InvocationTargetException");
}
}
/** | 0 |
private static final long serialVersionUID = 0;
| 0 |
import org.apache.cocoon.forms.FormsException;
if (!(definition instanceof RepeaterDefinition)) {
throw new FormsException("Parent definition " + definition.getClass().getName() + " is not a RepeaterDefinition.",
getLocation());
}
RepeaterDefinition other = (RepeaterDefinition) definition;
this.initialSize = other.initialSize;
this.maxSize = other.maxSize;
this.minSize = other.minSize; | 0 |
import java.util.Map;
NamedResource resourceKey, PuttyKeyReader pubReader, PuttyKeyReader prvReader, Map<String, String> headers) | 0 |
import org.apache.beam.sdk.transforms.DoFnSchemaInformation;
WINDOWING_STRATEGY,
DoFnSchemaInformation.create()); | 0 |
import org.apache.cocoon.objectmodel.ObjectModel;
import org.apache.cocoon.xml.NamespacesTable;
ObjectModel objectModel,
NamespacesTable namespaces, Event startEvent, Event endEvent) throws SAXException { | 0 |
* @param compare result of comparison to evaluate | 0 |
/*
* Copyright (c) OSGi Alliance (2012, 2014). All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.osgi.service.http.runtime.dto;
/**
* Represents a resource definition which is currently not being used by a
* servlet context due to a problem.
* <p>
* As the resource represented by this DTO is not used due to a failure, the
* field {@link FailedResourceDTO#servletContextId} always returns {@code 0} and
* does not point to an existing servlet context.
*
* @NotThreadSafe
* @author $Id$
*/
public class FailedResourceDTO extends ResourceDTO {
/**
* The reason why the resource represented by this DTO is not used.
*
* @see DTOConstants#FAILURE_REASON_UNKNOWN
* @see DTOConstants#FAILURE_REASON_EXCEPTION_ON_INIT
* @see DTOConstants#FAILURE_REASON_NO_SERVLET_CONTEXT_MATCHING
* @see DTOConstants#FAILURE_REASON_SERVICE_NOT_GETTABLE
* @see DTOConstants#FAILURE_REASON_SERVLET_CONTEXT_FAILURE
* @see DTOConstants#FAILURE_REASON_SHADOWED_BY_OTHER_SERVICE
*/
public int failureReason;
} | 0 |
Init.init();
Document doc=DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
Attr uri=doc.createAttribute("id");
uri.setNodeValue("urn:ddd:uuu");
((Element)doc.createElement("test")).setAttributeNode(uri);
try {
ResourceResolver resolver=ResourceResolver.getInstance(uri, null);
fail("No exception throw, but resolver found:"+resolver);
} catch (ResourceResolverException e) {
}
} | 0 |
import org.apache.accumulo.core.clientImpl.Namespace;
import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.securityImpl.thrift.TCredentials; | 0 |
import cz.seznam.euphoria.core.client.dataset.WindowContext;
public interface WindowAware<IN, W extends WindowContext<?, ?>> { | 0 |
import com.google.bigtable.v2.MutateRowResponse;
import com.google.bigtable.v2.Mutation;
import com.google.bigtable.v2.Row;
import com.google.bigtable.v2.RowFilter;
import com.google.bigtable.v2.SampleRowKeysResponse;
import com.google.cloud.bigtable.config.BigtableOptions;
import com.google.cloud.bigtable.config.RetryOptions;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.protobuf.ByteString;
import io.grpc.Status;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentLinkedQueue;
import javax.annotation.Nullable; | 0 |
import java.util.Set;
private Set<String> excludedConfigTypes;
excludedConfigTypes = service.getExcludedConfigTypes();
public Set<String> getExcludedConfigTypes() {
return excludedConfigTypes;
} | 0 |
/** This is a map using the name as the key and {@link PropertyDescription}
* as values.
*/
final private Map properties = new HashMap();
public void testProperty(JavaTag property, String defaultName, JavaField field, boolean isInspectedClass)
properties.put(propName, new PropertyDescription(property, field));
}
}
}
public void handleField(JavaField javaField, boolean isInspectedClass)
throws MojoExecutionException {
final JavaTag tag = javaField.getTagByName(Constants.PROPERTY);
if (tag != null) {
String defaultName = null;
if ( "java.lang.String".equals(javaField.getType()) ) {
final String[] initValues = javaField.getInitializationExpression();
if ( initValues != null && initValues.length == 1 ) {
defaultName = initValues[0];
}
this.testProperty(tag, defaultName, javaField, isInspectedClass);
public void processProperties(final Component component, final OCD ocd)
throws MojoExecutionException {
final Iterator propIter = properties.entrySet().iterator();
while ( propIter.hasNext() ) {
final Map.Entry entry = (Map.Entry)propIter.next();
final String propName = entry.getKey().toString();
final PropertyDescription desc = (PropertyDescription)entry.getValue();
this.doProperty(desc.propertyTag, propName, component, ocd, desc.field);
}
}
protected static final class PropertyDescription {
public final JavaTag propertyTag;
public final JavaField field;
public PropertyDescription(final JavaTag p, final JavaField f) {
this.propertyTag = p;
this.field = f;
}
} | 0 |
import com.google.common.annotations.VisibleForTesting;
public class AvroCoder<T> extends CustomCoder<T> {
private final AvroCoder<T> myCoder = AvroCoder.this;
return myCoder.createDatumReader();
this.writer =
new EmptyOnDeserializationThreadLocal<DatumWriter<T>>() {
private final AvroCoder<T> myCoder = AvroCoder.this;
@Override
public DatumWriter<T> initialValue() {
return myCoder.createDatumWriter();
}
};
@VisibleForTesting
@VisibleForTesting
// TODO: once we can remove this deprecated function, inline in constructor. | 0 |
import static junit.framework.Assert.assertNotNull;
import static junit.framework.Assert.assertNull;
@Test
public void testRenameChildren() {
TestResourceDefinition resourceDefinition = new TestResourceDefinition();
ResourceDefinition.PostProcessor processor = resourceDefinition.getPostProcessors().iterator().next();
TreeNode<Resource> node = new TreeNodeImpl<Resource>(new TreeNodeImpl<Resource>(null, null, null), null, "test");
TreeNode<Resource> child = new TreeNodeImpl<Resource>(node, null, "stackServices");
node.addChild(child);
child = new TreeNodeImpl<Resource>(node, null, "serviceComponents");
node.addChild(child);
child = new TreeNodeImpl<Resource>(node, null, "operatingSystems");
node.addChild(child);
String href = "/stacks/HDP/versions/1.3.2/stackServices/foo";
assertNotNull(node.getChild("stackServices"));
assertNotNull(node.getChild("serviceComponents"));
assertNotNull(node.getChild("operatingSystems"));
assertNull(node.getChild("services"));
assertNull(node.getChild("components"));
assertNull(node.getChild("operating_systems"));
processor.process(null, node, href);
assertNull(node.getChild("stackServices"));
assertNull(node.getChild("serviceComponents"));
assertNull(node.getChild("operatingSystems"));
assertNotNull(node.getChild("services"));
assertNotNull(node.getChild("components"));
assertNotNull(node.getChild("operating_systems"));
}
| 0 |
protected List<Field> lFields = new ArrayList<Field>();
public List<Field> getFields() {
List<Field> templFields = new ArrayList<Field>();
Map<String, Field> temphFields = new FastHashMap();
Iterator<Field> dependsIt = depends.getFields().iterator();
for (Iterator<Field> i = parent.getFields().iterator(); i.hasNext(); ) {
Field f = i.next();
for (Iterator<Field> i = lFields.listIterator(n); i.hasNext(); ) {
Field f = i.next();
for (Iterator<Field> i = lFields.iterator(); i.hasNext(); ) {
ValidatorResults validate(Map<String, ? super Object> params, Map actions, int page, String fieldName)
// TODO the params map contains both ValidatorResults and Field entries
Iterator<Field> fields = this.lFields.iterator();
Field field = fields.next(); | 1 |
* Minimal interface to read a block from a
*
* | 0 |
import org.apache.hc.core5.io.CloseMode;
server.close(CloseMode.IMMEDIATE);
requester.close(CloseMode.GRACEFUL); | 0 |
package org.apache.felix.ipojo.runtime.core.api.services;
public interface BarService {
public void doSomethingWithBar();
} | 0 |
import com.google.common.collect.Lists;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue; | 0 |
/*
* Copyright 2002-2004 The Apache Software Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* @version $Revision: 1.16 $ $Date: 2004/02/18 01:15:42 $ | 0 |
import com.google.cloud.dataflow.sdk.runners.worker.status.BaseStatusServlet;
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
public BaseStatusServlet statusServlet() {
return new BaseStatusServlet("/cachez") {
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response)
throws IOException, ServletException {
PrintWriter writer = response.getWriter();
writer.println("<h1>Cache Information</h1>");
printSummaryHtml(writer);
}
}; | 0 |
assertEquals("tableId of the keyExtent should be 1", "1", markers.keySet().iterator().next().getTableId().canonicalID()); | 1 |
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at | 0 |
* {@code null} for HTTP. The actual version | 0 |
import org.apache.batik.dom.AbstractAttr;
import org.apache.batik.util.SVGConstants;
return ((AbstractAttr) a).isId(); | 0 |
* <p>
* </p>
* @return {@code true} if this connection is secure,
* {@code false} otherwise
* @param secure {@code true} if this connection is secure, for
* example if an {@code SSLSocket} is used, or
* {@code false} if it is not secure
* <p>
* </p>
* or {@code null} to continue using the old socket.
* If {@code null} is passed, helper objects that
* @param secure {@code true} if this connection is now secure,
* {@code false} if it is not secure | 0 |
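The rows above form a two-column table: a code diff (string, 5–2k characters, mostly Java changes from Apache projects) and a FaultInducingLabel (int64, 0 or 1) marking whether the change was identified as fault-inducing. Below is a minimal loading sketch, assuming the table has been exported to a CSV file; the file name fault_diffs.csv is a placeholder, and only the column names Diff and FaultInducingLabel come from the header at the top of the table.

```python
# Minimal sketch: load and split a (Diff, FaultInducingLabel) table.
# Assumption: the rows were exported to "fault_diffs.csv"; the file name is
# hypothetical, only the column names mirror the header above.
import pandas as pd

df = pd.read_csv("fault_diffs.csv")

# Sanity-check the schema described in the header.
assert {"Diff", "FaultInducingLabel"} <= set(df.columns)
assert df["FaultInducingLabel"].isin([0, 1]).all()

# Separate fault-inducing changes from clean ones.
faulty = df[df["FaultInducingLabel"] == 1]
clean = df[df["FaultInducingLabel"] == 0]
print(f"{len(faulty)} fault-inducing rows, {len(clean)} clean rows")

# Peek at the first fault-inducing diff, truncated for display.
if not faulty.empty:
    print(faulty.iloc[0]["Diff"][:200])
```

Once loaded this way, the label column can serve directly as the target for a defect-prediction classifier, with the diff text as the input feature.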