Diff (string, lengths 5–2k) | FaultInducingLabel (int64, 0–1) |
---|---|
* $Header: /home/jerenkrantz/tmp/commons/commons-convert/cvs/home/cvs/jakarta-commons//jelly/jelly-tags/dynabean/src/test/org/apache/commons/jelly/tags/dynabean/TestJelly.java,v 1.1 2003/01/15 15:18:32 dion Exp $
* $Revision: 1.1 $
* $Date: 2003/01/15 15:18:32 $
* $Id: TestJelly.java,v 1.1 2003/01/15 15:18:32 dion Exp $
package org.apache.commons.jelly.tags.dynabean;
* @version $Revision: 1.1 $ | 0 |
URL userDir = new File(System.getProperty("user.dir")).toURI().toURL(); | 0 |
package org.apache.commons.digester3.plugins;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License. | 0 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.ambari.logsearch.config.api.model.inputconfig;
import java.util.List;
public interface PostMapValues {
List<MapFieldDescriptor> getMappers();
} | 0 |
public SynchronizedListTest(final String testName) { | 0 |
*
Collection<?> col = (Collection<?>) arg;
for (Object o : ((Collection<?>) arg)) | 0 |
* object of this class. All interpolation tasks are delegated to this object.
* <code>StrLookup</code> objects, each of which is identified by a special
* will be used for variables that do not have a prefix or that cannot be
* resolved by their associated lookup object.
* <code>Configuration</code> object and used for its interpolation tasks.
* </p>
* no prefix can be found or if the associated lookup object cannot resolve
* this variable, the default lookup object will be used.
if (prefixPos >= 0)
String value = fetchLookupForPrefix(prefix).lookup(name);
if (value != null)
{
return value;
}
return fetchNoPrefixLookup().lookup(var); | 0 |
package org.apache.bcel;
import org.apache.bcel.classfile.AnnotationDefault;
import org.apache.bcel.classfile.ElementValue;
import org.apache.bcel.classfile.JavaClass;
import org.apache.bcel.classfile.Method;
import org.apache.bcel.classfile.SimpleElementValue;
public class AnnotationDefaultAttributeTestCase extends AbstractTestCase
{
/**
* For values in an annotation that have default values, we should be able
* to query the AnnotationDefault attribute against the method to discover
* the default value that was originally declared.
*/
public void testMethodAnnotations() throws ClassNotFoundException
{
JavaClass clazz = getTestClass("org.apache.bcel.data.SimpleAnnotation");
Method m = getMethod(clazz, "fruit");
AnnotationDefault a = (AnnotationDefault) findAttribute(
"AnnotationDefault", m.getAttributes());
SimpleElementValue val = (SimpleElementValue) a.getDefaultValue();
assertTrue("Should be STRING but is " + val.getElementValueType(), val
.getElementValueType() == ElementValue.STRING);
assertTrue("Should have default of bananas but default is "
+ val.getValueString(), val.getValueString().equals("bananas"));
}
} | 0 |
/* Generated By:JavaCC: Do not edit this line. JavaCharStream.java Version 3.0 */
*
*
*
* Method to adjust line and column numbers for the start of a token. | 0 |
public <R, P> R accept( NodeVisitor<? extends R, ? super P> visitor, P data )
throws OgnlException | 0 |
static { | 0 |
fileScanner.setProject(AntTagLibrary.getProject(context));
| 0 |
import org.apache.beam.model.pipeline.v1.MetricsApi.MonitoringInfo; | 0 |
* Stream that cuts off after a defined number of bytes.
* <p>
* gets called. Instead, it will read until the "end" of its limit on
* Wraps a session input buffer and cuts off output after a defined number
* of bytes.
* @param in The session input buffer | 0 |
package org.apache.atlas.web.resources;
import org.apache.atlas.MetadataException;
import org.apache.atlas.MetadataServiceClient;
import org.apache.atlas.services.MetadataService;
import org.apache.atlas.typesystem.types.DataTypes;
import org.apache.atlas.web.util.Servlets;
* org.apache.atlas.typesystem.types.DataTypes.TypeCategory | 0 |
import org.apache.accumulo.core.client.security.tokens.PasswordToken; | 0 |
import java.util.Set;
public void setAlreadyDeployedFilesSet(Set alreadyDeployedFilesSet); | 0 |
signalChannelOpenFailure(e);
f.setException(e);
} finally {
notifyStateChanged(e.getClass().getSimpleName());
String changeEvent = session.toString();
signalChannelOpenSuccess();
changeEvent = e.getClass().getSimpleName();
signalChannelOpenFailure(e);
} finally {
notifyStateChanged(changeEvent);
signalChannelOpenFailure(problem);
notifyStateChanged(problem.getClass().getSimpleName()); | 0 |
protected void appendRootUri(final StringBuffer buffer, boolean addPassword)
super.appendRootUri(buffer, addPassword);
protected void appendCredentials(StringBuffer buffer, boolean addPassword)
super.appendCredentials(buffer, addPassword); | 0 |
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at | 0 |
package org.apache.batik.ext.awt.image.codec.util; | 0 |
import org.apache.cocoon.portal.util.XMLUtils;
import org.xml.sax.helpers.AttributesImpl;
* </ul>
* </tbody></table>
XMLUtils.addCDATAAttribute(attributes, (String)entry.getKey(), (String)entry.getValue()); | 0 |
import org.apache.avalon.excalibur.pool.Recyclable;
import org.apache.cocoon.components.language.programming.LanguageCompiler;
import org.apache.cocoon.util.AbstractLogEnabled;
*
* @version $Id$
public abstract class AbstractJavaCompiler extends AbstractLogEnabled
implements LanguageCompiler, Recyclable {
protected String encoding; | 0 |
/** Field implementedTransformURI */
public static final String implementedTransformURI =
Transforms.TRANSFORM_ENVELOPED_SIGNATURE;
* Method engineGetURI
*
protected String engineGetURI() {
return implementedTransformURI;
}
/**
* @inheritDoc
*/
protected XMLSignatureInput enginePerformTransform(
XMLSignatureInput input, Transform transformObject
) throws TransformationException {
Node signatureElement = transformObject.getElement();
Node exclude;
exclude = n;
public int isNodeIncludeDO(Node n, int level) {
if (n == exclude) {
return -1;
}
return 1;
}
if (n == exclude || XMLUtils.isDescendantOrSelf(exclude, n)) {
return -1;
}
return 1; | 0 |
* <p> Use {@link com.google.cloud.dataflow.sdk.testing.PAssert} to add tests, then call
* <p> Use {@link com.google.cloud.dataflow.sdk.testing.PAssert} to add tests, then call
* <p> Use {@link com.google.cloud.dataflow.sdk.testing.PAssert} to add tests, then call | 0 |
// TODO cater for null request
Object type = resource.getProperties().get(IdentityNamespace.CAPABILITY_TYPE_ATTRIBUTE);
OSGiCapabilityImpl c = OSGiRepositoryImpl.newOSGiIdentityCapability(resource.getSymbolicName(),
type != null ? type.toString() : IdentityNamespace.TYPE_BUNDLE, resource.getVersion());
c.setResource(this);
return Collections.<Capability>singletonList(c);
// TODO cater for null request
OSGiCapabilityImpl c = OSGiRepositoryImpl.newOSGiContentCapability(resource.getURI(), resource.getSize());
c.setResource(this);
return Collections.<Capability>singletonList(c);
}
catch (Exception e) | 0 |
* @return the JCE ProviderName
* Proxy method for {@link java.security.Signature#update(byte[])}
* Proxy method for {@link java.security.Signature#update(byte[])}
* Proxy method for {@link java.security.Signature#update(byte[], int, int)}
* Proxy method for {@link java.security.Signature#initSign(java.security.PrivateKey)}
* Proxy method for {@link java.security.Signature#initSign(java.security.PrivateKey, java.security.SecureRandom)}
* Proxy method for {@link java.security.Signature#sign()}
* @return the result of the {@link java.security.Signature#sign()} method
* Proxy method for {@link java.security.Signature#verify(byte[])}
* @return
* Proxy method for {@link java.security.Signature#setParameter(java.security.spec.AlgorithmParameterSpec)}
protected void engineGetContextFromElement(Element element) {
{
} | 0 |
import java.util.concurrent.TimeUnit;
import org.apache.beam.runners.core.construction.PipelineOptionsTranslation;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PortablePipelineOptions;
import org.apache.flink.api.java.ExecutionEnvironment;
PipelineOptions pipelineOptions =
PipelineOptionsTranslation.fromProto(jobInfo.pipelineOptions());
int environmentCacheTTLMillis =
pipelineOptions.as(PortablePipelineOptions.class).getEnvironmentCacheMillis();
if (environmentCacheTTLMillis > 0) {
if (this.getClass().getClassLoader() != ExecutionEnvironment.class.getClassLoader()) {
LOG.warn(
"{} is not loaded on parent Flink classloader. "
+ "Falling back to synchronous environment release for job {}.",
this.getClass(),
jobInfo.jobId());
release(wrapper);
} else {
// Schedule task to clean the container later.
// Ensure that this class is loaded in the parent Flink classloader.
getExecutor()
.schedule(() -> release(wrapper), environmentCacheTTLMillis, TimeUnit.MILLISECONDS);
}
} else {
// Do not release this asynchronously, as the releasing could fail due to the classloader not
// being available anymore after the tasks have been removed from the execution engine.
release(wrapper);
} | 0 |
String className
Constructor constructor
String propertyName
Object propertyValue)
Method m = cl.getMethod("set" + propertyName,
new Class[]{propertyValue.getClass()});
Constructor constructor
| 0 |
* @version CVS $Id: Schema.java,v 1.2 2003/04/26 12:10:43 stephan Exp $
/**
*
*
* @return
*/
| 0 |
dbAccessor.dropConstraint("viewinstance", "FK_viewinst_view_name");
dbAccessor.addFKConstraint("viewinstance", "FK_viewinst_view_name", "view_name", "viewmain", "view_name", true); | 0 |
/** To allow child classes to know which path they bind to */
protected String getXPath() {
return this.xpath;
} | 0 |
* <li>{@link FTPClientConfig#SYST_NETWARE}</li> | 0 |
import java.util.LinkedHashMap;
import java.util.Map;
public synchronized Map<String, Object> getConnectionInfo(boolean brief) {
Map<String, Object> info = new LinkedHashMap<String, Object>();
info.put("remote_socket_address", getRemoteSocketAddress());
info.put("interest_ops", getInterestOps());
info.put("outstanding_requests", getOutstandingRequests());
info.put("packets_received", getPacketsReceived());
info.put("packets_sent", getPacketsSent());
if (!brief) {
info.put("session_id", getSessionId());
info.put("last_operation", getLastOperation());
info.put("established", getEstablished());
info.put("session_timeout", getSessionTimeout());
info.put("last_cxid", getLastCxid());
info.put("last_zxid", getLastZxid());
info.put("last_response_time", getLastResponseTime());
info.put("last_latency", getLastLatency());
info.put("min_latency", getMinLatency());
info.put("avg_latency", getAvgLatency());
info.put("max_latency", getMaxLatency());
}
return info;
} | 1 |
* @version $Revision$ | 0 |
* or more contributor license agreements. See the NOTICE file
* regarding copyright ownership. The ASF licenses this file
* with the License. You may obtain a copy of the License at
* KIND, either express or implied. See the License for the | 0 |
import org.apache.hc.core5.io.ModalCloseable;
import org.apache.hc.core5.util.Deadline;
private volatile Deadline expiryDeadline = Deadline.MIN_VALUE;
private volatile Deadline validityDeadline = Deadline.MIN_VALUE;
* @since 5.0 return value is Deadline.
public Deadline getValidityDeadline() {
public Deadline getExpiryDeadline() {
return this.expiryDeadline;
this.validityDeadline = Deadline.calculate(this.created, this.timeToLive);
this.expiryDeadline = this.validityDeadline;
this.expiryDeadline = Deadline.MIN_VALUE;
this.validityDeadline = Deadline.MIN_VALUE;
final Deadline newExpiry = Deadline.calculate(currentTime, expiryTime);
this.expiryDeadline = newExpiry.min(this.validityDeadline); | 0 |
private static final Set<String> PK_PROPERTY_IDS = new HashSet<>(
Arrays.asList(UPGRADE_REQUEST_ID, UPGRADE_ITEM_STAGE_ID));
private static final Set<String> PROPERTY_IDS = new HashSet<>();
private static final Map<Resource.Type, String> KEY_PROPERTY_IDS = new HashMap<>();
private static Map<String, String> STAGE_MAPPED_IDS = new HashMap<>();
Set<Resource> results = new LinkedHashSet<>();
List<UpgradeItemEntity> entities = new ArrayList<>();
Map<Long, Resource> resultMap = new HashMap<>();
requestId, new ArrayList<>(resultMap.keySet())); | 1 |
Set<BundleRevision> mandatoryRevisions,
// Maps a host capability to a map containing its potential fragments;
// the fragment map maps a fragment symbolic name to a map that maps
// a version to a list of fragments requirements matching that symbolic
// name and version.
Map<BundleCapability, Map<String, Map<Version, List<BundleRequirement>>>>
hostFragments = Collections.EMPTY_MAP;
hostFragments = populateDependents();
hostEntry : hostFragments.entrySet())
// Maps a host capability to a map containing its potential fragments;
// the fragment map maps a fragment symbolic name to a map that maps
// a version to a list of fragments requirements matching that symbolic
// name and version.
private Map<BundleCapability,
Map<String, Map<Version, List<BundleRequirement>>>> populateDependents()
Map<BundleCapability, Map<String, Map<Version, List<BundleRequirement>>>>
hostFragments = new HashMap<BundleCapability,
Map<String, Map<Version, List<BundleRequirement>>>>();
fragments = hostFragments.get(cap);
hostFragments.put(cap, fragments);
return hostFragments;
m_mandatoryRevisions, dependentMap, candidateMap,
m_allWrappedHosts, m_populateResultCache, m_fragmentsPresent);
} | 0 |
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
@SuppressFBWarnings(value = "PREDICTABLE_RANDOM",
justification = "predictable random is okay for testing") | 0 |
new InMemoryNodeModel(nd).getNodeHandler()); | 0 |
expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes();
expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes();
expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes(); | 0 |
// CLI-40: For some reason, the YYYY_MM_DD object gets quite
// confused here and returns 2003-12-22. If we make a new one | 0 |
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.time.Duration;
import java.util.Set; | 0 |
* @version $Id: JellyContextAdapter.java,v 1.2 2003/03/03 20:49:37 werken Exp $
public class JellyContextAdapter implements Context | 0 |
import com.google.cloud.dataflow.sdk.transforms.windowing.DefaultTrigger; | 0 |
import static org.junit.Assert.assertEquals;
| 0 |
import java.util.ArrayList;
import org.apache.accumulo.core.client.admin.DiskUsage;
@Override
public List<DiskUsage> getDiskUsage(Set<String> tables) throws AccumuloException, AccumuloSecurityException {
List<DiskUsage> diskUsages = new ArrayList<DiskUsage>();
for(String table : tables) {
TreeSet<String> tree = new TreeSet<String>();
tree.add(table);
diskUsages.add(new DiskUsage(tree, 1l));
}
return diskUsages;
}
| 0 |
package org.apache.felix.sigil.repository; | 0 |
this.context = new SessionHttpContext(session); | 0 |
if (arrayContains(new char[] { 'A', 'E', 'I', 'J', 'O', 'U', 'Y' }, chr)) { | 0 |
cfSet = new HashSet<>(); | 0 |
public SubmitDefinition() {
validateForm = true;
}
| 0 |
* Copyright 2000-2009 The Apache Software Foundation | 0 |
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. | 0 |
Assert.assertEquals(954466304, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax")));
Assert.assertEquals(14569736, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryUsed")));
Assert.assertEquals(136314880, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryMax")));
Assert.assertEquals(24993392, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryUsed")));
Assert.assertEquals(1, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "NumOpenConnections")));
Assert.assertEquals(4928861, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "ReceivedBytes")));
Assert.assertEquals(13.211112159230245, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "RpcProcessingTime_avg_time")));
Assert.assertEquals(25067, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "RpcProcessingTime_num_ops")));
Assert.assertEquals(0.19686821997924706, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "RpcQueueTime_avg_time")));
Assert.assertEquals(25067, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "RpcQueueTime_num_ops")));
Assert.assertEquals(6578899, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "SentBytes"))); | 0 |
Mockito.when(conn.isDataAvailable(Mockito.anyInt())).thenReturn(Boolean.TRUE);
Mockito.verify(conn).isDataAvailable(3000);
Mockito.when(conn.isDataAvailable(Mockito.anyInt())).thenReturn(Boolean.TRUE);
Mockito.verify(conn).isDataAvailable(3000);
Mockito.when(conn.isDataAvailable(Mockito.anyInt())).thenReturn(Boolean.TRUE);
Mockito.when(conn.isDataAvailable(Mockito.anyInt())).thenReturn(Boolean.FALSE);
Mockito.verify(conn).isDataAvailable(3000); | 0 |
private static SecurityTokenFactory instance;
if (instance == null) {
instance = securityTokenFactoryClass.newInstance();
return instance; | 0 |
Element imageElement = domFactory.createElement(SVGSyntax.SVG_IMAGE_TAG); | 0 |
if (connState.getOutputState() != ClientConnState.READY) { | 0 |
* $Id: TestCollectionUtils.java,v 1.13 2003/01/27 23:19:28 rwaldhoff Exp $
* $Revision: 1.13 $
* $Date: 2003/01/27 23:19:28 $
* @version $Revision: 1.13 $ $Date: 2003/01/27 23:19:28 $
public void testIntersectionUsesMethodEquals() {
// Let elta and eltb be objects...
Object elta = new Integer(17);
Object eltb = new Integer(17);
// ...which are equal...
assertEquals(elta,eltb);
assertEquals(eltb,elta);
// ...but not the same (==).
assertTrue(elta != eltb);
// Let cola and colb be collections...
Collection cola = new ArrayList();
Collection colb = new ArrayList();
// ...which contain elta and eltb,
// repsectively.
cola.add(elta);
colb.add(eltb);
// Then the intersection of the two
// should contain one element.
Collection intersection = CollectionUtils.intersection(cola,colb);
assertEquals(1,intersection.size());
// In practice, this element will be the same (==) as elta
// or eltb, although this isn't strictly part of the
// contract.
Object eltc = intersection.iterator().next();
assertTrue((eltc == elta && eltc != eltb) || (eltc != elta && eltc == eltb));
// In any event, this element remains equal,
// to both elta and eltb.
assertEquals(elta,eltc);
assertEquals(eltc,elta);
assertEquals(eltb,eltc);
assertEquals(eltc,eltb);
} | 0 |
@Named("hostKerberosIdentity")
ResourceProvider getHostKerberosIdentityResourceProvider(AmbariManagementController managementController);
| 0 |
/**
* The role that will be used when creating HRC's for the type
* {@link StageWrapper.Type#RU_TASKS}.
*/
protected static final String EXECUTE_TASK_ROLE = "ru_execute_tasks";
// if the service/component are specified, then make sure to grab them off
// of the wrapper so they can be stored on the command for use later
String serviceName = null;
String componentName = null;
TaskWrapper taskWrapper = wrapper.getTasks().get(0);
serviceName = taskWrapper.getService();
componentName = taskWrapper.getComponent();
// add each host to this stage
RequestResourceFilter filter = new RequestResourceFilter(serviceName, componentName,
new ArrayList<>(wrapper.getHosts()));
EXECUTE_TASK_ROLE, Collections.singletonList(filter), params); | 0 |
result=c14n.engineCanonicalizeSubTree(input.getSubNode(),excl);
} else {
result = c14n.engineCanonicalizeXPathNodeSet(input.getNodeSet());
} | 0 |
import org.apache.accumulo.core.client.security.SecurityErrorCode;
if (e.getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST)) | 0 |
import cz.seznam.euphoria.core.client.io.BoundedDataSource;
import cz.seznam.euphoria.core.client.io.BoundedPartition;
import cz.seznam.euphoria.core.client.io.BoundedReader;
private static <V> BoundedDataSource<V> fromBase64(String base64bytes)
private BoundedPartition<V> partition;
SourceSplit(BoundedPartition<V> partition) {
BoundedDataSource<V> source;
BoundedReader<V> reader = split.partition.openReader(); | 0 |
/*
* Copyright 2003-2004 The Apache Software Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* @version $Revision: 1.3 $ $Date: 2004/02/18 00:58:53 $ | 0 |
* Content encoder that cuts off after a defined number of bytes. This class
* is used to send content of HTTP messages where the end of the content entity
* is determined by the value of the <code>Content-Length header</code>.
* long.
* This decoder is optimized to transfer data directly from
* a {@link FileChannel} to the underlying I/O session's channel whenever
* possible avoiding intermediate buffering in the session buffer.
*
public class LengthDelimitedEncoder extends AbstractContentEncoder
final WritableByteChannel channel,
}
final FileChannel src,
long position,
| 0 |
* <p><strong>NOTE:</strong> the earlier statement about the fact that this class uses java.text.SimpleDateFormat
*
* @version CVS $Id: FormattingDateConvertor.java,v 1.3 2003/12/31 17:15:46 vgritsenko Exp $ | 0 |
* @version $Revision$ | 1 |
import org.apache.accumulo.core.client.BatchWriterConfig;
BatchWriter bw = c.createBatchWriter("test", new BatchWriterConfig());
BatchWriter bw = c.createBatchWriter("perDayCounts", new BatchWriterConfig());
BatchWriter bw = c.createBatchWriter("test", new BatchWriterConfig());
BatchDeleter deleter = c.createBatchDeleter("test", Constants.NO_AUTHS, 2, new BatchWriterConfig());
BatchWriter writer = c.createBatchWriter("test", new BatchWriterConfig());
writer = c.createBatchWriter("test", new BatchWriterConfig());
BatchWriter bw = c.createBatchWriter("test", new BatchWriterConfig());
MultiTableBatchWriter bw = c.createMultiTableBatchWriter(new BatchWriterConfig());
BatchWriter bw = c.createBatchWriter("test", new BatchWriterConfig()); | 0 |
assertEquals("Check tablename is set", 0, tablename.compareTo(opts.tableName));
assertEquals("Check tablename is set", 0, tablename.compareTo(opts.tableName)); | 0 |
LoggerFactory.getLogger(FinishCreateNamespace.class)
.debug("Created table " + namespaceInfo.namespaceId + " " + namespaceInfo.namespaceName); | 0 |
package org.apache.cocoon.template.jxtg.instruction; | 0 |
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting; | 0 |
/* $Id$ | 0 |
import java.util.HashSet;
import java.util.List;
import org.apache.commons.collections.MapUtils;
Set<String> cacheResult = logFileNameCache.getIfPresent(key);
HostLogFilesResponse logFilesResponse = helper.sendGetLogFileNamesRequest(host);
if (logFilesResponse != null && MapUtils.isNotEmpty(logFilesResponse.getHostLogFiles())) {
for (Map.Entry<String, List<String>> componentEntry : logFilesResponse.getHostLogFiles().entrySet()) {
final String key = generateKey(componentEntry.getKey(), host);
logFileNameCache.put(key, new HashSet<>(componentEntry.getValue()));
} | 0 |
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. | 0 |
package org.apache.cocoon.portal.event;
* This interface marks an event as an event for a coplet data (or
* for all coplet instance datas).
* @author <a href="mailto:[email protected]">Carsten Ziegeler</a>
* @version CVS $Id: CopletDataEvent.java,v 1.1 2004/02/12 09:33:30 cziegeler Exp $
public interface CopletDataEvent
extends ActionEvent {
} | 0 |
import org.apache.sshd.common.io.DefaultIoServiceFactoryFactory;
import org.apache.sshd.common.io.IoServiceFactoryFactory;
System.out.println("\nStarting " + description.getClassName() + ":" + description.getMethodName() + "...");
try {
System.out.println("Using provider: " + DefaultIoServiceFactoryFactory.newInstance(IoServiceFactoryFactory.class).getClass().getName());
} catch (Throwable t) {
// Ignore
}
System.out.println(); | 0 |
// This overrides the default behavior of BundleImpl.getFramework()
// to return "this", since the system bundle is the framework.
SecurityProvider getSecurityProvider()
{
return m_securityProvider;
}
| 1 |
String tableMetadataTabletDir = fs.choose(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), TABLE_TABLETS_TABLET_DIR));
String defaultMetadataTabletDir = fs.choose(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), Constants.DEFAULT_TABLET_LOCATION));
mfw.append(tableDirKey, new Value(tableMetadataTabletDir.getBytes()));
mfw.append(defaultDirKey, new Value(defaultMetadataTabletDir.getBytes()));
for (String s : Arrays.asList(tableMetadataTabletDir, defaultMetadataTabletDir)) {
Path dir = new Path(s); | 1 |
char[] text = new char[xmlr.getTextLength()];
xmlr.getTextCharacters(0, text, 0, xmlr.getTextLength());
writer.writeCharacters(text, 0, text.length); | 0 |
@SuppressWarnings("rawtypes") | 0 |
import org.apache.ambari.server.security.authentication.AmbariUserDetailsImpl;
Authentication auth = new AmbariUserAuthentication(null, new AmbariUserDetailsImpl(user, null, null)); | 0 |
* $Header: /home/jerenkrantz/tmp/commons/commons-convert/cvs/home/cvs/jakarta-commons//cli/src/java/org/apache/commons/cli/Parser.java,v 1.7 2002/10/24 23:17:49 jkeyes Exp $
* $Revision: 1.7 $
* $Date: 2002/10/24 23:17:49 $
* @version $Revision: 1.7 $
OptionGroup group = ( OptionGroup ) options.getOptionGroup( opt );
if( group.isRequired() ) {
requiredOptions.remove( group );
}
group.setSelected( opt ); | 0 |
public class Distinct<IN, ELEM, W extends Window<W>>
public <W extends Window<W>> OutputBuilder<IN, ELEM, W>
public static class OutputBuilder<IN, ELEM, W extends Window<W>> | 0 |
public void add(KeyReference keyReference) {
this._constructionElement.appendChild(keyReference.getElement());
XMLUtils.addReturnToElement(this._constructionElement);
}
* @return
return this.length(EncryptionConstants.EncryptionSpecNS,
EncryptionConstants._TAG_DATAREFERENCE);
* @return
return this.length(EncryptionConstants.EncryptionSpecNS,
EncryptionConstants._TAG_KEYREFERENCE);
* @return
public DataReference itemDataReference(int i) throws XMLSecurityException {
Element e = this.getChildElementLocalName(i,
EncryptionConstants.EncryptionSpecNS,
EncryptionConstants._TAG_DATAREFERENCE);
if (e != null) {
return new DataReference(e, this._baseURI);
} else {
return null;
}
* @return
public KeyReference itemKeyReference(int i) throws XMLSecurityException {
Element e = this.getChildElementLocalName(i,
EncryptionConstants.EncryptionSpecNS,
EncryptionConstants._TAG_KEYREFERENCE);
if (e != null) {
return new KeyReference(e, this._baseURI);
} else {
return null;
} | 0 |
import java.util.Iterator;
public class WebContinuation implements Comparable, Cloneable {
/**
* Creates a clone of this WebContinuation without trying to clone the actual continuation, the
* user object or the disposer.
*
* TODO: Check continuation, user object, disposer for implementing {@link Cloneable} or
* {@link java.io.Serializable}.
*/
public Object clone() {
WebContinuation clone = new WebContinuation(id, continuation, null, timeToLive, interpreterId, disposer);
// reset last access time
clone.lastAccessTime = this.lastAccessTime;
// recreate hierarchy recursively
for (Iterator iter = this.children.iterator(); iter.hasNext();) {
WebContinuation child = (WebContinuation) iter.next();
WebContinuation childClone = (WebContinuation) child.clone();
// relationships must be fixed manually
childClone.parentContinuation = clone;
clone.children.add(childClone);
}
return clone;
}
| 0 |
public class ByteBufferEntity extends AbstractHttpEntity {
public void close() throws IOException {
| 0 |
/**
* Convenience method to get the desired stack id from the service component
*
* @return the desired stack id
*/
StackId getDesiredStackId();
| 0 |
* <tt>"CommentURL"</tt> cookie attribute handler for RFC 2965 cookie spec.
*
* @since 4.0
*/ | 0 |
import org.apache.beam.dsls.sql.interpreter.operator.date.BeamSqlCurrentTimestampExpression;
case "Reinterpret":
return new BeamSqlCurrentTimeExpression(subExps);
case "LOCALTIMESTAMP":
return new BeamSqlCurrentTimestampExpression(subExps);
| 0 |
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
* @param params dictionary of key-value pairs to send in json body to Coordinator
Storage.MutateWork<T, E> work,
Map<String, String> params) {
if (coordinatorAllows(task, taskKey, slaPolicy, params)) {
ICoordinatorSlaPolicy slaPolicy,
Map<String, String> params)
String taskConfig = new TSerializer(new TSimpleJSONProtocol.Factory())
.toString(task.newBuilder());
JsonObject jsonBody = new JsonObject();
jsonBody.add("taskConfig", new JsonParser().parse(taskConfig));
jsonBody.addProperty(TASK_PARAM, taskKey);
params.forEach(jsonBody::addProperty);
.setBody(new Gson().toJson(jsonBody))
* @param params dictionary of key-value pairs to send in json body to Coordinator
Map<String, String> params,
work,
params)); | 0 |
import org.apache.beam.sdk.coders.CustomCoder;
public class ProtoCoder<T extends Message> extends CustomCoder<T> { | 0 |
public FileInputStream getFileInputStream(File file) throws IOException
return (FileInputStream) AccessController.doPrivileged(actions, m_acc);
public FileOutputStream getFileOutputStream(File file) throws IOException
return (FileOutputStream) AccessController.doPrivileged(actions, m_acc); | 0 |
String tableName, DBAccessor.DBColumnInfo columnInfo, boolean nullable) {
builder.append(" ALTER COLUMN ").append(columnInfo.getName()); | 0 |
return new BufferingNHttpEntity(response.getEntity(), HeapByteBufferAllocator.INSTANCE); | 0 |
import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkState;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.hash.Hashing; | 0 |
import org.osgi.framework.Constants;
* @param conf the instance configuration.
public ProvidedService(ProvidedServiceHandler handler, String[] specification, int factoryPolicy, Class creationStrategyClass, Dictionary conf) {
// Add the service.* if defined
if (conf.get(Constants.SERVICE_PID) != null) {
addProperty(new Property(Constants.SERVICE_PID, null, null, (String) conf.get(Constants.SERVICE_PID), String.class.getName(), handler.getInstanceManager(), handler));
}
if (conf.get(Constants.SERVICE_RANKING) != null) {
addProperty(new Property(Constants.SERVICE_RANKING, null, null, (String) conf.get(Constants.SERVICE_RANKING), "int", handler.getInstanceManager(), handler));
}
if (conf.get(Constants.SERVICE_VENDOR) != null) {
addProperty(new Property(Constants.SERVICE_VENDOR, null, null, (String) conf.get(Constants.SERVICE_VENDOR), String.class.getName(), handler.getInstanceManager(), handler));
}
if (conf.get(Constants.SERVICE_DESCRIPTION) != null) {
addProperty(new Property(Constants.SERVICE_DESCRIPTION, null, null, (String) conf.get(Constants.SERVICE_DESCRIPTION), String.class.getName(), handler.getInstanceManager(), handler));
}
| 0 |