code | label | programming_language | cwe_id | cwe_name | description | url | label_name |
---|---|---|---|---|---|---|---|
TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
switch (input->type) {
case kTfLiteFloat32: {
optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
} break;
case kTfLiteInt8: {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
EvalUsingLookupTable(data, input, output);
return kTfLiteOk;
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only float32 and int8 is supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} | 1 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | safe |
TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data);
TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* hash;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash));
TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2);
// Support up to 32 bits.
TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input));
TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
TF_LITE_ENSURE(context, SizeOfDimension(input, 0) >= 1);
if (NumInputs(node) == 3) {
const TfLiteTensor* weight;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &weight));
TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0),
SizeOfDimension(input, 0));
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
switch (params->type) {
case kTfLiteLshProjectionSparse:
outputSize->data[0] = SizeOfDimension(hash, 0);
break;
case kTfLiteLshProjectionDense:
outputSize->data[0] = SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1);
break;
default:
return kTfLiteError;
}
return context->ResizeTensor(context, output, outputSize);
} | 1 | C++ | CWE-369 | Divide By Zero | The product divides a value by zero. | https://cwe.mitre.org/data/definitions/369.html | safe |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// Check for supported activation types.
auto* params =
reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const bool is_quantized =
((filter->type == kTfLiteUInt8) || (filter->type == kTfLiteInt8));
const bool is_hybrid = is_quantized && (input->type == kTfLiteFloat32);
const bool is_pie = kernel_type == kLegacyPie;
// Pie and hybrid path supports all kinds of fused activations, otherwise only
// clipping activations are supported.
if (!is_pie && !is_hybrid) {
TF_LITE_ENSURE(context, params->activation == kTfLiteActNone ||
params->activation == kTfLiteActRelu ||
params->activation == kTfLiteActReluN1To1 ||
params->activation == kTfLiteActRelu6);
}
return PrepareImpl(context, node);
} | 0 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | vulnerable |
ssize_t enc_untrusted_read(int fd, void *buf, size_t count) {
return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall(
asylo::system_call::kSYS_read, fd, buf, count));
} | 0 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | vulnerable |
static int traceDirective(MaState *state, cchar *key, cchar *value)
{
HttpRoute *route;
char *option, *ovalue, *tok;
route = state->route;
route->trace = httpCreateTrace(route->trace);
for (option = stok(sclone(value), " \t", &tok); option; option = stok(0, " \t", &tok)) {
option = stok(option, " =\t,", &ovalue);
ovalue = strim(ovalue, "\"'", MPR_TRIM_BOTH);
if (smatch(option, "content")) {
httpSetTraceContentSize(route->trace, (ssize) getnum(ovalue));
} else {
httpSetTraceEventLevel(route->trace, option, atoi(ovalue));
}
}
return 0;
} | 0 | C++ | NVD-CWE-Other | Other | NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset. | https://nvd.nist.gov/vuln/categories | vulnerable |
http_error_t::make_body (int n, const str &si, const str &aux)
{
strbuf b;
str ldesc;
const str sdesc = http_status.get_desc (n, &ldesc);
b << "<html>\n"
<< " <head>\n"
<< " <title>" << n << " " << sdesc << "</title>\n"
<< " </head>\n"
<< " <body>\n"
<< " <h1>Error " << n << " " << sdesc << "</h1><br><br>\n"
;
if (n == HTTP_NOT_FOUND && aux) {
b << "The file <tt>" << aux
<< "</tt> was not found on this server.<br><br>\n\n";
}
b << " <hr>\n"
<< " <i>" << si << "</i>\n"
<< " <br>\n"
<< " </body>\n"
<< "</html>\n"
;
return b;
} | 0 | C++ | CWE-79 | Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') | The software does not neutralize or incorrectly neutralizes user-controllable input before it is placed in output that is used as a web page that is served to other users. | https://cwe.mitre.org/data/definitions/79.html | vulnerable |
TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) {
TFLITE_DCHECK(node->user_data != nullptr);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
HardSwishParams* params = static_cast<HardSwishParams*>(node->user_data);
params->input_zero_point = input->params.zero_point;
params->output_zero_point = output->params.zero_point;
const float input_scale = input->params.scale;
const float hires_input_scale = (1.0f / 128.0f) * input_scale;
const float reluish_scale = 3.0f / 32768.0f;
const float output_scale = output->params.scale;
const double output_multiplier =
static_cast<double>(hires_input_scale / output_scale);
int32_t output_multiplier_fixedpoint_int32;
QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32,
&params->output_multiplier_exponent);
DownScaleInt32ToInt16Multiplier(
output_multiplier_fixedpoint_int32,
&params->output_multiplier_fixedpoint_int16);
TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0);
const double reluish_multiplier =
static_cast<double>(hires_input_scale / reluish_scale);
int32_t reluish_multiplier_fixedpoint_int32;
QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32,
&params->reluish_multiplier_exponent);
DownScaleInt32ToInt16Multiplier(
reluish_multiplier_fixedpoint_int32,
&params->reluish_multiplier_fixedpoint_int16);
}
return kTfLiteOk;
} | 1 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | safe |
void CalculateOutputIndexRowSplit(
OpKernelContext* context, const RowPartitionTensor& row_split,
const vector<INDEX_TYPE>& parent_output_index,
INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size,
vector<INDEX_TYPE>* result) {
INDEX_TYPE row_split_size = row_split.size();
if (row_split_size > 0) {
result->reserve(row_split(row_split_size - 1));
}
for (INDEX_TYPE i = 0; i < row_split_size - 1; ++i) {
INDEX_TYPE row_length = row_split(i + 1) - row_split(i);
INDEX_TYPE real_length = std::min(output_size, row_length);
INDEX_TYPE parent_output_index_current = parent_output_index[i];
if (parent_output_index_current == -1) {
real_length = 0;
}
for (INDEX_TYPE j = 0; j < real_length; ++j) {
result->push_back(parent_output_index_current);
parent_output_index_current += output_index_multiplier;
}
for (INDEX_TYPE j = 0; j < row_length - real_length; ++j) {
result->push_back(-1);
}
}
if (row_split_size > 0) {
OP_REQUIRES(context, result->size() == row_split(row_split_size - 1),
errors::InvalidArgument("Invalid row split size."));
}
} | 0 | C++ | CWE-131 | Incorrect Calculation of Buffer Size | The software does not correctly calculate the size to be used when allocating a buffer, which could lead to a buffer overflow. | https://cwe.mitre.org/data/definitions/131.html | vulnerable |
TfLiteStatus PrepareHashtableImport(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);
const TfLiteTensor* input_resource_id_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputResourceIdTensor,
&input_resource_id_tensor));
TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_resource_id_tensor), 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_resource_id_tensor, 0), 1);
const TfLiteTensor* key_tensor;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kKeyTensor, &key_tensor));
const TfLiteTensor* value_tensor;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kValueTensor, &value_tensor));
TF_LITE_ENSURE(context, (key_tensor->type == kTfLiteInt64 &&
value_tensor->type == kTfLiteString) ||
(key_tensor->type == kTfLiteString &&
value_tensor->type == kTfLiteInt64));
// TODO(b/144731295): Tensorflow lookup ops support 1-D vector in storing
// values.
TF_LITE_ENSURE(context, HaveSameShapes(key_tensor, value_tensor));
return kTfLiteOk;
} | 1 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | safe |
String StringUtil::Crypt(const String& input, const char *salt /* = "" */) {
assertx(salt);
if (salt[0] == '\0') {
raise_notice("crypt(): No salt parameter was specified."
" You must use a randomly generated salt and a strong"
" hash function to produce a secure hash.");
}
return String(string_crypt(input.c_str(), salt), AttachString);
} | 1 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | safe |
static TfLiteRegistration DynamicCopyOpRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
// Output 0 is dynamic
TfLiteTensor* output0 = GetOutput(context, node, 0);
SetTensorToDynamic(output0);
// Output 1 has the same shape as input.
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output1 = GetOutput(context, node, 1);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(
context, output1, TfLiteIntArrayCopy(input->dims)));
return kTfLiteOk;
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
// Not implemented since this isn't required in testing.
return kTfLiteOk;
};
return reg;
} | 0 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | vulnerable |
string encryptBLSKeyShare2Hex(int *errStatus, char *err_string, const char *_key) {
CHECK_STATE(errStatus);
CHECK_STATE(err_string);
CHECK_STATE(_key);
auto keyArray = make_shared<vector<char>>(BUF_LEN, 0);
auto encryptedKey = make_shared<vector<uint8_t>>(BUF_LEN, 0);
vector<char> errMsg(BUF_LEN, 0);
strncpy(keyArray->data(), _key, BUF_LEN);
*errStatus = 0;
unsigned int encryptedLen = 0;
sgx_status_t status = trustedEncryptKeyAES(eid, errStatus, errMsg.data(), keyArray->data(), encryptedKey->data(), &encryptedLen);
HANDLE_TRUSTED_FUNCTION_ERROR(status, *errStatus, errMsg.data());
SAFE_CHAR_BUF(resultBuf, 2 * BUF_LEN + 1);
carray2Hex(encryptedKey->data(), encryptedLen, resultBuf, 2 * BUF_LEN + 1);
return string(resultBuf);
} | 0 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | vulnerable |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis));
// Make sure the axis is only 1 dimension.
TF_LITE_ENSURE_EQ(context, NumElements(axis), 1);
// Make sure the axis is only either int32 or int64.
TF_LITE_ENSURE(context,
axis->type == kTfLiteInt32 || axis->type == kTfLiteInt64);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
auto* params = reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data);
switch (params->output_type) {
case kTfLiteInt32:
output->type = kTfLiteInt32;
break;
case kTfLiteInt64:
output->type = kTfLiteInt64;
break;
default:
context->ReportError(context, "Unknown index output data type: %d",
params->output_type);
return kTfLiteError;
}
// Check conditions for different types.
switch (input->type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt32:
break;
default:
context->ReportError(
context,
"Unknown input type: %d, only float32 and int types are supported",
input->type);
return kTfLiteError;
}
TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
if (IsConstantTensor(axis)) {
TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output));
} else {
SetTensorToDynamic(output);
}
return kTfLiteOk;
} | 1 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | safe |
int jas_matrix_cmp(jas_matrix_t *mat0, jas_matrix_t *mat1)
{
jas_matind_t i;
jas_matind_t j;
if (mat0->numrows_ != mat1->numrows_ || mat0->numcols_ !=
mat1->numcols_) {
return 1;
}
for (i = 0; i < mat0->numrows_; i++) {
for (j = 0; j < mat0->numcols_; j++) {
if (jas_matrix_get(mat0, i, j) != jas_matrix_get(mat1, i, j)) {
return 1;
}
}
}
return 0;
} | 1 | C++ | CWE-20 | Improper Input Validation | The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly. | https://cwe.mitre.org/data/definitions/20.html | safe |
std::string queueloader::get_filename(const std::string& str) {
std::string fn = ctrl->get_dlpath();
if (fn[fn.length()-1] != NEWSBEUTER_PATH_SEP[0])
fn.append(NEWSBEUTER_PATH_SEP);
char buf[1024];
snprintf(buf, sizeof(buf), "%s", str.c_str());
char * base = basename(buf);
if (!base || strlen(base) == 0) {
char lbuf[128];
time_t t = time(nullptr);
strftime(lbuf, sizeof(lbuf), "%Y-%b-%d-%H%M%S.unknown", localtime(&t));
fn.append(lbuf);
} else {
fn.append(utils::replace_all(base, "'", "%27"));
}
return fn;
} | 1 | C++ | CWE-78 | Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection') | The software constructs all or part of an OS command using externally-influenced input from an upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended OS command when it is sent to a downstream component. | https://cwe.mitre.org/data/definitions/78.html | safe |
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) {
ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) {
double real_multiplier = input->params.scale / output->params.scale;
QuantizeMultiplier(real_multiplier, &data->output_multiplier,
&data->output_shift);
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
} | 0 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | vulnerable |
int jas_matrix_cmp(jas_matrix_t *mat0, jas_matrix_t *mat1)
{
int i;
int j;
if (mat0->numrows_ != mat1->numrows_ || mat0->numcols_ !=
mat1->numcols_) {
return 1;
}
for (i = 0; i < mat0->numrows_; i++) {
for (j = 0; j < mat0->numcols_; j++) {
if (jas_matrix_get(mat0, i, j) != jas_matrix_get(mat1, i, j)) {
return 1;
}
}
}
return 0;
} | 0 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | vulnerable |
static int virtualHostDirective(MaState *state, cchar *key, cchar *value)
{
state = maPushState(state);
if (state->enabled) {
/*
Inherit the current default route configuration (only)
Other routes are not inherited due to the reset routes below
*/
state->route = httpCreateInheritedRoute(httpGetHostDefaultRoute(state->host));
state->route->ssl = 0;
state->auth = state->route->auth;
state->host = httpCloneHost(state->host);
httpResetRoutes(state->host);
httpSetRouteHost(state->route, state->host);
httpSetHostDefaultRoute(state->host, state->route);
/* Set a default host and route name */
if (value) {
httpSetHostName(state->host, ssplit(sclone(value), " \t,", NULL));
httpSetRouteName(state->route, sfmt("default-%s", state->host->name));
/*
Save the endpoints until the close of the VirtualHost to closeVirtualHostDirective can
add the virtual host to the specified endpoints.
*/
state->endpoints = sclone(value);
}
}
return 0;
} | 1 | C++ | NVD-CWE-Other | Other | NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset. | https://nvd.nist.gov/vuln/categories | safe |
TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) {
TFLITE_DCHECK(node->user_data != nullptr);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
HardSwishParams* params = static_cast<HardSwishParams*>(node->user_data);
params->input_zero_point = input->params.zero_point;
params->output_zero_point = output->params.zero_point;
const float input_scale = input->params.scale;
const float hires_input_scale = (1.0f / 128.0f) * input_scale;
const float reluish_scale = 3.0f / 32768.0f;
const float output_scale = output->params.scale;
const double output_multiplier =
static_cast<double>(hires_input_scale / output_scale);
int32_t output_multiplier_fixedpoint_int32;
QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32,
&params->output_multiplier_exponent);
DownScaleInt32ToInt16Multiplier(
output_multiplier_fixedpoint_int32,
&params->output_multiplier_fixedpoint_int16);
TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0);
const double reluish_multiplier =
static_cast<double>(hires_input_scale / reluish_scale);
int32_t reluish_multiplier_fixedpoint_int32;
QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32,
&params->reluish_multiplier_exponent);
DownScaleInt32ToInt16Multiplier(
reluish_multiplier_fixedpoint_int32,
&params->reluish_multiplier_fixedpoint_int16);
}
return kTfLiteOk;
} | 1 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | safe |
int fail(int* error_pos
, std::vector<lazy_entry*>& stack
, char const* start
, char const* orig_start)
{
while (!stack.empty()) {
lazy_entry* top = stack.back();
if (top->type() == lazy_entry::dict_t || top->type() == lazy_entry::list_t)
{
top->pop();
break;
}
stack.pop_back();
}
if (error_pos) *error_pos = start - orig_start;
return -1;
} | 1 | C++ | CWE-119 | Improper Restriction of Operations within the Bounds of a Memory Buffer | The software performs operations on a memory buffer, but it can read from or write to a memory location that is outside of the intended boundary of the buffer. | https://cwe.mitre.org/data/definitions/119.html | safe |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);
TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes);
if (IsDynamicTensor(output_values)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
}
const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
const int32 k = top_k->data.i32[0];
// The tensor can have more than 2 dimensions or even be a vector, the code
// anyway calls the internal dimension as row;
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const int32 row_size = input->dims->data[input->dims->size - 1];
int32 num_rows = 1;
for (int i = 0; i < input->dims->size - 1; ++i) {
num_rows *= input->dims->data[i];
}
switch (output_values->type) {
case kTfLiteFloat32:
TopK(row_size, num_rows, GetTensorData<float>(input), k,
output_indexes->data.i32, GetTensorData<float>(output_values));
break;
case kTfLiteUInt8:
TopK(row_size, num_rows, input->data.uint8, k, output_indexes->data.i32,
output_values->data.uint8);
break;
case kTfLiteInt8:
TopK(row_size, num_rows, input->data.int8, k, output_indexes->data.i32,
output_values->data.int8);
break;
case kTfLiteInt32:
TopK(row_size, num_rows, input->data.i32, k, output_indexes->data.i32,
output_values->data.i32);
break;
case kTfLiteInt64:
TopK(row_size, num_rows, input->data.i64, k, output_indexes->data.i32,
output_values->data.i64);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s is currently not supported by TopK.",
TfLiteTypeGetName(output_values->type));
return kTfLiteError;
}
return kTfLiteOk;
} | 0 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | vulnerable |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// Just copy input to output.
const TfLiteTensor* input = GetInput(context, node, kInput);
TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* axis = GetInput(context, node, kAxis);
if (IsDynamicTensor(output)) {
int axis_value;
TF_LITE_ENSURE_OK(context,
GetAxisValueFromTensor(context, *axis, &axis_value));
TF_LITE_ENSURE_OK(context,
ExpandTensorDim(context, *input, axis_value, output));
}
if (output->type == kTfLiteString) {
TfLiteTensorRealloc(input->bytes, output);
}
memcpy(output->data.raw, input->data.raw, input->bytes);
return kTfLiteOk;
} | 0 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | vulnerable |
R_API RBinJavaAttrInfo *r_bin_java_rtv_annotations_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
ut32 i = 0;
ut64 offset = 0;
if (buf_offset + 8 > sz) {
return NULL;
}
RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
offset += 6;
if (attr) {
attr->type = R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR;
attr->info.annotation_array.num_annotations = R_BIN_JAVA_USHORT (buffer, offset);
offset += 2;
attr->info.annotation_array.annotations = r_list_newf (r_bin_java_annotation_free);
for (i = 0; i < attr->info.annotation_array.num_annotations; i++) {
if (offset >= sz) {
break;
}
RBinJavaAnnotation *annotation = r_bin_java_annotation_new (buffer + offset, sz - offset, buf_offset + offset);
if (annotation) {
offset += annotation->size;
r_list_append (attr->info.annotation_array.annotations, (void *) annotation);
}
}
attr->size = offset;
}
return attr;
} | 0 | C++ | CWE-788 | Access of Memory Location After End of Buffer | The software reads or writes to a buffer using an index or pointer that references a memory location after the end of the buffer. | https://cwe.mitre.org/data/definitions/788.html | vulnerable |
jas_seq2d_t *jas_seq2d_copy(jas_seq2d_t *x)
{
jas_matrix_t *y;
int i;
int j;
y = jas_seq2d_create(jas_seq2d_xstart(x), jas_seq2d_ystart(x),
jas_seq2d_xend(x), jas_seq2d_yend(x));
assert(y);
for (i = 0; i < x->numrows_; ++i) {
for (j = 0; j < x->numcols_; ++j) {
*jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j);
}
}
return y;
} | 0 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | vulnerable |
void do_change_user(int afdt_fd) {
std::string uname;
lwp_read(afdt_fd, uname);
if (!uname.length()) return;
StructuredLogEntry* log = nullptr;
int err = 0;
SCOPE_EXIT {
if (log) {
log->setInt("errno", err);
log->setStr("new_user", uname);
StructuredLog::log("hhvm_lightprocess_error", *log);
delete log;
}
};
auto buf = PasswdBuffer{};
struct passwd *pw;
if (getpwnam_r(uname.c_str(), &buf.ent, buf.data.get(), buf.size, &pw)) {
err = errno;
log = new StructuredLogEntry();
log->setStr("function", "getpwnam_r");
if (LightProcess::g_strictUser) {
throw std::runtime_error{"getpwnam_r(): " + folly::errnoStr(err)};
}
return;
}
if (!pw) {
log = new StructuredLogEntry();
log->setStr("function", "getpwnam_r");
if (LightProcess::g_strictUser) {
throw std::runtime_error{"getpwnam_r(): not found"};
}
return;
}
if (pw->pw_gid) {
if (initgroups(pw->pw_name, pw->pw_gid)) {
err = errno;
log = new StructuredLogEntry();
log->setStr("function", "initgroups");
}
if (setgid(pw->pw_gid)) {
if (!log) {
err = errno;
log = new StructuredLogEntry();
log->setStr("function", "setgid");
if (LightProcess::g_strictUser) {
throw std::runtime_error{"setgid():" + folly::errnoStr(err)};
}
}
}
}
if (pw->pw_uid) {
if (setuid(pw->pw_uid)) {
if (!log) {
err = errno;
log = new StructuredLogEntry();
log->setStr("function", "setuid");
if (LightProcess::g_strictUser) {
throw std::runtime_error{"setuid():" + folly::errnoStr(err)};
}
}
}
}
} | 1 | C++ | CWE-22 | Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal') | The software uses external input to construct a pathname that is intended to identify a file or directory that is located underneath a restricted parent directory, but the software does not properly neutralize special elements within the pathname that can cause the pathname to resolve to a location that is outside of the restricted directory. | https://cwe.mitre.org/data/definitions/22.html | safe |
void ecall_filter(uint8_t *condition, size_t condition_length,
uint8_t *input_rows, size_t input_rows_length,
uint8_t **output_rows, size_t *output_rows_length) {
// Guard against operating on arbitrary enclave memory
assert(sgx_is_outside_enclave(input_rows, input_rows_length) == 1);
sgx_lfence();
try {
filter(condition, condition_length,
input_rows, input_rows_length,
output_rows, output_rows_length);
} catch (const std::runtime_error &e) {
ocall_throw(e.what());
}
} | 1 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | safe |
RawTile KakaduImage::getRegion( int seq, int ang, unsigned int res, int layers, int x, int y, unsigned int w, unsigned int h )
{
// Scale up our output bit depth to the nearest factor of 8
unsigned int obpc = bpc;
if( bpc <= 16 && bpc > 8 ) obpc = 16;
else if( bpc <= 8 ) obpc = 8;
#ifdef DEBUG
Timer timer;
timer.start();
#endif
RawTile rawtile( 0, res, seq, ang, w, h, channels, obpc );
if( obpc == 16 ) rawtile.data = new unsigned short[w*h*channels];
else if( obpc == 8 ) rawtile.data = new unsigned char[w*h*channels];
else throw file_error( "Kakadu :: Unsupported number of bits" );
rawtile.dataLength = w*h*channels*(obpc/8);
rawtile.filename = getImagePath();
rawtile.timestamp = timestamp;
process( res, layers, x, y, w, h, rawtile.data );
#ifdef DEBUG
logfile << "Kakadu :: getRegion() :: " << timer.getTime() << " microseconds" << endl;
#endif
return rawtile;
} | 0 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | vulnerable |
ProcessTCPHeader(tTcpIpPacketParsingResult _res, PVOID pIpHeader, ULONG len, USHORT ipHeaderSize)
{
ULONG tcpipDataAt;
tTcpIpPacketParsingResult res = _res;
tcpipDataAt = ipHeaderSize + sizeof(TCPHeader);
res.xxpStatus = ppresXxpIncomplete;
res.TcpUdp = ppresIsTCP;
if (len >= tcpipDataAt)
{
TCPHeader *pTcpHeader = (TCPHeader *)RtlOffsetToPointer(pIpHeader, ipHeaderSize);
res.xxpStatus = ppresXxpKnown;
tcpipDataAt = ipHeaderSize + TCP_HEADER_LENGTH(pTcpHeader);
res.XxpIpHeaderSize = tcpipDataAt;
}
else
{
DPrintf(2, ("tcp: %d < min headers %d\n", len, tcpipDataAt));
}
return res;
} | 0 | C++ | CWE-20 | Improper Input Validation | The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly. | https://cwe.mitre.org/data/definitions/20.html | vulnerable |
void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) {
auto tf_dlm_context = GetDlContext(h, status);
if (!status->status.ok()) {
return nullptr;
}
auto* tf_dlm_data = TFE_TensorHandleDevicePointer(h, status);
if (!status->status.ok()) {
return nullptr;
}
const Tensor* tensor = GetTensorFromHandle(h, status);
TF_DataType data_type = static_cast<TF_DataType>(tensor->dtype());
auto tf_dlm_type = GetDlDataType(data_type, status);
if (!status->status.ok()) {
return nullptr;
}
TensorReference tensor_ref(*tensor); // This will call buf_->Ref()
auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref);
tf_dlm_tensor_ctx->reference = tensor_ref;
DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor;
dlm_tensor->manager_ctx = tf_dlm_tensor_ctx;
dlm_tensor->deleter = &DLManagedTensorDeleter;
dlm_tensor->dl_tensor.ctx = tf_dlm_context;
int ndim = tensor->dims();
dlm_tensor->dl_tensor.ndim = ndim;
dlm_tensor->dl_tensor.data = tf_dlm_data;
dlm_tensor->dl_tensor.dtype = tf_dlm_type;
std::vector<int64_t>* shape_arr = &tf_dlm_tensor_ctx->shape;
std::vector<int64_t>* stride_arr = &tf_dlm_tensor_ctx->strides;
shape_arr->resize(ndim);
stride_arr->resize(ndim, 1);
for (int i = 0; i < ndim; i++) {
(*shape_arr)[i] = tensor->dim_size(i);
}
for (int i = ndim - 2; i >= 0; --i) {
(*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1];
}
dlm_tensor->dl_tensor.shape = shape_arr->data();
// There are two ways to represent compact row-major data
// 1) nullptr indicates tensor is compact and row-majored.
// 2) fill in the strides array as the real case for compact row-major data.
// Here we choose option 2, since some frameworks didn't handle the strides
// argument properly.
dlm_tensor->dl_tensor.strides = stride_arr->data();
dlm_tensor->dl_tensor.byte_offset =
0; // TF doesn't handle the strides and byte_offsets here
return static_cast<void*>(dlm_tensor);
} | 1 | C++ | CWE-908 | Use of Uninitialized Resource | The software uses or accesses a resource that has not been initialized. | https://cwe.mitre.org/data/definitions/908.html | safe |
TensorBuffer* FromProtoField<ResourceHandle>(Allocator* a,
const TensorProto& in, int64_t n) {
CHECK_GT(n, 0);
Buffer<ResourceHandle>* buf = new Buffer<ResourceHandle>(a, n);
ResourceHandle* data = buf->template base<ResourceHandle>();
if (data == nullptr) {
buf->Unref();
return nullptr;
}
const int64_t in_n = ProtoHelper<ResourceHandle>::NumElements(in);
if (in_n <= 0) {
std::fill_n(data, n, ResourceHandle());
} else {
// If tensor shape says we have n < in_n elements in the output tensor
// then make sure to only decode the first n out of the in_n elements in the
// in tensors. In all other cases, we decode all in_n elements of in and set
// the remaining elements up to n to be the default ResourceHandle() value.
const int64_t real_n = n < in_n ? n : in_n;
for (int64_t i = 0; i < real_n; ++i) {
Status s = ResourceHandle::BuildResourceHandle(in.resource_handle_val(i),
&data[i]);
if (!s.ok()) {
LOG(ERROR) << "Could not decode resource handle from proto \""
<< in.resource_handle_val(i).ShortDebugString()
<< "\", returned status: " << s.ToString();
buf->Unref();
return nullptr;
}
}
for (int64_t i = in_n; i < n; ++i) {
data[i] = ResourceHandle();
}
}
return buf;
} | 1 | C++ | CWE-617 | Reachable Assertion | The product contains an assert() or similar statement that can be triggered by an attacker, which leads to an application exit or other behavior that is more severe than necessary. | https://cwe.mitre.org/data/definitions/617.html | safe |
static Status ValidateFunctionNotRecursive(const FunctionDef& function) {
const auto& function_name = function.signature().name();
for (const auto& node : function.node_def()) {
if (node.op() == function_name) {
return errors::FailedPrecondition(
"Function ", function_name,
" is self recursive and TensorFlow does not support this scenario.");
}
}
return Status::OK();
} | 1 | C++ | CWE-400 | Uncontrolled Resource Consumption | The software does not properly control the allocation and maintenance of a limited resource, thereby enabling an actor to influence the amount of resources consumed, eventually leading to the exhaustion of available resources. | https://cwe.mitre.org/data/definitions/400.html | safe |
String string_chunk_split(const char *src, int srclen, const char *end,
int endlen, int chunklen) {
int chunks = srclen / chunklen; // complete chunks!
int restlen = srclen - chunks * chunklen; /* srclen % chunklen */
String ret(
safe_address(
chunks + 1,
endlen,
srclen
),
ReserveString
);
char *dest = ret.bufferSlice().ptr;
const char *p; char *q;
const char *pMax = src + srclen - chunklen + 1;
for (p = src, q = dest; p < pMax; ) {
memcpy(q, p, chunklen);
q += chunklen;
memcpy(q, end, endlen);
q += endlen;
p += chunklen;
}
if (restlen) {
memcpy(q, p, restlen);
q += restlen;
memcpy(q, end, endlen);
q += endlen;
}
ret.setSize(q - dest);
return ret;
} | 1 | C++ | CWE-189 | Numeric Errors | Weaknesses in this category are related to improper calculation or conversion of numbers. | https://cwe.mitre.org/data/definitions/189.html | safe |
static int _hostsock_getpeername(
oe_fd_t* sock_,
struct oe_sockaddr* addr,
oe_socklen_t* addrlen)
{
int ret = -1;
sock_t* sock = _cast_sock(sock_);
oe_socklen_t addrlen_in = 0;
oe_errno = 0;
if (!sock)
OE_RAISE_ERRNO(OE_EINVAL);
if (addrlen)
addrlen_in = *addrlen;
if (oe_syscall_getpeername_ocall(
&ret,
sock->host_fd,
(struct oe_sockaddr*)addr,
addrlen_in,
addrlen) != OE_OK)
{
OE_RAISE_ERRNO(OE_EINVAL);
}
done:
return ret;
} | 0 | C++ | NVD-CWE-Other | Other | NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset. | https://nvd.nist.gov/vuln/categories | vulnerable |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
output->type = kTfLiteInt32;
// By design, the input shape is always known at the time of Prepare, even
// if the preceding op that generates |input| is dynamic. Thus, we can
// always compute the rank immediately, without waiting for Eval.
SetTensorToPersistentRo(output);
// Rank produces a 0-D int32 Tensor representing the rank of input.
TfLiteIntArray* output_size = TfLiteIntArrayCreate(0);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size));
TF_LITE_ENSURE_EQ(context, NumDimensions(output), 0);
// Immediately propagate the known rank to the output tensor. This allows
// downstream ops that rely on the value to use it during prepare.
if (output->type == kTfLiteInt32) {
int32_t* output_data = GetTensorData<int32_t>(output);
*output_data = NumDimensions(input);
} else {
return kTfLiteError;
}
return kTfLiteOk;
} | 0 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | vulnerable |
njs_vm_start(njs_vm_t *vm)
{
njs_int_t ret;
ret = njs_module_load(vm);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
ret = njs_vmcode_interpreter(vm, vm->start);
return (ret == NJS_ERROR) ? NJS_ERROR : NJS_OK;
} | 0 | C++ | CWE-416 | Use After Free | Referencing memory after it has been freed can cause a program to crash, use unexpected values, or execute code. | https://cwe.mitre.org/data/definitions/416.html | vulnerable |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* start;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartTensor, &start));
const TfLiteTensor* limit;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kLimitTensor, &limit));
const TfLiteTensor* delta;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDeltaTensor, &delta));
// Make sure all the inputs are scalars.
TF_LITE_ENSURE_EQ(context, NumDimensions(start), 0);
TF_LITE_ENSURE_EQ(context, NumDimensions(limit), 0);
TF_LITE_ENSURE_EQ(context, NumDimensions(delta), 0);
// Currently only supports int32 and float.
// TODO(b/117912892): Support quantization as well.
const auto dtype = start->type;
if (dtype != kTfLiteFloat32 && dtype != kTfLiteInt32) {
context->ReportError(context, "Unknown index output data type: %s",
TfLiteTypeGetName(dtype));
return kTfLiteError;
}
TF_LITE_ENSURE_TYPES_EQ(context, limit->type, dtype);
TF_LITE_ENSURE_TYPES_EQ(context, delta->type, dtype);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = dtype;
if (IsConstantTensor(start) && IsConstantTensor(limit) &&
IsConstantTensor(delta)) {
return ResizeOutput(context, start, limit, delta, output);
}
SetTensorToDynamic(output);
return kTfLiteOk;
} | 1 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | safe |
Http::FilterDataStatus Context::onResponseBody(int body_buffer_length, bool end_of_stream) {
if (!wasm_->onResponseBody_) {
return Http::FilterDataStatus::Continue;
}
switch (wasm_
->onResponseBody_(this, id_, static_cast<uint32_t>(body_buffer_length),
static_cast<uint32_t>(end_of_stream))
.u64_) {
case 0:
return Http::FilterDataStatus::Continue;
case 1:
return Http::FilterDataStatus::StopIterationAndBuffer;
case 2:
return Http::FilterDataStatus::StopIterationAndWatermark;
default:
return Http::FilterDataStatus::StopIterationNoBuffer;
}
} | 0 | C++ | CWE-476 | NULL Pointer Dereference | A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit. | https://cwe.mitre.org/data/definitions/476.html | vulnerable |
void HeaderTable::setCapacity(uint32_t capacity) {
// TODO: ddmello - the below is a little dangerous as we update the
// capacity right away. Some properties of the class utilize that variable
// and so might be better to refactor and update capacity at the end of the
// method (and update other methods)
auto oldCapacity = capacity_;
capacity_ = capacity;
if (capacity_ == oldCapacity) {
return;
} else if (capacity_ < oldCapacity) {
// NOTE: currently no actual resizing is performed...
evict(0);
} else {
// NOTE: due to the above lack of resizing, we must determine whether a
// resize is actually appropriate (to handle cases where the underlying
// vector is still >= to the size related to the new capacity requested)
uint32_t newLength = (capacity_ >> 5) + 1;
if (newLength > table_.size()) {
auto oldTail = tail();
auto oldLength = table_.size();
table_.resize(newLength);
if (size_ > 0 && oldTail > head_) {
// the list wrapped around, need to move oldTail..oldLength to the end
// of the now-larger table_
std::copy(table_.begin() + oldTail, table_.begin() + oldLength,
table_.begin() + newLength - (oldLength - oldTail));
// Update the names indecies that pointed to the old range
for (auto& names_it: names_) {
for (auto& idx: names_it.second) {
if (idx >= oldTail) {
DCHECK_LT(idx + (table_.size() - oldLength), table_.size());
idx += (table_.size() - oldLength);
} else {
// remaining indecies in the list were smaller than oldTail, so
// should be indexed from 0
break;
}
}
}
}
}
}
} | 1 | C++ | CWE-416 | Use After Free | Referencing memory after it has been freed can cause a program to crash, use unexpected values, or execute code. | https://cwe.mitre.org/data/definitions/416.html | safe |
void jas_matrix_divpow2(jas_matrix_t *matrix, int n)
{
jas_matind_t i;
jas_matind_t j;
jas_seqent_t *rowstart;
jas_matind_t rowstep;
jas_seqent_t *data;
if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) {
assert(matrix->rows_);
rowstep = jas_matrix_rowstep(matrix);
for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i,
rowstart += rowstep) {
for (j = matrix->numcols_, data = rowstart; j > 0; --j,
++data) {
*data = (*data >= 0) ? ((*data) >> n) :
(-((-(*data)) >> n));
}
}
}
} | 1 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | safe |
static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n)
{
int dy = y1 - y0;
int adx = x1 - x0;
int ady = abs(dy);
int base;
int x=x0,y=y0;
int err = 0;
int sy;
#ifdef STB_VORBIS_DIVIDE_TABLE
if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) {
if (dy < 0) {
base = -integer_divide_table[ady][adx];
sy = base-1;
} else {
base = integer_divide_table[ady][adx];
sy = base+1;
}
} else {
base = dy / adx;
if (dy < 0)
sy = base - 1;
else
sy = base+1;
}
#else
base = dy / adx;
if (dy < 0)
sy = base - 1;
else
sy = base+1;
#endif
ady -= abs(base) * adx;
if (x1 > n) x1 = n;
if (x < x1) {
LINE_OP(output[x], inverse_db_table[y]);
for (++x; x < x1; ++x) {
err += ady;
if (err >= adx) {
err -= adx;
y += sy;
} else
y += base;
LINE_OP(output[x], inverse_db_table[y]);
}
}
} | 0 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | vulnerable |
TEST(Random, SecureFork) {
unsigned char buffer = 0;
// Init random buffer
folly::Random::secureRandom(&buffer, 1);
auto pid = fork();
EXPECT_NE(pid, -1);
if (pid) {
// parent
int status = 0;
folly::Random::secureRandom(&buffer, 1);
auto pid2 = wait(&status);
EXPECT_NE(WEXITSTATUS(status), buffer);
EXPECT_EQ(pid, pid2);
} else {
// child
folly::Random::secureRandom(&buffer, 1);
exit(buffer); // Do not print gtest results
}
} | 1 | C++ | CWE-119 | Improper Restriction of Operations within the Bounds of a Memory Buffer | The software performs operations on a memory buffer, but it can read from or write to a memory location that is outside of the intended boundary of the buffer. | https://cwe.mitre.org/data/definitions/119.html | safe |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input2->type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | 1 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | safe |
TfLiteStatus UseDynamicOutputTensors(TfLiteContext* context, TfLiteNode* node) {
for (int i = 0; i < NumOutputs(node); ++i) {
SetTensorToDynamic(GetOutput(context, node, i));
}
return kTfLiteOk;
} | 0 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | vulnerable |
static ssize_t _hostfs_writev(
oe_fd_t* desc,
const struct oe_iovec* iov,
int iovcnt)
{
ssize_t ret = -1;
file_t* file = _cast_file(desc);
void* buf = NULL;
size_t buf_size = 0;
if (!file || !iov || iovcnt < 0 || iovcnt > OE_IOV_MAX)
OE_RAISE_ERRNO(OE_EINVAL);
/* Flatten the IO vector into contiguous heap memory. */
if (oe_iov_pack(iov, iovcnt, &buf, &buf_size) != 0)
OE_RAISE_ERRNO(OE_ENOMEM);
/* Call the host. */
if (oe_syscall_writev_ocall(&ret, file->host_fd, buf, iovcnt, buf_size) !=
OE_OK)
{
OE_RAISE_ERRNO(OE_EINVAL);
}
done:
if (buf)
oe_free(buf);
return ret;
} | 0 | C++ | NVD-CWE-Other | Other | NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset. | https://nvd.nist.gov/vuln/categories | vulnerable |
static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned int size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
} | 1 | C++ | CWE-416 | Use After Free | Referencing memory after it has been freed can cause a program to crash, use unexpected values, or execute code. | https://cwe.mitre.org/data/definitions/416.html | safe |
transientObjectPutErrorMessage(Runtime *runtime, Handle<> base, SymbolID id) {
// Emit an error message that looks like:
// "Cannot create property '%{id}' on ${typeof base} '${String(base)}'".
StringView propName =
runtime->getIdentifierTable().getStringView(runtime, id);
Handle<StringPrimitive> baseType =
runtime->makeHandle(vmcast<StringPrimitive>(typeOf(runtime, base)));
StringView baseTypeAsString =
StringPrimitive::createStringView(runtime, baseType);
MutableHandle<StringPrimitive> valueAsString{runtime};
if (base->isSymbol()) {
// Special workaround for Symbol which can't be stringified.
auto str = symbolDescriptiveString(runtime, Handle<SymbolID>::vmcast(base));
if (str != ExecutionStatus::EXCEPTION) {
valueAsString = *str;
} else {
runtime->clearThrownValue();
valueAsString = StringPrimitive::createNoThrow(
runtime, "<<Exception occurred getting the value>>");
}
} else {
auto str = toString_RJS(runtime, base);
assert(
str != ExecutionStatus::EXCEPTION &&
"Primitives should be convertible to string without exceptions");
valueAsString = std::move(*str);
}
StringView valueAsStringPrintable =
StringPrimitive::createStringView(runtime, valueAsString);
SmallU16String<32> tmp;
return runtime->raiseTypeError(
TwineChar16("Cannot create property '") + propName + "' on " +
baseTypeAsString.getUTF16Ref(tmp) + " '" +
valueAsStringPrintable.getUTF16Ref(tmp) + "'");
} | 0 | C++ | CWE-416 | Use After Free | Referencing memory after it has been freed can cause a program to crash, use unexpected values, or execute code. | https://cwe.mitre.org/data/definitions/416.html | vulnerable |
size_t jsuGetFreeStack() {
#ifdef ARM
void *frame = __builtin_frame_address(0);
size_t stackPos = (size_t)((char*)frame);
size_t stackEnd = (size_t)((char*)&LINKER_END_VAR);
if (stackPos < stackEnd) return 0; // should never happen, but just in case of overflow!
return stackPos - stackEnd;
#elif defined(LINUX)
// On linux, we set STACK_BASE from `main`.
char ptr; // this is on the stack
extern void *STACK_BASE;
uint32_t count = (uint32_t)((size_t)STACK_BASE - (size_t)&ptr);
const uint32_t max_stack = 1000000; // give it 1 megabyte of stack
if (count>max_stack) return 0;
return max_stack - count;
#else
// stack depth seems pretty platform-specific :( Default to a value that disables it
return 1000000; // no stack depth check on this platform
#endif
} | 1 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | safe |
TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const auto* params =
reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
const LeakyReluOpData* data =
reinterpret_cast<LeakyReluOpData*>(node->user_data);
LeakyReluParams op_params;
switch (input->type) {
case kTfLiteFloat32: {
op_params.alpha = params->alpha;
optimized_ops::LeakyRelu(
op_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
} break;
case kTfLiteUInt8: {
QuantizeLeakyRelu<uint8_t>(input, output, data);
return kTfLiteOk;
} break;
case kTfLiteInt8: {
QuantizeLeakyRelu<int8_t>(input, output, data);
return kTfLiteOk;
} break;
case kTfLiteInt16: {
QuantizeLeakyRelu<int16_t>(input, output, data);
return kTfLiteOk;
} break;
default:
TF_LITE_KERNEL_LOG(
context,
"Only float32, int8, int16 and uint8 is supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} | 1 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | safe |
void FormatConverter<T>::InitSparseToDenseConverter(
std::vector<int> shape, std::vector<int> traversal_order,
std::vector<TfLiteDimensionType> format, std::vector<int> dense_size,
std::vector<std::vector<int>> segments,
std::vector<std::vector<int>> indices, std::vector<int> block_map) {
dense_shape_ = std::move(shape);
traversal_order_ = std::move(traversal_order);
block_map_ = std::move(block_map);
format_ = std::move(format);
dense_size_ = 1;
for (int i = 0; i < dense_shape_.size(); i++) {
dense_size_ *= dense_shape_[i];
}
dim_metadata_.resize(2 * format_.size());
for (int i = 0; i < format_.size(); i++) {
if (format_[i] == kTfLiteDimDense) {
dim_metadata_[2 * i] = {dense_size[i]};
} else {
dim_metadata_[2 * i] = std::move(segments[i]);
dim_metadata_[2 * i + 1] = std::move(indices[i]);
}
}
int original_rank = dense_shape_.size();
int block_dim = 0;
blocked_shape_.resize(original_rank);
block_size_.resize(block_map_.size());
for (int i = 0; i < original_rank; i++) {
if (block_dim < block_map_.size() && block_map_[block_dim] == i) {
if (original_rank + block_dim < traversal_order_.size()) {
int orig_dim = traversal_order_[original_rank + block_dim];
block_size_[block_dim] = dense_size[orig_dim];
blocked_shape_[i] = dense_shape_[i] / dense_size[orig_dim];
block_dim++;
}
} else {
blocked_shape_[i] = dense_shape_[i];
}
}
} | 1 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | safe |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* multipliers;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputMultipliers, &multipliers));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
}
switch (output->type) {
case kTfLiteFloat32:
Tile<float>(*(input->dims), input, multipliers, output);
break;
case kTfLiteUInt8:
Tile<uint8_t>(*(input->dims), input, multipliers, output);
break;
case kTfLiteInt32:
Tile<int32_t>(*(input->dims), input, multipliers, output);
break;
case kTfLiteInt64:
Tile<int64_t>(*(input->dims), input, multipliers, output);
break;
case kTfLiteString: {
DynamicBuffer buffer;
TileString(*(input->dims), input, multipliers, &buffer, output);
buffer.WriteToTensor(output, /*new_shape=*/nullptr);
break;
}
case kTfLiteBool:
Tile<bool>(*(input->dims), input, multipliers, output);
break;
default:
context->ReportError(context, "Type '%s' is not supported by tile.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
} | 1 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | safe |
void writeStats(Array& /*ret*/) override {
fprintf(stderr, "writeStats start\n");
// RetSame: the return value is the same instance every time
// HasThis: call has a this argument
// AllSame: all returns were the same data even though args are different
// MemberCount: number of different arg sets (including this)
fprintf(stderr, "Count Function MinSerLen MaxSerLen RetSame HasThis "
"AllSame MemberCount\n");
for (auto& me : m_memos) {
if (me.second.m_ignore) continue;
if (me.second.m_count == 1) continue;
int min_ser_len = 999999999;
int max_ser_len = 0;
int count = 0;
int member_count = 0;
bool all_same = true;
if (me.second.m_has_this) {
bool any_multiple = false;
auto& fr = me.second.m_member_memos.begin()->second.m_return_value;
member_count = me.second.m_member_memos.size();
for (auto& mme : me.second.m_member_memos) {
if (mme.second.m_return_value != fr) all_same = false;
count += mme.second.m_count;
auto ser_len = mme.second.m_return_value.length();
min_ser_len = std::min(min_ser_len, ser_len);
max_ser_len = std::max(max_ser_len, ser_len);
if (mme.second.m_count > 1) any_multiple = true;
}
if (!any_multiple && !all_same) continue;
} else {
min_ser_len = max_ser_len = me.second.m_return_value.length();
count = me.second.m_count;
all_same = me.second.m_ret_tv_same;
}
fprintf(stderr, "%d %s %d %d %s %s %s %d\n",
count, me.first.data(),
min_ser_len, max_ser_len,
me.second.m_ret_tv_same ? " true" : "false",
me.second.m_has_this ? " true" : "false",
all_same ? " true" : "false",
member_count
);
}
fprintf(stderr, "writeStats end\n");
} | 0 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | vulnerable |
static void _send(Socket_T S, const char *request, StringBuffer_T data) {
_argument(data, "format", "text");
char *_auth = _getBasicAuthHeader();
MD_T token;
StringBuffer_append(data, "%ssecuritytoken=%s", StringBuffer_length(data) > 0 ? "&" : "", Util_getToken(token));
int rv = Socket_print(S,
"POST %s HTTP/1.0\r\n"
"Content-Type: application/x-www-form-urlencoded\r\n"
"Cookie: securitytoken=%s\r\n"
"Content-Length: %d\r\n"
"%s"
"\r\n"
"%s",
request,
token,
StringBuffer_length(data),
_auth ? _auth : "",
StringBuffer_toString(data));
FREE(_auth);
if (rv < 0)
THROW(IOException, "Monit: cannot send command to the monit daemon -- %s", STRERROR);
} | 1 | C++ | CWE-352 | Cross-Site Request Forgery (CSRF) | The web application does not, or can not, sufficiently verify whether a well-formed, valid, consistent request was intentionally provided by the user who submitted the request. | https://cwe.mitre.org/data/definitions/352.html | safe |
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
TfLiteIntArray* output_shape = GetOutputShape(context, node);
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)>
scoped_output_shape(output_shape, TfLiteIntArrayFree);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// Tensorflow's Reshape allows one of the shape components to have the
// special -1 value, meaning it will be calculated automatically based on the
// input. Here we calculate what that dimension should be so that the number
// of output elements in the same as the number of input elements.
int num_input_elements = NumElements(input);
int num_output_elements = 1;
int stretch_dim = -1;
for (int i = 0; i < output_shape->size; ++i) {
int value = output_shape->data[i];
if (value == -1) {
TF_LITE_ENSURE_EQ(context, stretch_dim, -1);
stretch_dim = i;
} else {
num_output_elements *= value;
}
}
if (stretch_dim != -1) {
output_shape->data[stretch_dim] = num_input_elements / num_output_elements;
num_output_elements *= output_shape->data[stretch_dim];
}
TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements);
return context->ResizeTensor(context, output, scoped_output_shape.release());
} | 0 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | vulnerable |
const std::string& get_id() const {
ceph_assert(t != Wildcard && t != Tenant);
return u.id;
} | 0 | C++ | CWE-617 | Reachable Assertion | The product contains an assert() or similar statement that can be triggered by an attacker, which leads to an application exit or other behavior that is more severe than necessary. | https://cwe.mitre.org/data/definitions/617.html | vulnerable |
TfLiteStatus PrepareAny(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
const TfLiteTensor* input = GetInput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteBool);
return PrepareSimple(context, node);
} | 0 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | vulnerable |
ProcessUDPHeader(tTcpIpPacketParsingResult _res, PVOID pIpHeader, ULONG len, USHORT ipHeaderSize)
{
tTcpIpPacketParsingResult res = _res;
ULONG udpDataStart = ipHeaderSize + sizeof(UDPHeader);
res.TcpUdp = ppresIsUDP;
res.XxpIpHeaderSize = udpDataStart;
if (len >= udpDataStart)
{
UDPHeader *pUdpHeader = (UDPHeader *)RtlOffsetToPointer(pIpHeader, ipHeaderSize);
USHORT datagramLength = swap_short(pUdpHeader->udp_length);
res.xxpStatus = ppresXxpKnown;
res.xxpFull = TRUE;
// may be full or not, but the datagram length is known
DPrintf(2, ("udp: len %d, datagramLength %d\n", len, datagramLength));
}
else
{
res.xxpFull = FALSE;
res.xxpStatus = ppresXxpIncomplete;
}
return res;
} | 1 | C++ | CWE-20 | Improper Input Validation | The product receives input or data, but it does
not validate or incorrectly validates that the input has the
properties that are required to process the data safely and
correctly. | https://cwe.mitre.org/data/definitions/20.html | safe |
TfLiteStatus ComparisonPrepareCommon(TfLiteContext* context, TfLiteNode* node,
bool is_string_allowed) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// Don't support string.
if (!is_string_allowed) {
TF_LITE_ENSURE(context, input1->type != kTfLiteString);
}
// Currently only support tensors have the same type.
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = kTfLiteBool;
bool requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | 0 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | vulnerable |
gdImagePtr gdImageCreateTrueColor (int sx, int sy)
{
int i;
gdImagePtr im;
if (overflow2(sx, sy)) {
return NULL;
}
if (overflow2(sizeof(unsigned char *), sy)) {
return NULL;
}
if (overflow2(sizeof(int) + sizeof(unsigned char), sx * sy)) {
return NULL;
}
// Check for OOM before doing a potentially large allocation.
auto allocsz = sizeof(gdImage)
+ (sizeof(int *) + sizeof(unsigned char *)) * sy
+ (sizeof(int) + sizeof(unsigned char)) * sx * sy;
if (UNLIKELY(precheckOOM(allocsz))) {
// Don't throw here because GD might need to do its own cleanup.
return NULL;
}
im = (gdImage *) gdMalloc(sizeof(gdImage));
memset(im, 0, sizeof(gdImage));
im->tpixels = (int **) gdMalloc(sizeof(int *) * sy);
im->AA_opacity = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy);
im->polyInts = 0;
im->polyAllocated = 0;
im->brush = 0;
im->tile = 0;
im->style = 0;
for (i = 0; i < sy; i++) {
im->tpixels[i] = (int *) gdCalloc(sx, sizeof(int));
im->AA_opacity[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char));
}
im->sx = sx;
im->sy = sy;
im->transparent = (-1);
im->interlace = 0;
im->trueColor = 1;
/* 2.0.2: alpha blending is now on by default, and saving of alpha is
* off by default. This allows font antialiasing to work as expected
* on the first try in JPEGs -- quite important -- and also allows
* for smaller PNGs when saving of alpha channel is not really
* desired, which it usually isn't!
*/
im->saveAlphaFlag = 0;
im->alphaBlendingFlag = 1;
im->thick = 1;
im->AA = 0;
im->AA_polygon = 0;
im->cx1 = 0;
im->cy1 = 0;
im->cx2 = im->sx - 1;
im->cy2 = im->sy - 1;
im->interpolation = NULL;
im->interpolation_id = GD_BILINEAR_FIXED;
return im;
} | 1 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | safe |
void jas_matrix_asr(jas_matrix_t *matrix, int n)
{
int i;
int j;
jas_seqent_t *rowstart;
int rowstep;
jas_seqent_t *data;
assert(n >= 0);
if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) {
assert(matrix->rows_);
rowstep = jas_matrix_rowstep(matrix);
for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i,
rowstart += rowstep) {
for (j = matrix->numcols_, data = rowstart; j > 0; --j,
++data) {
//*data >>= n;
*data = jas_seqent_asr(*data, n);
}
}
}
} | 0 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | vulnerable |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
// Reinterprete the opaque data provided by user.
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteInt32 && type != kTfLiteFloat32 && type != kTfLiteInt64) {
context->ReportError(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | 0 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | vulnerable |
armv6pmu_handle_irq(int irq_num,
void *dev)
{
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
if (!armv6_pmcr_has_overflowed(pmcr))
return IRQ_NONE;
regs = get_irq_regs();
/*
* The interrupts are cleared by writing the overflow flags back to
* the control register. All of the other bits don't have any effect
* if they are rewritten, so write the whole value back.
*/
armv6_pmcr_write(pmcr);
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx);
}
/*
* Handle the pending perf events.
*
* Note: this call *must* be run with interrupts disabled. For
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
irq_work_run();
return IRQ_HANDLED;
} | 1 | C++ | CWE-400 | Uncontrolled Resource Consumption | The software does not properly control the allocation and maintenance of a limited resource, thereby enabling an actor to influence the amount of resources consumed, eventually leading to the exhaustion of available resources. | https://cwe.mitre.org/data/definitions/400.html | safe |
void *bson_realloc( void *ptr, int size ) {
void *p;
p = bson_realloc_func( ptr, size );
bson_fatal_msg( !!p, "realloc() failed" );
return p;
} | 0 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | vulnerable |
static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
assign_eip_near(ctxt, ctxt->_eip + rel);
} | 0 | C++ | NVD-CWE-noinfo | null | null | null | vulnerable |
int64_t size() const {
return m_str ? m_str->size() : 0;
} | 1 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | safe |
bool MemoryManager::validate_user_write(const Process& process, VirtualAddress vaddr) const
{
auto* region = region_from_vaddr(process, vaddr);
return region && region->is_writable();
} | 0 | C++ | CWE-119 | Improper Restriction of Operations within the Bounds of a Memory Buffer | The software performs operations on a memory buffer, but it can read from or write to a memory location that is outside of the intended boundary of the buffer. | https://cwe.mitre.org/data/definitions/119.html | vulnerable |
YCPBoolean IniAgent::Write(const YCPPath &path, const YCPValue& value, const YCPValue& arg)
{
if (!parser.isStarted())
{
y2warning("Can't execute Write before being mounted.");
return YCPBoolean (false);
}
// no need to update if modified, we are changing value
bool ok = false; // is the _path_ ok?
// return value
YCPBoolean b (true);
if (0 == path->length ())
{
if (value->isString() && value->asString()->value() == "force")
parser.inifile.setDirty();
else if (value->isString () && value->asString()->value() == "clean")
parser.inifile.clean ();
if (0 != parser.write ())
b = false;
ok = true;
}
else
{
if (( parser.repeatNames () && value->isList ()) ||
(!parser.repeatNames () && (value->isString () || value->isInteger())) ||
path->component_str(0) == "all"
)
{
ok = true;
if (parser.inifile.Write (path, value, parser.HaveRewrites ()))
b = false;
}
else if (value->isVoid ())
{
int wb = -1;
string del_sec = "";
ok = true;
if (2 == path->length ())
{
string pc = path->component_str(0);
if ("s" == pc || "section" == pc)
{ // request to delete section. Find the file name
del_sec = path->component_str (1);
wb = parser.inifile.getSubSectionRewriteBy (del_sec.c_str());
}
}
if (parser.inifile.Delete (path))
b = false;
else if (del_sec != "")
{
parser.deleted_sections.insert (parser.getFileName (del_sec, wb));
}
}
else
{
ycp2error ("Wrong value for path %s: %s", path->toString ().c_str (), value->toString ().c_str ());
b = false;
}
}
if (!ok)
{
ycp2error ( "Wrong path '%s' in Write().", path->toString().c_str () );
}
return b;
} | 0 | C++ | CWE-200 | Exposure of Sensitive Information to an Unauthorized Actor | The product exposes sensitive information to an actor that is not explicitly authorized to have access to that information. | https://cwe.mitre.org/data/definitions/200.html | vulnerable |
void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
auto col_params = new CollectiveParams();
auto done_with_cleanup = [col_params, done = std::move(done)]() {
done();
col_params->Unref();
};
OP_REQUIRES_OK_ASYNC(c,
FillCollectiveParams(col_params, REDUCTION_COLLECTIVE,
/*group_size*/ c->input(1),
/*group_key*/ c->input(2),
/*instance_key*/ c->input(3)),
done_with_cleanup);
col_params->instance.shape = c->input(0).shape();
col_params->merge_op = merge_op_.get();
col_params->final_op = final_op_.get();
VLOG(1) << "CollectiveReduceV2 group_size " << col_params->group.group_size
<< " group_key " << col_params->group.group_key << " instance_key "
<< col_params->instance.instance_key;
// Allocate the output tensor, trying to reuse the input.
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(c,
c->forward_input_or_allocate_output(
{0}, 0, col_params->instance.shape, &output),
done_with_cleanup);
Run(c, col_params, std::move(done_with_cleanup));
} | 1 | C++ | CWE-416 | Use After Free | Referencing memory after it has been freed can cause a program to crash, use unexpected values, or execute code. | https://cwe.mitre.org/data/definitions/416.html | safe |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* fft_length = GetInput(context, node, kFftLengthTensor);
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type != kTfLiteComplex64) {
context->ReportError(context,
"Type '%s' for output is not supported by rfft2d.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
// Resize the output tensor if the fft_length tensor is not constant.
// Otherwise, check if the output shape is correct.
if (!IsConstantTensor(fft_length)) {
TF_LITE_ENSURE_STATUS(ResizeOutputandTemporaryTensors(context, node));
} else {
int num_dims_output = NumDimensions(output);
const RuntimeShape output_shape = GetTensorShape(output);
TF_LITE_ENSURE_EQ(context, num_dims_output, NumDimensions(input));
TF_LITE_ENSURE(context, num_dims_output >= 2);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 2),
fft_length_data[0]);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 1),
fft_length_data[1] / 2 + 1);
}
return Rfft2dHelper(context, node);
} | 0 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | vulnerable |
std::string TarFileReader::extract(const string &_path) {
if (_path.empty()) THROW("path cannot be empty");
if (!hasMore()) THROW("No more tar files");
string path = _path;
if (SystemUtilities::isDirectory(path)) path += "/" + getFilename();
LOG_DEBUG(5, "Extracting: " << path);
return extract(*SystemUtilities::oopen(path));
} | 0 | C++ | CWE-22 | Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal') | The software uses external input to construct a pathname that is intended to identify a file or directory that is located underneath a restricted parent directory, but the software does not properly neutralize special elements within the pathname that can cause the pathname to resolve to a location that is outside of the restricted directory. | https://cwe.mitre.org/data/definitions/22.html | vulnerable |
void PropertiesWidget::loadTorrentInfos(BitTorrent::TorrentHandle *const torrent)
{
clear();
m_torrent = torrent;
downloaded_pieces->setTorrent(m_torrent);
pieces_availability->setTorrent(m_torrent);
if (!m_torrent) return;
// Save path
updateSavePath(m_torrent);
// Hash
hash_lbl->setText(m_torrent->hash());
PropListModel->model()->clear();
if (m_torrent->hasMetadata()) {
// Creation date
lbl_creationDate->setText(m_torrent->creationDate().toString(Qt::DefaultLocaleShortDate));
label_total_size_val->setText(Utils::Misc::friendlyUnit(m_torrent->totalSize()));
// Comment
comment_text->setText(Utils::Misc::parseHtmlLinks(Utils::String::toHtmlEscaped(m_torrent->comment())));
// URL seeds
loadUrlSeeds();
label_created_by_val->setText(Utils::String::toHtmlEscaped(m_torrent->creator()));
// List files in torrent
PropListModel->model()->setupModelData(m_torrent->info());
filesList->setExpanded(PropListModel->index(0, 0), true);
// Load file priorities
PropListModel->model()->updateFilesPriorities(m_torrent->filePriorities());
}
// Load dynamic data
loadDynamicData();
} | 1 | C++ | CWE-79 | Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') | The software does not neutralize or incorrectly neutralizes user-controllable input before it is placed in output that is used as a web page that is served to other users. | https://cwe.mitre.org/data/definitions/79.html | safe |
static float *get_window(vorb *f, int len)
{
len <<= 1;
if (len == f->blocksize_0) return f->window[0];
if (len == f->blocksize_1) return f->window[1];
assert(0);
return NULL;
} | 0 | C++ | CWE-369 | Divide By Zero | The product divides a value by zero. | https://cwe.mitre.org/data/definitions/369.html | vulnerable |
UnicodeString::doAppend(const UChar *srcChars, int32_t srcStart, int32_t srcLength) {
if(!isWritable() || srcLength == 0 || srcChars == NULL) {
return *this;
}
// Perform all remaining operations relative to srcChars + srcStart.
// From this point forward, do not use srcStart.
srcChars += srcStart;
if(srcLength < 0) {
// get the srcLength if necessary
if((srcLength = u_strlen(srcChars)) == 0) {
return *this;
}
}
int32_t oldLength = length();
int32_t newLength = oldLength + srcLength;
// Check for append onto ourself
const UChar* oldArray = getArrayStart();
if (isBufferWritable() &&
oldArray < srcChars + srcLength &&
srcChars < oldArray + oldLength) {
// Copy into a new UnicodeString and start over
UnicodeString copy(srcChars, srcLength);
if (copy.isBogus()) {
setToBogus();
return *this;
}
return doAppend(copy.getArrayStart(), 0, srcLength);
}
// optimize append() onto a large-enough, owned string
if((newLength <= getCapacity() && isBufferWritable()) ||
cloneArrayIfNeeded(newLength, getGrowCapacity(newLength))) {
UChar *newArray = getArrayStart();
// Do not copy characters when
// UChar *buffer=str.getAppendBuffer(...);
// is followed by
// str.append(buffer, length);
// or
// str.appendString(buffer, length)
// or similar.
if(srcChars != newArray + oldLength) {
us_arrayCopy(srcChars, 0, newArray, oldLength, srcLength);
}
setLength(newLength);
}
return *this;
} | 0 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | vulnerable |
static int lookup1_values(int entries, int dim)
{
int r = (int) floor(exp((float) log((float) entries) / dim));
if ((int) floor(pow((float) r+1, dim)) <= entries) // (int) cast for MinGW warning;
++r; // floor() to avoid _ftol() when non-CRT
if (pow((float) r+1, dim) <= entries)
return -1;
if ((int) floor(pow((float) r, dim)) > entries)
return -1;
return r;
} | 1 | C++ | CWE-369 | Divide By Zero | The product divides a value by zero. | https://cwe.mitre.org/data/definitions/369.html | safe |
inline void StringData::setSize(int len) {
assertx(!isImmutable() && !hasMultipleRefs());
assertx(len >= 0 && len <= capacity());
mutableData()[len] = 0;
m_lenAndHash = len;
assertx(m_hash == 0);
assertx(checkSane());
} | 0 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | vulnerable |
void MoveTo(double x1,double y1) {
outpos +=
sprintf(outpos,"\n %12.3f %12.3f m",x1,y1);
} | 0 | C++ | NVD-CWE-noinfo | null | null | null | vulnerable |
String StringUtil::Implode(const Variant& items, const String& delim,
const bool checkIsContainer /* = true */) {
if (checkIsContainer && !isContainer(items)) {
throw_param_is_not_container();
}
int size = getContainerSize(items);
if (size == 0) return empty_string();
req::vector<String> sitems;
sitems.reserve(size);
size_t len = 0;
size_t lenDelim = delim.size();
for (ArrayIter iter(items); iter; ++iter) {
sitems.emplace_back(iter.second().toString());
len += sitems.back().size() + lenDelim;
}
len -= lenDelim; // always one delimiter less than count of items
assert(sitems.size() == size);
String s = String(len, ReserveString);
char *buffer = s.mutableData();
const char *sdelim = delim.data();
char *p = buffer;
String &init_str = sitems[0];
int init_len = init_str.size();
memcpy(p, init_str.data(), init_len);
p += init_len;
for (int i = 1; i < size; i++) {
String &item = sitems[i];
memcpy(p, sdelim, lenDelim);
p += lenDelim;
int lenItem = item.size();
memcpy(p, item.data(), lenItem);
p += lenItem;
}
assert(p - buffer == len);
s.setSize(len);
return s;
} | 1 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | safe |
TfLiteStatus StoreAllDecodedSequences(
TfLiteContext* context,
const std::vector<std::vector<std::vector<int>>>& sequences,
TfLiteNode* node, int top_paths) {
const int32_t batch_size = sequences.size();
std::vector<int32_t> num_entries(top_paths, 0);
// Calculate num_entries per path
for (const auto& batch_s : sequences) {
TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths);
for (int p = 0; p < top_paths; ++p) {
num_entries[p] += batch_s[p].size();
}
}
for (int p = 0; p < top_paths; ++p) {
const int32_t p_num = num_entries[p];
// Resize the decoded outputs.
TfLiteTensor* indices = GetOutput(context, node, p);
TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices));
TfLiteTensor* values = GetOutput(context, node, p + top_paths);
TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values));
TfLiteTensor* decoded_shape = GetOutput(context, node, p + 2 * top_paths);
TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape));
int32_t max_decoded = 0;
int32_t offset = 0;
int32_t* indices_data = GetTensorData<int32_t>(indices);
int32_t* values_data = GetTensorData<int32_t>(values);
int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape);
for (int b = 0; b < batch_size; ++b) {
auto& p_batch = sequences[b][p];
int32_t num_decoded = p_batch.size();
max_decoded = std::max(max_decoded, num_decoded);
std::copy_n(p_batch.begin(), num_decoded, values_data + offset);
for (int32_t t = 0; t < num_decoded; ++t, ++offset) {
indices_data[offset * 2] = b;
indices_data[offset * 2 + 1] = t;
}
}
decoded_shape_data[0] = batch_size;
decoded_shape_data[1] = max_decoded;
}
return kTfLiteOk;
} | 0 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | vulnerable |
void Compute(OpKernelContext* context) override {
const Tensor& data = context->input(0);
const Tensor& segment_ids = context->input(1);
const Tensor& num_segments = context->input(2);
if (!UnsortedSegmentReductionDoValidation(this, context, data, segment_ids,
num_segments)) {
return;
}
const auto segment_flat = segment_ids.flat<Index>();
const int64 output_rows = internal::SubtleMustCopy(static_cast<int64>(
num_segments.dtype() == DT_INT32 ? num_segments.scalar<int32>()()
: num_segments.scalar<int64>()()));
OP_REQUIRES(context, output_rows >= 0,
errors::InvalidArgument("Input num_segments == ", output_rows,
" must not be negative."));
TensorShape output_shape;
output_shape.AddDim(output_rows);
for (int i = segment_ids.dims(); i < data.dims(); i++) {
output_shape.AddDim(data.dim_size(i));
}
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
auto output_flat = output->flat_outer_dims<T>();
auto data_flat = data.flat_inner_outer_dims<T, 2>(segment_ids.dims() - 1);
reduction_functor_(context, segment_ids.shape(), segment_flat, data_flat,
output_flat);
} | 1 | C++ | CWE-681 | Incorrect Conversion between Numeric Types | When converting from one data type to another, such as long to integer, data can be omitted or translated in a way that produces unexpected values. If the resulting values are used in a sensitive context, then dangerous behaviors may occur. | https://cwe.mitre.org/data/definitions/681.html | safe |
int pnm_validate(jas_stream_t *in)
{
uchar buf[2];
int i;
int n;
assert(JAS_STREAM_MAXPUTBACK >= 2);
/* Read the first two characters that constitute the signature. */
if ((n = jas_stream_read(in, buf, 2)) < 0) {
return -1;
}
/* Put these characters back to the stream. */
for (i = n - 1; i >= 0; --i) {
if (jas_stream_ungetc(in, buf[i]) == EOF) {
return -1;
}
}
/* Did we read enough data? */
if (n < 2) {
return -1;
}
/* Is this the correct signature for a PNM file? */
if (buf[0] == 'P' && isdigit(buf[1])) {
return 0;
}
return -1;
} | 0 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | vulnerable |
DeleteRunner(ReferenceHandle& that, Local<Value> key_handle) :
key{ExternalCopy::CopyIfPrimitive(key_handle)},
context{that.context},
reference{that.reference} {
that.CheckDisposed();
if (!key) {
throw RuntimeTypeError("Invalid `key`");
}
} | 0 | C++ | CWE-913 | Improper Control of Dynamically-Managed Code Resources | The software does not properly restrict reading from or writing to dynamically-managed code resources such as variables, objects, classes, attributes, functions, or executable instructions or statements. | https://cwe.mitre.org/data/definitions/913.html | vulnerable |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* value = GetInput(context, node, kValueTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (IsDynamicTensor(output)) {
const TfLiteTensor* dims = GetInput(context, node, kDimsTensor);
TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output));
}
#define TF_LITE_FILL(data_type) \
reference_ops::Fill(GetTensorShape(value), GetTensorData<data_type>(value), \
GetTensorShape(output), \
GetTensorData<data_type>(output))
switch (output->type) {
case kTfLiteInt32:
TF_LITE_FILL(int32_t);
break;
case kTfLiteInt64:
TF_LITE_FILL(int64_t);
break;
case kTfLiteFloat32:
TF_LITE_FILL(float);
break;
case kTfLiteBool:
TF_LITE_FILL(bool);
break;
case kTfLiteString:
FillString(value, output);
break;
default:
context->ReportError(
context,
"Fill only currently supports int32, int64, float32, bool, string "
"for input 1, got %d.",
value->type);
return kTfLiteError;
}
#undef TF_LITE_FILL
return kTfLiteOk;
} | 0 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | vulnerable |
TEST_F(QuantizedConv2DTest, OddPadding) {
const int stride = 2;
TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const int depth = 1;
const int image_width = 4;
const int image_height = 4;
const int image_batch_count = 1;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<quint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width / stride;
const int expected_height = (image_height * filter_count) / stride;
Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<qint32>(&expected, {348, 252, 274, 175});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
} | 0 | C++ | CWE-476 | NULL Pointer Dereference | A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit. | https://cwe.mitre.org/data/definitions/476.html | vulnerable |
static inline long decode_twos_comp(jas_ulong c, int prec)
{
long result;
assert(prec >= 2);
jas_eprintf("warning: support for signed data is untested\n");
// NOTE: Is this correct?
result = (c & ((1 << (prec - 1)) - 1)) - (c & (1 << (prec - 1)));
return result;
} | 1 | C++ | CWE-20 | Improper Input Validation | The product receives input or data, but it does
not validate or incorrectly validates that the input has the
properties that are required to process the data safely and
correctly. | https://cwe.mitre.org/data/definitions/20.html | safe |
void LineTo(double x1,double y1) {
sprintf(outputbuffer,"\n %12.3f %12.3f l",x1,y1);
sendClean(outputbuffer);
} | 1 | C++ | NVD-CWE-noinfo | null | null | null | safe |
MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, int len ) {
return bson_append_string_base( b, name, value, len, BSON_STRING );
} | 0 | C++ | CWE-190 | Integer Overflow or Wraparound | The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control. | https://cwe.mitre.org/data/definitions/190.html | vulnerable |
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
const TfLiteNode* node, int index) {
if (index >= 0 && index < node->inputs->size) {
const int tensor_index = node->inputs->data[index];
if (tensor_index != kTfLiteOptionalTensor) {
if (context->tensors != nullptr) {
return &context->tensors[tensor_index];
} else {
return context->GetTensor(context, tensor_index);
}
}
}
return nullptr;
} | 1 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | safe |
unsigned int GetUVarBE(int nPos, int nSize, bool *pbSuccess)
{
//*pbSuccess = true;
if ( nPos < 0 || nPos + nSize > m_nLen )
{
*pbSuccess = false;
return 0;
}
unsigned int nRes = 0;
for ( int nIndex = 0; nIndex < nSize; ++nIndex )
nRes = (nRes << 8) + m_sFile[nPos + nIndex];
return nRes;
} | 0 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | vulnerable |
void CNB::DoIPHdrCSO(PVOID IpHeader, ULONG EthPayloadLength) const
{
ParaNdis_CheckSumVerifyFlat(IpHeader,
EthPayloadLength,
pcrIpChecksum | pcrFixIPChecksum, FALSE,
__FUNCTION__);
} | 1 | C++ | CWE-20 | Improper Input Validation | The product receives input or data, but it does
not validate or incorrectly validates that the input has the
properties that are required to process the data safely and
correctly. | https://cwe.mitre.org/data/definitions/20.html | safe |
char *QuotedString::extractFrom(char *input, char **endPtr) {
char *startPtr = input + 1; // skip the quote
char *readPtr = startPtr;
char *writePtr = startPtr;
char c;
char firstChar = *input;
char stopChar = firstChar; // closing quote is the same as opening quote
if (!isQuote(firstChar)) goto ERROR_OPENING_QUOTE_MISSING;
for (;;) {
c = *readPtr++;
if (c == '\0') goto ERROR_CLOSING_QUOTE_MISSING;
if (c == stopChar) goto SUCCESS;
if (c == '\\') {
// replace char
c = unescapeChar(*readPtr++);
if (c == '\0') goto ERROR_ESCAPE_SEQUENCE_INTERRUPTED;
}
*writePtr++ = c;
}
SUCCESS:
// end the string here
*writePtr = '\0';
// update end ptr
*endPtr = readPtr;
// return pointer to unquoted string
return startPtr;
ERROR_OPENING_QUOTE_MISSING:
ERROR_CLOSING_QUOTE_MISSING:
ERROR_ESCAPE_SEQUENCE_INTERRUPTED:
return NULL;
} | 1 | C++ | CWE-119 | Improper Restriction of Operations within the Bounds of a Memory Buffer | The software performs operations on a memory buffer, but it can read from or write to a memory location that is outside of the intended boundary of the buffer. | https://cwe.mitre.org/data/definitions/119.html | safe |
int IniParser::write_file(const string & filename, IniSection & section)
{
// ensure that the directories exist
Pathname pn(filename);
PathInfo::assert_dir (pn.dirname ());
mode_t file_umask = section.isPrivate()? 0077: 0022;
mode_t orig_umask = umask(file_umask);
// rewriting an existing file wouldnt change its mode
unlink(filename.c_str());
ofstream of(filename.c_str());
if (!of.good()) {
y2error ("Can not open file %s for write", filename.c_str());
return -1;
}
write_helper (section, of, 0);
of.close();
umask(orig_umask);
return 0;
} | 1 | C++ | CWE-200 | Exposure of Sensitive Information to an Unauthorized Actor | The product exposes sensitive information to an actor that is not explicitly authorized to have access to that information. | https://cwe.mitre.org/data/definitions/200.html | safe |
String HHVM_FUNCTION(ldap_escape,
const String& value,
const String& ignores /* = "" */,
int flags /* = 0 */) {
char esc[256] = {};
if (flags & k_LDAP_ESCAPE_FILTER) { // llvm.org/bugs/show_bug.cgi?id=18389
esc['*'*1u] = esc['('*1u] = esc[')'*1u] = esc['\0'*1u] = esc['\\'*1u] = 1;
}
if (flags & k_LDAP_ESCAPE_DN) {
esc[','*1u] = esc['='*1u] = esc['+'*1u] = esc['<'*1u] = esc['\\'*1u] = 1;
esc['>'*1u] = esc[';'*1u] = esc['"'*1u] = esc['#'*1u] = 1;
}
if (!flags) {
memset(esc, 1, sizeof(esc));
}
for (int i = 0; i < ignores.size(); i++) {
esc[(unsigned char)ignores[i]] = 0;
}
char hex[] = "0123456789abcdef";
String result(3 * value.size(), ReserveString);
char *rdata = result.get()->mutableData(), *r = rdata;
for (int i = 0; i < value.size(); i++) {
auto c = (unsigned char)value[i];
if (esc[c]) {
*r++ = '\\';
*r++ = hex[c >> 4];
*r++ = hex[c & 0xf];
} else {
*r++ = c;
}
}
result.setSize(r - rdata);
return result;
} | 0 | C++ | CWE-787 | Out-of-bounds Write | The software writes data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/787.html | vulnerable |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
ruy::profiler::ScopeLabel label("SquaredDifference");
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type == kTfLiteFloat32) {
EvalSquaredDifference<float>(context, node, data, input1, input2, output);
} else if (output->type == kTfLiteInt32) {
EvalSquaredDifference<int32_t>(context, node, data, input1, input2, output);
} else {
context->ReportError(
context,
"SquaredDifference only supports FLOAT32 and INT32 now, got %d.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
} | 0 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | vulnerable |
void UTFstring::UpdateFromUTF8()
{
delete [] _Data;
// find the size of the final UCS-2 string
size_t i;
const size_t SrcLength = UTF8string.length();
for (_Length=0, i=0; i<SrcLength; _Length++) {
const unsigned int CharLength = UTFCharLength(static_cast<uint8>(UTF8string[i]));
if ((CharLength >= 1) && (CharLength <= 4))
i += CharLength;
else
// Invalid size?
break;
}
_Data = new wchar_t[_Length+1];
size_t j;
for (j=0, i=0; i<SrcLength; j++) {
const uint8 lead = static_cast<uint8>(UTF8string[i]);
const unsigned int CharLength = UTFCharLength(lead);
if ((CharLength < 1) || (CharLength > 4))
// Invalid char?
break;
if ((i + CharLength) > SrcLength)
// Guard against invalid memory access beyond the end of the
// source buffer.
break;
if (CharLength == 1)
_Data[j] = lead;
else if (CharLength == 2)
_Data[j] = ((lead & 0x1F) << 6) + (UTF8string[i+1] & 0x3F);
else if (CharLength == 3)
_Data[j] = ((lead & 0x0F) << 12) + ((UTF8string[i+1] & 0x3F) << 6) + (UTF8string[i+2] & 0x3F);
else if (CharLength == 4)
_Data[j] = ((lead & 0x07) << 18) + ((UTF8string[i+1] & 0x3F) << 12) + ((UTF8string[i+2] & 0x3F) << 6) + (UTF8string[i+3] & 0x3F);
i += CharLength;
}
_Data[j] = 0;
} | 1 | C++ | CWE-200 | Exposure of Sensitive Information to an Unauthorized Actor | The product exposes sensitive information to an actor that is not explicitly authorized to have access to that information. | https://cwe.mitre.org/data/definitions/200.html | safe |
bool CModules::GetModPathInfo(CModInfo& ModInfo, const CString& sModule,
const CString& sModPath, CString& sRetMsg) {
if (!ValidateModuleName(sModule, sRetMsg)) {
return false;
}
ModInfo.SetName(sModule);
ModInfo.SetPath(sModPath);
ModHandle p = OpenModule(sModule, sModPath, ModInfo, sRetMsg);
if (!p) return false;
dlclose(p);
return true;
} | 1 | C++ | CWE-20 | Improper Input Validation | The product receives input or data, but it does
not validate or incorrectly validates that the input has the
properties that are required to process the data safely and
correctly. | https://cwe.mitre.org/data/definitions/20.html | safe |
void initialize(const string &path, bool owner) {
TRACE_POINT();
this->path = path;
this->owner = owner;
/* Create the server instance directory. We only need to write to this
* directory for these reasons:
* 1. Initial population of structure files (structure_version.txt, instance.pid).
* 2. Creating/removing a generation directory.
* 3. Removing the entire server instance directory (after all
* generations are removed).
*
* 1 and 2 are done by the helper server during initialization and before lowering
* privilege. 3 is done during helper server shutdown by a cleanup process that's
* running as the same user the helper server was running as before privilege
* lowering.
* Therefore, we make the directory only writable by the user the helper server
* was running as before privilege is lowered. Everybody else has read and execute
* rights though, because we want admin tools to be able to list the available
* generations no matter what user they're running as.
*/
if (owner) {
switch (getFileTypeNoFollowSymlinks(path)) {
case FT_NONEXISTANT:
createDirectory(path);
break;
case FT_DIRECTORY:
verifyDirectoryPermissions(path);
break;
default:
throw RuntimeException("'" + path + "' already exists, and is not a directory");
}
} else if (getFileType(path) != FT_DIRECTORY) {
throw RuntimeException("Server instance directory '" + path +
"' does not exist");
}
} | 0 | C++ | NVD-CWE-Other | Other | NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset. | https://nvd.nist.gov/vuln/categories | vulnerable |
TfLiteStatus ReverseSequenceImpl(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* seq_lengths_tensor =
GetInput(context, node, kSeqLengthsTensor);
const TS* seq_lengths = GetTensorData<TS>(seq_lengths_tensor);
auto* params =
reinterpret_cast<TfLiteReverseSequenceParams*>(node->builtin_data);
int seq_dim = params->seq_dim;
int batch_dim = params->batch_dim;
TF_LITE_ENSURE(context, seq_dim >= 0);
TF_LITE_ENSURE(context, batch_dim >= 0);
TF_LITE_ENSURE(context, seq_dim != batch_dim);
TF_LITE_ENSURE(context, seq_dim < NumDimensions(input));
TF_LITE_ENSURE(context, batch_dim < NumDimensions(input));
TF_LITE_ENSURE_EQ(context, SizeOfDimension(seq_lengths_tensor, 0),
SizeOfDimension(input, batch_dim));
for (int i = 0; i < NumDimensions(seq_lengths_tensor); ++i) {
TF_LITE_ENSURE(context, seq_lengths[i] <= SizeOfDimension(input, seq_dim));
}
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
reference_ops::ReverseSequence<T, TS>(
seq_lengths, seq_dim, batch_dim, GetTensorShape(input),
GetTensorData<T>(input), GetTensorShape(output),
GetTensorData<T>(output));
return kTfLiteOk;
} | 0 | C++ | CWE-125 | Out-of-bounds Read | The software reads data past the end, or before the beginning, of the intended buffer. | https://cwe.mitre.org/data/definitions/125.html | vulnerable |
static inline bool CheckForOverflow(double as_double, T* out) {
return (sizeof(T) < sizeof(double) && std::isinf(*out) &&
std::isfinite(as_double));
} | 1 | C++ | CWE-20 | Improper Input Validation | The product receives input or data, but it does
not validate or incorrectly validates that the input has the
properties that are required to process the data safely and
correctly. | https://cwe.mitre.org/data/definitions/20.html | safe |
folly::Optional<TLSMessage> EncryptedReadRecordLayer::read(
folly::IOBufQueue& buf) {
auto decryptedBuf = getDecryptedBuf(buf);
if (!decryptedBuf) {
return folly::none;
}
TLSMessage msg;
// Iterate over the buffers while trying to find
// the first non-zero octet. This is much faster than
// first iterating and then trimming.
auto currentBuf = decryptedBuf->get();
bool nonZeroFound = false;
do {
currentBuf = currentBuf->prev();
size_t i = currentBuf->length();
while (i > 0 && !nonZeroFound) {
nonZeroFound = (currentBuf->data()[i - 1] != 0);
i--;
}
if (nonZeroFound) {
msg.type = static_cast<ContentType>(currentBuf->data()[i]);
}
currentBuf->trimEnd(currentBuf->length() - i);
} while (!nonZeroFound && currentBuf != decryptedBuf->get());
if (!nonZeroFound) {
throw std::runtime_error("No content type found");
}
msg.fragment = std::move(*decryptedBuf);
switch (msg.type) {
case ContentType::handshake:
case ContentType::alert:
case ContentType::application_data:
break;
default:
throw std::runtime_error(folly::to<std::string>(
"received encrypted content type ",
static_cast<ContentTypeType>(msg.type)));
}
if (!msg.fragment || msg.fragment->empty()) {
if (msg.type == ContentType::application_data) {
msg.fragment = folly::IOBuf::create(0);
} else {
throw std::runtime_error("received empty fragment");
}
}
return msg;
} | 0 | C++ | CWE-770 | Allocation of Resources Without Limits or Throttling | The software allocates a reusable resource or group of resources on behalf of an actor without imposing any restrictions on the size or number of resources that can be allocated, in violation of the intended security policy for that actor. | https://cwe.mitre.org/data/definitions/770.html | vulnerable |
bool chopOffDotted(string &domain)
{
if(domain.empty() || (domain.size()==1 && domain[0]=='.'))
return false;
string::size_type fdot=domain.find('.');
if(fdot == string::npos)
return false;
if(fdot==domain.size()-1)
domain=".";
else {
string::size_type remain = domain.length() - (fdot + 1);
char tmp[remain];
memcpy(tmp, domain.c_str()+fdot+1, remain);
domain.assign(tmp, remain);
}
return true;
} | 0 | C++ | CWE-399 | Resource Management Errors | Weaknesses in this category are related to improper management of system resources. | https://cwe.mitre.org/data/definitions/399.html | vulnerable |
BGD_DECLARE(void) gdImageWBMPCtx(gdImagePtr image, int fg, gdIOCtx *out)
{
int x, y, pos;
Wbmp *wbmp;
/* create the WBMP */
if((wbmp = createwbmp(gdImageSX(image), gdImageSY(image), WBMP_WHITE)) == NULL) {
gd_error("Could not create WBMP\n");
return;
}
/* fill up the WBMP structure */
pos = 0;
for(y = 0; y < gdImageSY(image); y++) {
for(x = 0; x < gdImageSX(image); x++) {
if(gdImageGetPixel(image, x, y) == fg) {
wbmp->bitmap[pos] = WBMP_BLACK;
}
pos++;
}
}
/* write the WBMP to a gd file descriptor */
if(writewbmp(wbmp, &gd_putout, out)) {
gd_error("Could not save WBMP\n");
}
/* des submitted this bugfix: gdFree the memory. */
freewbmp(wbmp);
} | 0 | C++ | CWE-415 | Double Free | The product calls free() twice on the same memory address, potentially leading to modification of unexpected memory locations. | https://cwe.mitre.org/data/definitions/415.html | vulnerable |