message | diff
---|---|
Prevent unsafe memcpy
Some tests cause a zero length input or output, which can mean the
allocated test output buffers can be zero length. Protect against
calling memcpy blindly in these situations. | @@ -3292,7 +3292,10 @@ void aead_multipart_encrypt( int key_type_arg, data_t *key_data,
part_length, part_data,
part_data_size, &output_part_length ) );
+ if( output_data && output_part_length )
+ {
memcpy( ( output_data + part_offset ), part_data, output_part_length );
+ }
part_offset += part_length;
output_length += output_part_length;
@@ -3312,13 +3315,19 @@ void aead_multipart_encrypt( int key_type_arg, data_t *key_data,
tag_buffer, tag_length,
&tag_size ) );
+ if( output_data && output_part_length )
+ {
memcpy( ( output_data + output_length ), final_data, output_part_length );
+ }
TEST_EQUAL(tag_length, tag_size);
output_length += output_part_length;
+ if( output_data && tag_length )
+ {
memcpy( ( output_data + output_length ), tag_buffer, tag_length );
+ }
output_length += tag_length;
@@ -3516,7 +3525,10 @@ void aead_multipart_encrypt_decrypt( int key_type_arg, data_t *key_data,
goto exit;
}
+ if( output_data && output_part_length )
+ {
memcpy( ( output_data + part_offset ), part_data, output_part_length );
+ }
part_offset += part_length;
output_length += output_part_length;
@@ -3547,7 +3559,10 @@ void aead_multipart_encrypt_decrypt( int key_type_arg, data_t *key_data,
goto exit;
}
+ if( output_data &&output_part_length )
+ {
memcpy( ( output_data + output_length ), final_data, output_part_length );
+ }
output_length += output_part_length;
@@ -3666,7 +3681,10 @@ void aead_multipart_encrypt_decrypt( int key_type_arg, data_t *key_data,
part_length, part_data,
part_data_size, &output_part_length ) );
+ if( output_data2 && output_part_length )
+ {
memcpy( ( output_data2 + part_offset ), part_data, output_part_length );
+ }
part_offset += part_length;
output_length2 += output_part_length;
@@ -3684,7 +3702,10 @@ void aead_multipart_encrypt_decrypt( int key_type_arg, data_t *key_data,
&output_part_length,
tag_buffer, tag_length ) );
+ if( output_data2 && output_part_length )
+ {
memcpy( ( output_data2 + output_length2 ), final_data, output_part_length);
+ }
output_length2 += output_part_length;
@@ -3872,7 +3893,10 @@ void aead_multipart_decrypt( int key_type_arg, data_t *key_data,
goto exit;
}
+ if( output_data && output_part_length )
+ {
memcpy( ( output_data + part_offset ), part_data, output_part_length );
+ }
part_offset += part_length;
output_length += output_part_length;
@@ -3903,7 +3927,10 @@ void aead_multipart_decrypt( int key_type_arg, data_t *key_data,
goto exit;
}
+ if( output_data && output_part_length )
+ {
memcpy( ( output_data + output_length ), final_data, output_part_length );
+ }
output_length += output_part_length;
|
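A note on the guard above: the C standard requires memcpy's pointer arguments to be valid even when the byte count is zero, so a test that legitimately produces an empty output buffer (NULL pointer, zero length) must not reach the call at all. A minimal, self-contained sketch of the same guard pattern outside the test suite (buffer names and sizes are made up):

```c
#include <stdint.h>
#include <string.h>

/* Copy only when the destination, the source, and the length are all usable.
 * Passing a NULL pointer to memcpy is undefined behaviour even for a
 * zero-length copy, so the guard has to come first. */
static void safe_append(uint8_t *dst, size_t offset,
                        const uint8_t *src, size_t len)
{
    if (dst != NULL && src != NULL && len != 0) {
        memcpy(dst + offset, src, len);
    }
}

int main(void)
{
    uint8_t out[16] = {0};
    const uint8_t part[4] = {1, 2, 3, 4};

    safe_append(out, 0, part, sizeof(part));  /* normal copy         */
    safe_append(NULL, 0, part, 0);            /* safely does nothing */
    return 0;
}
```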
Fix copy so that plugins can work on mac | @@ -666,12 +666,21 @@ elseif(APPLE)
endif()
function(move_lib)
if(TARGET ${ARGV0})
+ get_target_property(TARGET_TYPE ${ARGV0} TYPE)
+ if(${TARGET_TYPE} STREQUAL "MODULE_LIBRARY")
+ add_custom_command(TARGET lovr POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy
+ $<TARGET_FILE:${ARGV0}>
+ ${EXE_DIR}/$<TARGET_FILE_NAME:${ARGV0}>
+ )
+ else()
add_custom_command(TARGET lovr POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
$<TARGET_SONAME_FILE:${ARGV0}>
${EXE_DIR}/$<TARGET_SONAME_FILE_NAME:${ARGV0}>
)
endif()
+ endif()
endfunction()
move_lib(${LOVR_GLFW})
move_lib(${LOVR_LUA})
|
Fixed NULL pointer error if no label is provided after the rule keyword. | @@ -237,7 +237,13 @@ int ruledata()
{
case -1: err = 201; /* Unrecognized keyword */
break;
- case r_RULE: Nrules++;
+ case r_RULE: /* Missing the rule label -> set error */
+ if (parser->Ntokens != 2)
+ {
+ err = 201;
+ break;
+ }
+ Nrules++;
newrule();
RuleState = r_RULE;
break;
|
vere: fix refcounting in _ames_forward
Make sure we fully lose the list of lanes that's passed in, not just the
individual items. | @@ -627,12 +627,13 @@ _ames_forward(u3_panc* pac_u, u3_noun las)
}
{
+ u3_noun los = las;
u3_noun pac = _ames_serialize_packet(pac_u, c3y);
while (u3_nul != las) {
- _ames_ef_send(pac_u->sam_u, u3h(las), u3k(pac));
+ _ames_ef_send(pac_u->sam_u, u3k(u3h(las)), u3k(pac));
las = u3t(las);
}
- u3z(pac);
+ u3z(los); u3z(pac);
}
pac_u->sam_u->foq_d--;
|
fix casts in readUInt shifts | @@ -920,8 +920,8 @@ static inline uint32_t
readUInt (const uint8_t* b)
{
return (
- ((uint32_t) (b[0])) | ((uint32_t) (b[1] << 8)) |
- ((uint32_t) (b[2] << 16)) | ((uint32_t) (b[3] << 24)));
+ ((uint32_t) b[0]) | (((uint32_t) b[1]) << 8u) |
+ (((uint32_t) b[2]) << 16u) | (((uint32_t) b[3]) << 24u));
}
/**************************************/
|
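Why the parenthesisation matters: the `uint8_t` operands are promoted to (signed) `int` before the shift, so in the old form `b[3] << 24` could shift a one into the sign bit, which is undefined behaviour; casting to `uint32_t` *before* shifting keeps the whole expression unsigned. A small stand-alone illustration of the corrected form (byte values are arbitrary):

```c
#include <stdint.h>
#include <stdio.h>

/* Corrected little-endian read: each byte is widened to uint32_t *before*
 * the shift.  In the old form, `b[3] << 24` promoted the byte to a signed
 * int first, so any byte >= 0x80 shifted a one into the sign bit, which is
 * undefined behaviour in C. */
static uint32_t read_u32_le(const uint8_t *b)
{
    return ((uint32_t) b[0])         |
           (((uint32_t) b[1]) << 8)  |
           (((uint32_t) b[2]) << 16) |
           (((uint32_t) b[3]) << 24);
}

int main(void)
{
    const uint8_t bytes[4] = { 0x78, 0x56, 0x34, 0xF2 };
    printf("%08x\n", read_u32_le(bytes));   /* prints f2345678 */
    return 0;
}
```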
stm32/boards/PYBD_SF2: Configure LEDs as inverted, for LED.intensity(). | @@ -154,6 +154,7 @@ extern struct _spi_bdev_t spi_bdev2;
#define MICROPY_HW_USRSW_PRESSED (0)
// LEDs
+#define MICROPY_HW_LED_INVERTED (1) // LEDs are on when pin is driven low
#define MICROPY_HW_LED1 (pyb_pin_LED_RED)
#define MICROPY_HW_LED2 (pyb_pin_LED_GREEN)
#define MICROPY_HW_LED3 (pyb_pin_LED_BLUE)
|
Fix tool script for gcc build in cygwin | @@ -187,7 +187,11 @@ build_target() {
case "$1" in
debug)
build_subdir=debug
- add cc_flags -DDEBUG -ggdb -fsanitize=address -fsanitize=undefined
+ add cc_flags -DDEBUG -ggdb
+ # cygwin gcc doesn't seem to have this stuff, just elide for now
+ if [[ $os != cygwin ]]; then
+ add cc_flags -fsanitize=address -fsanitize=undefined
+ fi
if [[ $os = mac ]]; then
# Our mac clang does not have -Og
add cc_flags -O1
|
MUCH better tracking
Works with 2 lighthouses.
Tracking from both lighthouses agrees *much* better than before.
Inverting the tracker no longer screws up tracking.
Still much work to do to remove all axis-angle code and to speed up and
make predictable the algorithm that estimates the rotation of the
LH relative to the tracked object. | @@ -1161,8 +1161,8 @@ static void RefineRotationEstimateQuaternion(FLT *rotOut, Point lhPoint, FLT *in
//#ifdef TORI_DEBUG
//printf("+ %8.8f, (%8.8f, %8.8f, %8.8f) %f\n", newMatchFitness, point4[0], point4[1], point4[2], point4[3]);
//#endif
- g *= 1.02;
- printf("+");
+ g *= 1.04;
+ //printf("+");
//WhereIsTheTrackedObjectQuaternion(rotOut, lhPoint);
}
else
@@ -1171,7 +1171,8 @@ static void RefineRotationEstimateQuaternion(FLT *rotOut, Point lhPoint, FLT *in
//printf("- , %f\n", point4[3]);
//#endif
g *= 0.7;
- printf("-");
+ //printf("-");
+ //printf("%3f", lastMatchFitness);
}
@@ -1404,8 +1405,6 @@ static Point SolveForLighthouse(FLT posOut[3], FLT quatOut[4], TrackedObject *ob
// toriData->lastLhRotQuat[lh][3] = rotQuat[3];
//}
- WhereIsTheTrackedObjectAxisAngle(objPos2, rot, refinedEstimateGd);
- WhereIsTheTrackedObjectQuaternion(objPos, rotQuat, refinedEstimateGd);
FLT rotQuat2[4];
@@ -1415,6 +1414,10 @@ static Point SolveForLighthouse(FLT posOut[3], FLT quatOut[4], TrackedObject *ob
axisanglefromquat(&(rot2[3]), rot2, rotQuat);
+// WhereIsTheTrackedObjectAxisAngle(objPos, rot, refinedEstimateGd); // this is the original axis angle one
+ WhereIsTheTrackedObjectAxisAngle(objPos, rot2, refinedEstimateGd); // this one is axis angle, but using data derived by quaternions.
+ // WhereIsTheTrackedObjectQuaternion(objPos, rotQuat, refinedEstimateGd); <--------------This is hte one we need to use, might need to be fixed.
+
//{
//FLT tmpPos[3] = {refinedEstimateGd.x, refinedEstimateGd.y, refinedEstimateGd.z};
|
Test including `<gui/gui.hpp>` in TravisCI. | @@ -38,6 +38,7 @@ before_script:
script:
- 'if [[ "$BUILD_TOOL" == "autotools" ]]; then echo "#include <libtcod.h>" | gcc -xc -c -I$HOME/.local/include/libtcod -; fi'
- 'if [[ "$BUILD_TOOL" == "autotools" ]]; then echo "#include <libtcod.hpp>" | gcc -xc++ -c -I$HOME/.local/include/libtcod -; fi'
+- 'if [[ "$BUILD_TOOL" == "autotools" ]]; then echo "#include <gui/gui.hpp>" | gcc -xc++ -c -I$HOME/.local/include/libtcod -; fi'
- 'if [[ "$BUILD_TOOL" == "autotools" ]]; then (cd build/autotools && make check); fi'
- 'if [[ "$BUILD_TOOL" == "scons" ]]; then LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR ./unittest; fi'
- (cd python && pytest -v)
|
commit: parse_commit_options returns an int
This looks like an oversight from some years ago when we were extracting
functionality. This function returns an `int` and not a `VALUE` like it states. | @@ -425,7 +425,7 @@ struct commit_data {
* Note that parents may be set even when the function errors, so make
* sure to free this data.
*/
-static VALUE parse_commit_options(struct commit_data *out, git_repository *repo, VALUE rb_data)
+static int parse_commit_options(struct commit_data *out, git_repository *repo, VALUE rb_data)
{
VALUE rb_message, rb_tree, rb_parents, rb_ref;
int error = 0, parent_count, i;
|
Clear all the notify messages in cdbconn_discardResults()
Currently, notify messages are not used in GPDB by QE's except for sequence
nextval messages. While cleaning the connection for reuse, it is best to remove
all the notify messages as well, to be safe rather than sorry if we later start
using them for more things. | @@ -572,6 +572,7 @@ cdbconn_discardResults(SegmentDatabaseDescriptor *segdbDesc,
PGresult *pRes = NULL;
ExecStatusType stat;
int i = 0;
+ bool retval = true;
/* PQstatus() is smart enough to handle NULL */
while (NULL != (pRes = PQgetResult(segdbDesc->conn)))
@@ -584,13 +585,31 @@ cdbconn_discardResults(SegmentDatabaseDescriptor *segdbDesc,
PQerrorMessage(segdbDesc->conn));
if (stat == PGRES_FATAL_ERROR || stat == PGRES_BAD_RESPONSE)
- return true;
+ {
+ retval = true;
+ break;
+ }
if (i++ > retryCount)
- return false;
+ {
+ retval = false;
+ break;
+ }
+ }
+
+ /*
+ * Clear of all the notify messages as well.
+ */
+ PGnotify *notify = segdbDesc->conn->notifyHead;
+ while (notify != NULL)
+ {
+ PGnotify *prev = notify;
+ notify = notify->next;
+ PQfreemem(prev);
}
+ segdbDesc->conn->notifyHead = segdbDesc->conn->notifyTail = NULL;
- return true;
+ return retval;
}
/* Return if it's a bad connection */
|
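The patch above reaches into libpq's connection internals (`notifyHead`/`notifyTail`) because it lives inside server code that owns the connection. For an ordinary libpq client, the equivalent cleanup can be done through the public API, roughly as sketched below; the connection string is illustrative only.

```c
#include <stdio.h>
#include <libpq-fe.h>

/* Drain and free any queued NOTIFY messages using only the public libpq API:
 * PQconsumeInput() pulls pending data off the socket and PQnotifies() hands
 * back one PGnotify at a time until the queue is empty. */
static void discard_notifies(PGconn *conn)
{
    PGnotify *notify;

    if (PQconsumeInput(conn) == 0)
        fprintf(stderr, "consume failed: %s", PQerrorMessage(conn));

    while ((notify = PQnotifies(conn)) != NULL)
        PQfreemem(notify);
}

int main(void)
{
    PGconn *conn = PQconnectdb("dbname=postgres");   /* illustrative DSN */

    if (PQstatus(conn) == CONNECTION_OK)
        discard_notifies(conn);

    PQfinish(conn);
    return 0;
}
```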
pbio/doc/control: Copy-edit math.
Fix a few typos. | @@ -506,7 +506,7 @@ gray regions of Figure \ref{fig:positions}, which holds if:
%
\begin{align}
\label{eq:a:decreaselimit}
- - \dfrac{1}{2 \a_2}\lr{\w_0^2 - \w_3^2} < \th_3 - \th_0
+ \dfrac{1}{2 \a_2}\lr{\w_3^2 - \w_0^2} < \th_3 - \th_0
\end{align}
%
If it holds, then there is a nonzero constant speed phase ($\th_1 \neq \th_2$)
@@ -534,15 +534,15 @@ can feasibly slow down without overshooting. To handle this, we cut the
initial speed value to within the allowed bound causing an abrupt deceleration:
%
\begin{align}
- \w_0 := \sqrt{\w_3^2 - \dfrac{1}{2\a_2}\lr{\th_3-\th_0}}
+ \w_0 := \sqrt{\w_3^2 - 2\a_2\lr{\th_3-\th_0}}
\end{align}
%
By definition, this means that there is only a deceleration phase, so
-$\th_1 = \th_2 = \th_3$, $\w_1=\w_2=\w_3$, $t_1=t_2=t_3$ with:
+$\th_1 = \th_2 = \th_0$, $\w_1=\w_2=\w_0$, $t_1=t_2=t_0$ with:
%
\begin{align}
\label{eq:a:decreaselimittime}
- t_1 = \dfrac{\w_0-\w_3}{\a_2}
+ t_3 - t_2 = \dfrac{\w_0-\w_3}{\a_2}
\end{align}
\subsubsection{Selecting the standard, cut short, or decreasing case}
@@ -558,8 +558,6 @@ The procedure is then as follows:
\ref{sec:a:cutshortw3is0} or \ref{sec:a:cutshortw3isw1}).
\end{enumerate}
%
-For efficiency, the decreasing and standard case can share the same code if the
-sign of $\a_0$ is set appropriately.
\subsubsection{Reversing and unreversing the final and target speed}
\label{sec:a:reversing}
@@ -570,7 +568,7 @@ calculate the trajectory, and map the final result back to obtain the
originally requested command.
\begin{itemize}
- \item Let the boolean $a := \wt < 0$.
+ \item Let the boolean $a := \th_3 < \th_0$.
\item If $a$, then invert targets as:
$\th_3 := 2 \th_0 - \th_3$, $\wt := -\wt$, $\w_0 := -\w_0$,
$\w_3 := -\w_3$.
|
added assertions for assumptions in the IR | @@ -49,7 +49,7 @@ end
-- Does not currently recognize non-trivial constant expressions as being constant.
function constant_propagation.run(module)
- -- 1) Find what toplevel variables are initialized to a constant.
+ -- 1) Find which variables are initialized to a constant.
local data_of_func = {} -- list of FuncData
for _, func in ipairs(module.functions) do
@@ -79,14 +79,23 @@ function constant_propagation.run(module)
next_f.constant_val_of_upvalue[u_id] = const_init
elseif value._tag == "ir.Value.Upvalue" then
+ -- A `NewClosure` or `SetUpvalues` instruction can only reference values in outer scopes,
+ -- which exist in surrounding functions that have a numerically lesser `f_id`.
+ -- Due to this, we can reliable tie the constant initializer of an inner upvalue in a nested
+ -- function to the constantant initializer of the outer upvalue that it captures.
local const_init = f_data.constant_val_of_upvalue[value.id]
next_f.constant_val_of_upvalue[u_id] = const_init
+
+ else
+ typedecl.tag_error(value._tag)
end
end
end
end
+
+
for loc_id = 1, #func.typ.arg_types do
f_data.locvar_constant_init[loc_id] = false
end
@@ -101,13 +110,16 @@ function constant_propagation.run(module)
for cmd in ir.iter(func.body) do
local tag = cmd._tag
if tag == "ir.Cmd.SetUpvalues" then
- for u_id, value in ipairs(cmd.srcs) do
local next_f = assert(data_of_func[cmd.f_id])
-
- if value._tag == "ir.Value.LocalVar" and n_writes[value.id] ~= 1 then
+ for u_id, value in ipairs(cmd.srcs) do
+ if value._tag == "ir.Value.LocalVar" then
+ if n_writes[value.id] ~= 1 then
next_f.constant_val_of_upvalue[u_id] = false
+ end
elseif value._tag == "ir.Value.Upvalue" then
next_f.constant_val_of_upvalue[u_id] = f_data.constant_val_of_upvalue[value.id]
+ else
+ typedecl.tag_error(value._tag)
end
end
@@ -118,6 +130,32 @@ function constant_propagation.run(module)
end
end
end
+
+ -- Because of the way the previous compiler passes work, it is guaranteed that an upvalue that has a
+ -- constant initializer always references a local variable with a write count of 1. In other words,
+ -- IR like this is currently not possible:
+ -- ```
+ -- x1 <- 10
+ -- loop {
+ -- x2 = NewClosure()
+ -- x2.upvalues <- x1
+ -- x1 <- 20
+ -- }
+ -- ```
+ -- Since x1 is a "mutable upvalue", the assignment_conversion pass turns it into a record type.
+ -- With this loop, we assert this assumption.
+ for cmd in ir.iter(func.body) do
+ local tag = cmd._tag
+ if tag == "ir.Cmd.SetUpvalues" then
+ local next_f = assert(data_of_func[cmd.f_id])
+ for u_id, value in ipairs(cmd.srcs) do
+ if value._tag == "ir.Value.LocalVar" and next_f.constant_val_of_upvalue[u_id] then
+ assert(n_writes[value.id] == 1)
+ end
+ end
+ end
+ end
+
end
-- 3) Remove propagated upvalues from the capture list.
|
Fix win32u typo | @@ -2064,7 +2064,7 @@ VOID PhpGenerateSyscallLists(
{
static PH_STRINGREF ntdllPath = PH_STRINGREF_INIT(L"\\SystemRoot\\System32\\ntdll.dll");
static PH_STRINGREF win32kPath = PH_STRINGREF_INIT(L"\\SystemRoot\\System32\\win32k.sys");
- static PH_STRINGREF win32uPath = PH_STRINGREF_INIT(L"\\SystemRoot\\System32\\win32u.sys");
+ static PH_STRINGREF win32uPath = PH_STRINGREF_INIT(L"\\SystemRoot\\System32\\win32u.dll");
PPH_LIST ntdllSystemCallList = NULL;
PPH_LIST win32kSystemCallList = NULL;
PH_MAPPED_IMAGE mappedImage;
|
cast neg cache stats to long long | @@ -139,8 +139,8 @@ set_neg_cache_stats(struct worker* worker, struct ub_server_stats* svr,
return;
neg = ve->neg_cache;
lock_basic_lock(&neg->lock);
- svr->num_neg_cache_noerror = neg->num_neg_cache_noerror;
- svr->num_neg_cache_nxdomain = neg->num_neg_cache_nxdomain;
+ svr->num_neg_cache_noerror = (long long)neg->num_neg_cache_noerror;
+ svr->num_neg_cache_nxdomain = (long long)neg->num_neg_cache_nxdomain;
if(reset && !worker->env.cfg->stat_cumulative) {
neg->num_neg_cache_noerror = 0;
neg->num_neg_cache_nxdomain = 0;
|
unit-test/test_keystore: fix C lints
```
/bitbox02-firmware/test/unit-test/test_keystore.c:229:22: error: 4th argument 'host_nonce_commitment' (passed to 'host_commitment') looks like it might be swapped with the 5th, 'commitment' (passed to 'client_commitment_out') [readability-suspicious-call-argument,-warnings-as-errors]
assert_false(keys
``` | @@ -219,24 +219,32 @@ static void _test_keystore_secp256k1_nonce_commit(void** state)
{
uint8_t msg[32] = {0};
memset(msg, 0x88, sizeof(msg));
- uint8_t commitment[EC_PUBLIC_KEY_LEN] = {0};
- uint8_t host_nonce_commitment[32] = {0};
- memset(host_nonce_commitment, 0xAB, sizeof(host_nonce_commitment));
+ uint8_t client_commitment[EC_PUBLIC_KEY_LEN] = {0};
+ uint8_t host_commitment[32] = {0};
+ memset(host_commitment, 0xAB, sizeof(host_commitment));
{
keystore_mock_unlocked(NULL, 0, NULL);
// fails because keystore is locked
assert_false(keystore_secp256k1_nonce_commit(
- _keypath, sizeof(_keypath) / sizeof(uint32_t), msg, host_nonce_commitment, commitment));
+ _keypath,
+ sizeof(_keypath) / sizeof(uint32_t),
+ msg,
+ host_commitment,
+ client_commitment));
}
{
keystore_mock_unlocked(_mock_seed, sizeof(_mock_seed), _mock_bip39_seed);
assert_true(keystore_secp256k1_nonce_commit(
- _keypath, sizeof(_keypath) / sizeof(uint32_t), msg, host_nonce_commitment, commitment));
+ _keypath,
+ sizeof(_keypath) / sizeof(uint32_t),
+ msg,
+ host_commitment,
+ client_commitment));
const uint8_t expected_commitment[EC_PUBLIC_KEY_LEN] =
"\x02\xfd\xcf\x79\xf9\xc0\x3f\x6a\xcc\xc6\x56\x95\xa1\x90\x82\xe3\x0b\xfb\x9e\xdc\x93"
"\x04\x5a\x03\x05\x8a\x99\x09\xe4\x9b\x1a\x37\x7b";
- assert_memory_equal(expected_commitment, commitment, sizeof(commitment));
+ assert_memory_equal(expected_commitment, client_commitment, sizeof(client_commitment));
}
}
|
Redact x-amz-security-token header in errors.
This header should not be displayed to the user in error output, even if it is useless by itself. | @@ -861,6 +861,7 @@ storageS3New(
driver->headerRedactList = strLstNew();
strLstAdd(driver->headerRedactList, HTTP_HEADER_AUTHORIZATION_STR);
strLstAdd(driver->headerRedactList, S3_HEADER_DATE_STR);
+ strLstAdd(driver->headerRedactList, S3_HEADER_TOKEN_STR);
this = storageNew(
STORAGE_S3_TYPE_STR, path, 0, 0, write, pathExpressionFunction, driver, driver->interface);
|
Update readme.md
Fixed link to Crundal's presentation | @@ -390,7 +390,7 @@ how the design of _tbb_ avoids the false cache line sharing.
Available at <https://github.com/kuszmaul/SuperMalloc/tree/master/tests>
- \[6] Timothy Crundal. _Reducing Active-False Sharing in TCMalloc._
- 2016. <http://courses.cecs.anu.edu.au/courses/CSPROJECTS/16S1/Reports/Timothy*Crundal*Report.pdf>. CS16S1 project at the Australian National University.
+ 2016. <http://courses.cecs.anu.edu.au/courses/CSPROJECTS/16S1/Reports/Timothy_Crundal_Report.pdf>. CS16S1 project at the Australian National University.
- \[7] Alexey Kukanov, and Michael J Voss.
_The Foundations for Scalable Multi-Core Software in Intel Threading Building Blocks._
|
[scripts] Split trace at `trace` csr access | @@ -751,14 +751,8 @@ def main():
'cfg_buf': deque(),
'curr_cfg': None
}
- # all values initially 0, also 'start' time of measurement 0
- perf_metrics_bench = [defaultdict(int)]
- # all values initially 0, also 'start' time of measurement 0
- perf_metrics_setup = [defaultdict(int)]
- perf_metrics_bench[0]['start'] = None
- perf_metrics_setup[0]['start'] = None
- # Initial code belongs to setup phase
- perf_metrics = perf_metrics_setup
+ perf_metrics = [defaultdict(int)]
+ perf_metrics[0]['start'] = None
section = 0
# Parse input line by line
for line in line_iter:
@@ -768,33 +762,27 @@ def main():
False, time_info, args.offl, not args.saddr, args.permissive)
if perf_metrics[0]['start'] is None:
perf_metrics[0]['start'] = time_info[1]
- # Start a new benchmark section after 'csrw cycle' instruction
- if 'cycle' in line:
+ # Start a new benchmark section after 'csrw trace' instruction
+ if 'trace' in line:
perf_metrics[-1]['end'] = time_info[1]
perf_metrics.append(defaultdict(int))
- if 'csrw' in ann_insn:
- # Start of a benchmark section
- perf_metrics = perf_metrics_bench
perf_metrics[-1]['section'] = section
- else:
- # End of a benchmark section
- perf_metrics = perf_metrics_setup
- section += 1
perf_metrics[-1]['start'] = time_info[1]
+ section += 1
if not empty:
print(ann_insn)
else:
break # Nothing more in pipe, EOF
args.infile.close()
perf_metrics[-1]['end'] = time_info[1]
- # Evaluate only the benchmarks
- if perf_metrics_bench[0]['start'] is not None:
- perf_metrics = perf_metrics_bench
- else:
- perf_metrics = perf_metrics_setup
# Remove last emtpy entry
- if not bool(perf_metrics[-1]):
- perf_metrics.pop()
+ if perf_metrics[-1]['start'] == perf_metrics[-1]['end']:
+ perf_metrics = perf_metrics[:-1]
+ if not perf_metrics or perf_metrics[0]['start'] is None:
+ # Empty list
+ sys.stderr.write('WARNING: Empty trace file ({}).\n'
+ .format(args.infile.name))
+ return 0
# Compute metrics
eval_perf_metrics(perf_metrics, core_id)
# Add metadata
|
Fix mirror PowerShell Script (try 2) | @@ -35,7 +35,7 @@ git reset --hard origin/$Branch
# Push to the AzDO repo.
$Result = (git push azdo-mirror $Branch)
-if ($Result.Contains("Head is now at")) {
+if (($Result -as [String]).Contains("Head is now at")) {
Write-Host "Successfully mirrored latest changes to https://mscodehub.visualstudio.com/msquic/_git/msquic"
} else {
Write-Error $Result
|
Make match macro prettier. | @@ -1079,7 +1079,7 @@ value, one key will be ignored."
[pattern expr onmatch seen]
(cond
- (and (symbol? pattern) (not (keyword? pattern)))
+ (symbol? pattern)
(if (get seen pattern)
~(if (= ,pattern ,expr) ,(onmatch) ,sentinel)
(do
@@ -1138,8 +1138,7 @@ value, one key will be ignored."
((fn aux [i]
(cond
(= i len-1) (get cases i)
- (< i len-1) (do
- (def $res (gensym))
+ (< i len-1) (with-syms [$res]
~(if (= ,sentinel (def ,$res ,(match-1 (get cases i) $x (fn [] (get cases (inc i))) @{})))
,(aux (+ 2 i))
,$res)))) 0)))
|
bricks/movehub: Disable iodevices module.
This saves about 500 bytes now, and more considering future additions like UARTDevice and I2CDevice. | #define PYBRICKS_PY_EXPERIMENTAL (0)
#define PYBRICKS_PY_GEOMETRY (0)
#define PYBRICKS_PY_HUBS (1)
-#define PYBRICKS_PY_IODEVICES (1)
+#define PYBRICKS_PY_IODEVICES (0)
#define PYBRICKS_PY_MEDIA (0)
#define PYBRICKS_PY_PARAMETERS (1)
#define PYBRICKS_PY_PARAMETERS_BUTTON (1)
|
[core] comment out ck_getenv_s() (unused)
ck_getenv_s() not currently used in lighttpd;
lighttpd process env is stable | @@ -149,6 +149,7 @@ ck_memclear_s (void * const s, const rsize_t smax, rsize_t n)
}
+#if 0 /*(not currently used in lighttpd; lighttpd process env is stable)*/
errno_t
ck_getenv_s (size_t * const restrict len,
char * const restrict value, const rsize_t maxsize,
@@ -198,6 +199,7 @@ ck_getenv_s (size_t * const restrict len,
#endif
}
+#endif
errno_t
|
FlatValuePool should use a vector of items | @@ -45,10 +45,11 @@ struct FlatValuePool
};
struct Item
{
+ HashType hash;
uint32_t useCount;
int32_t tdbOffset;
- Item(int32_t aTDBOffset);
+ Item(HashType aHash, int32_t aTDBOffset);
void DecUseCount();
void IncUseCount();
RED4ext::TweakDB::FlatValue* ToFlatValue();
@@ -70,7 +71,7 @@ struct FlatValuePool
private:
Type poolType;
- std::map<HashType, std::vector<Item>> itemPools;
+ std::vector<Item> items;
};
bool flatValuePoolsInitialized = false;
std::vector<FlatValuePool> flatValuePools;
@@ -286,8 +287,9 @@ bool TweakDB::UpdateRecord(sol::object aValue)
}
}
-FlatValuePool::Item::Item(int32_t aTDBOffset)
- : useCount(0),
+FlatValuePool::Item::Item(HashType aHash, int32_t aTDBOffset)
+ : hash(aHash),
+ useCount(0),
tdbOffset(aTDBOffset)
{
}
@@ -326,12 +328,10 @@ FlatValuePool::Item* FlatValuePool::Get(const RED4ext::CStackType& acStackType)
FlatValuePool::Item* FlatValuePool::Get(const RED4ext::CStackType& acStackType, HashType aHash)
{
- const auto it = itemPools.find(aHash);
- if (it == itemPools.end())
- return nullptr;
-
- for (auto& item : it->second)
+ for (auto& item : items)
{
+ if (item.hash != aHash) continue;
+
const auto* pFlatValue = item.ToFlatValue();
RED4ext::CStackType poolStackType;
pFlatValue->GetValue(&poolStackType);
@@ -361,17 +361,6 @@ FlatValuePool::Item* FlatValuePool::GetOrCreate(const RED4ext::CStackType& acSta
if (pItem != nullptr)
return pItem;
- std::vector<Item>* pItemPool;
- const auto it = itemPools.find(aHash);
- if (it == itemPools.end())
- {
- pItemPool = &itemPools.emplace(aHash, std::vector<Item>{}).first->second;
- }
- else
- {
- pItemPool = &it->second;
- }
-
if (tdbOffset == -1)
{
// TODO: Try to reuse items with useCount == 0 if it doesn't tank performance
@@ -382,11 +371,11 @@ FlatValuePool::Item* FlatValuePool::GetOrCreate(const RED4ext::CStackType& acSta
// Failed to create FlatValue
return nullptr;
}
- return &pItemPool->emplace_back(Item(pFlatValue->ToTDBOffset()));
+ return &items.emplace_back(Item(aHash, pFlatValue->ToTDBOffset()));
}
else
{
- return &pItemPool->emplace_back(Item(tdbOffset));
+ return &items.emplace_back(Item(aHash, tdbOffset));
}
}
|
fix versus damage | @@ -18457,7 +18457,7 @@ entity *spawn(float x, float z, float a, int direction, char *name, int index, s
e->speedmul = 1;
ent_set_colourmap(e, 0);
e->lifespancountdown = model->lifespan; // new life span countdown
- if((e->modeldata.type & TYPE_PLAYER) && ((level && level->nohit == DAMAGE_FROM_PLAYER_ON) || savedata.mode))
+ if((e->modeldata.type & TYPE_PLAYER) && ((level && level->nohit == DAMAGE_FROM_PLAYER_OFF) || savedata.mode))
{
e->modeldata.hostile &= ~TYPE_PLAYER;
e->modeldata.candamage &= ~TYPE_PLAYER;
@@ -31303,7 +31303,7 @@ entity *knife_spawn(char *name, int index, float x, float z, float a, int direct
{
e->modeldata.candamage = self->modeldata.candamage;
}
- if((self->modeldata.type & TYPE_PLAYER) && ((level && level->nohit == DAMAGE_FROM_PLAYER_ON) || savedata.mode))
+ if((self->modeldata.type & TYPE_PLAYER) && ((level && level->nohit == DAMAGE_FROM_PLAYER_OFF) || savedata.mode))
{
e->modeldata.hostile &= ~TYPE_PLAYER;
e->modeldata.candamage &= ~TYPE_PLAYER;
|
examples/elf: Drop the 0x when printing pointers. | @@ -42,14 +42,14 @@ int main(int argc, char **argv)
/* Print arguments */
printf("argc\t= %d\n", argc);
- printf("argv\t= 0x%p\n", argv);
+ printf("argv\t= %p\n", argv);
for (i = 0; i < argc; i++)
{
printf("argv[%d]\t= ", i);
if (argv[i])
{
- printf("(0x%p) \"%s\"\n", argv[i], argv[i]);
+ printf("(%p) \"%s\"\n", argv[i], argv[i]);
}
else
{
@@ -57,7 +57,7 @@ int main(int argc, char **argv)
}
}
- printf("argv[%d]\t= 0x%p\n", argc, argv[argc]);
+ printf("argv[%d]\t= %p\n", argc, argv[argc]);
printf("Goodbye, world!\n");
return 0;
}
|
vell: Limit input current to fraction of negotiated limit
Limit input current to 96% of negotiated limit
BRANCH=none
TEST=Connect adapter then check input current. | @@ -82,9 +82,14 @@ int board_set_active_charge_port(int port)
return EC_SUCCESS;
}
-__overridable void board_set_charge_limit(int port, int supplier, int charge_ma,
+void board_set_charge_limit(int port, int supplier, int charge_ma,
int max_ma, int charge_mv)
{
+ /*
+ * Limit the input current to 96% negotiated limit,
+ * to account for the charger chip margin.
+ */
+ charge_ma = charge_ma * 96 / 100;
charge_set_input_current_limit(MAX(charge_ma,
CONFIG_CHARGER_INPUT_CURRENT),
charge_mv);
|
cpeng: update examples in readme | @@ -93,8 +93,8 @@ hps will enumerate the AFU for that subcommand before executing it.
mode which will increment the upper 16 bits in the HPS2HOST register.
## EXAMPLES ##
-The following example loads the image from a file called 'hps.img' into
-offset 0x0000 in one chunk
+The following example loads the image from a file called 'u-boot.itb' into
+offset 0x2000000 in chunks sizes of 4096 bytes
```console
hps cpeng
|
Updated json file for release. | "version": "1.0.0"
}
]
+ },
+ {
+ "name": "OpenCR",
+ "architecture": "OpenCR",
+ "version": "1.4.2",
+ "category": "Arduino",
+ "help": {
+ "online": "https://github.com/ROBOTIS-GIT/OpenCR"
+ },
+ "url": "https://github.com/ROBOTIS-GIT/OpenCR/releases/download/1.4.2/opencr_core_1.4.2.tar.bz2",
+ "archiveFileName": "opencr_core_1.4.2.tar.bz2",
+ "checksum": "SHA-256:47fd04506b0754cef1bc68151909e75c6d83f7d6011f8b63d48588020e64ea9c",
+ "size": "2507212",
+ "help": {
+ "online": "http://emanual.robotis.com/docs/en/parts/controller/opencr10/"
+ },
+ "boards": [
+ {"name": "OpenCR"}
+ ],
+ "toolsDependencies": [
+ {
+ "packager": "OpenCR",
+ "name": "opencr_gcc",
+ "version": "5.4.0-2016q2"
+ },
+ {
+ "packager": "OpenCR",
+ "name": "opencr_tools",
+ "version": "1.0.0"
+ }
+ ]
}
],
"tools":[
|
Update sample_c | @@ -38,11 +38,29 @@ uint8_t * load_mrb_file(const char *filename)
void mrubyc(uint8_t *mrbbuf)
{
- mrbc_init(memory_pool, MEMORY_SIZE);
+ mrbc_init_alloc(memory_pool, MEMORY_SIZE);
+ init_static();
- if( mrbc_create_task(mrbbuf, 0) != NULL ){
- mrbc_run();
+ struct VM *vm = mrbc_vm_open(NULL);
+ if( vm == 0 ) {
+ fprintf(stderr, "Error: Can't open VM.\n");
+ return;
}
+
+ if( mrbc_load_mrb(vm, mrbbuf) != 0 ) {
+ fprintf(stderr, "Error: Illegal bytecode.\n");
+ return;
+ }
+
+ mrbc_vm_begin(vm);
+
+ #ifdef MRBC_DEBUG
+ vm->flag_debug_mode = 1;
+ #endif
+
+ mrbc_vm_run(vm);
+ mrbc_vm_end(vm);
+ mrbc_vm_close(vm);
}
|
Docker: Sort Alpine Linux packages alphabetically | @@ -2,13 +2,13 @@ FROM alpine:3.8
RUN apk update \
&& apk add --no-cache \
+ bison \
build-base \
cmake \
- git \
- file \
curl \
- yaml-cpp-dev \
- bison
+ file \
+ git \
+ yaml-cpp-dev
# Google Test
ENV GTEST_ROOT=/opt/gtest
|
IO support on win32/rhosim | @@ -650,7 +650,17 @@ void
rb_io_check_closed(rb_io_t *fptr)
{
rb_io_check_initialized(fptr);
+
+//RHO, MSDN docs:
+//In Visual C++ 2005, there is a behavior change.
+//If stdout or stderr is not associated with an output stream (for example, in a Windows application without a console window), the file descriptor returned is -2.
+//In previous versions, the file descriptor returned was -1. This change allows applications to distinguish this condition from an error.
+#ifdef _WIN32
+ if ((fptr->fd < 0) && (fptr->fd != -2) ) {
+#else
if (fptr->fd < 0) {
+#endif
+//RHO
rb_raise(rb_eIOError, closed_stream);
}
}
|
doc: added a component-specific property description for FreeRTOS: ORIG_INCLUDE_PATH | @@ -14,6 +14,8 @@ entries of arbitrary lengths.
:ref:`hooks`: ESP-IDF FreeRTOS hooks provides support for registering extra Idle and
Tick hooks at run time. Moreover, the hooks can be asymmetric amongst both CPUs.
+:ref:`component-specific-properties`: Currently added only one component specific property `ORIG_INCLUDE_PATH`.
+
.. _ring-buffers:
@@ -519,3 +521,13 @@ Hooks API Reference
-------------------
.. include-build-file:: inc/esp_freertos_hooks.inc
+
+
+.. _component-specific-properties:
+
+Component Specific Properties
+-----------------------------
+
+Besides standart component variables that could be gotten with basic cmake build properties FreeRTOS component also provides an arguments (only one so far) for simpler integration with other modules:
+
+- `ORIG_INCLUDE_PATH` - contains an absolute path to freertos root include folder. Thus instead of `#include "freertos/FreeRTOS.h"` you can refer to headers directly: `#include "FreeRTOS.h"`.
|
Change semantics of -l flag to be more useful. | 3)
"-" (fn [&] (set *handleopts* false) 1)
"l" (fn [i &]
- (dofile (get process/args (+ i 1))
+ (import* (get process/args (+ i 1))
:prefix "" :exit *exit-on-error*)
2)
"e" (fn [i &]
|
Disable code generation on dry-run. | @@ -247,6 +247,14 @@ eval
pod2usage();
}
+ ################################################################################################################################
+ # Disable code generation on dry-run
+ ################################################################################################################################
+ if ($bDryRun)
+ {
+ $bNoGen = true;
+ }
+
################################################################################################################################
# Update options for --coverage-summary
################################################################################################################################
|
avoid asprintf | @@ -69,12 +69,13 @@ static int
prepare_http_response(struct http_flow *http_flow, unsigned char **buffer, uint32_t *buffer_len) {
int header_length = 0;
- unsigned char *header_buffer = NULL;
int payload_length = 0;
unsigned char *payload_buffer = NULL;
+ unsigned char *header_buffer = NULL;
int i = 0;
char misc_buffer[512];
+
if (config_log_level >= 2) {
fprintf(stderr, "%s()\n", __func__);
}
@@ -127,8 +128,14 @@ prepare_http_response(struct http_flow *http_flow, unsigned char **buffer, uint3
exit(EXIT_FAILURE);
}
+ header_buffer = malloc(512);
+ if (header_buffer == NULL) {
+ fprintf(stderr, "%s - malloc failed\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+
// prepare response header
- header_length = asprintf((char **)&header_buffer,
+ header_length = snprintf((char *) header_buffer, 1024,
"HTTP/1.1 200 OK\r\n"
"Server: NEAT super fancy webserver\r\n"
"Content-Length: %u\r\n"
|
doc: remove "Glossary" icon box on home page
Adding a big box for the glossary disturbed the home page layout and is
overkill for this one document. Adding the glossary to the left
navigation menu is sufficient. | @@ -64,13 +64,6 @@ through an open source platform.
</a>
<p>Supported hardware platforms and boards</p>
</li>
- <li class="grid-item">
- <a href="glossary.html">
- <img alt="" src="_static/images/ACRNlogo80w.png"/>
- <h2>Glossary<br/>of Terms</h2>
- </a>
- <p>Glossary of useful terms</p>
- </li>
</ul>
|
Configure: allow conditions and variable values to have variable references
This will allow variables to be built from other variables, and conditions
to be based on variable contents. | @@ -1881,7 +1881,7 @@ if ($builder eq "unified") {
qr/^\s*IF\[((?:\\.|[^\\\]])*)\]\s*$/
=> sub {
if (! @skip || $skip[$#skip] > 0) {
- push @skip, !! $1;
+ push @skip, !! $expand_variables->($1);
} else {
push @skip, -1;
}
@@ -1890,7 +1890,7 @@ if ($builder eq "unified") {
=> sub { die "ELSIF out of scope" if ! @skip;
die "ELSIF following ELSE" if abs($skip[$#skip]) == 2;
$skip[$#skip] = -1 if $skip[$#skip] != 0;
- $skip[$#skip] = !! $1
+ $skip[$#skip] = !! $expand_variables->($1)
if $skip[$#skip] == 0; },
qr/^\s*ELSE\s*$/
=> sub { die "ELSE out of scope" if ! @skip;
@@ -1902,7 +1902,9 @@ if ($builder eq "unified") {
qr/^\s*${variable_re}\s*=\s*(.*?)\s*$/
=> sub {
if (!@skip || $skip[$#skip] > 0) {
- $variables{$1} = $2;
+ my $n = $1;
+ my $v = $2;
+ $variables{$n} = $expand_variables->($v);
}
},
qr/^\s*SUBDIRS\s*=\s*(.*)\s*$/
|
vell: Lower host shutdown percentage to 3%
Per b/215338892, lower the host shutdown percentage to 3%.
BRANCH=none
TEST=Pass battery RTC battery life on S5. | #define CONFIG_KEYBOARD_FACTORY_TEST
#define CONFIG_KEYBOARD_REFRESH_ROW3
+#undef CONFIG_BATT_HOST_SHUTDOWN_PERCENTAGE
+#define CONFIG_BATT_HOST_SHUTDOWN_PERCENTAGE 3
+
/*
* Older boards have a different ADC assignment.
*/
|
Git Resolver: Fix detection of LibGit2
Before this update detecting LibGit2 would fail, if we treated warnings
as errors (`-Werror`). The cause of this problem was that compiling
`gitresolver_test.c` produced a warning about an incorrect return value. | #include <git2.h>
-void main (void)
+int main (void)
{
git_libgit2_init ();
git_index_add_frombuffer (NULL, NULL, NULL, (size_t) NULL);
git_libgit2_shutdown ();
+ return 0;
}
|
Python: improving ASGI http send message processing. | @@ -262,22 +262,23 @@ nxt_py_asgi_http_send(PyObject *self, PyObject *dict)
nxt_unit_req_debug(http->req, "asgi_http_send type is '%.*s'",
(int) type_len, type_str);
- if (type_len == (Py_ssize_t) response_start.length
- && memcmp(type_str, response_start.start, type_len) == 0)
- {
- return nxt_py_asgi_http_response_start(http, dict);
+ if (nxt_unit_response_is_init(http->req)) {
+ if (nxt_str_eq(&response_body, type_str, (size_t) type_len)) {
+ return nxt_py_asgi_http_response_body(http, dict);
}
- if (type_len == (Py_ssize_t) response_body.length
- && memcmp(type_str, response_body.start, type_len) == 0)
- {
- return nxt_py_asgi_http_response_body(http, dict);
+ return PyErr_Format(PyExc_RuntimeError,
+ "Expected ASGI message 'http.response.body', "
+ "but got '%U'", type);
}
- nxt_unit_req_error(http->req, "asgi_http_send: unexpected 'type': '%.*s'",
- (int) type_len, type_str);
+ if (nxt_str_eq(&response_start, type_str, (size_t) type_len)) {
+ return nxt_py_asgi_http_response_start(http, dict);
+ }
- return PyErr_Format(PyExc_AssertionError, "unexpected 'type': '%U'", type);
+ return PyErr_Format(PyExc_RuntimeError,
+ "Expected ASGI message 'http.response.start', "
+ "but got '%U'", type);
}
|
adding libm __fpclassify and __fpclassifyf | @@ -191,8 +191,8 @@ GOW(fmodf, fFff)
// __fmodf_finite
// __fmod_finite
GOW(fmodl, DFDD)
-// __fpclassify
-// __fpclassifyf
+GO(__fpclassify, iFd)
+GO(__fpclassifyf, iFf)
GOW(frexp, dFdp)
GOW(frexpf, fFfp)
GO2(frexpl, LFLp, frexp)
|
tools: trying to fix key generation in gen-gpg-testkey
A big thank you goes to the GnuPG developers for guiding me in the right direction.
This is hopefully the cure for . | */
#include <gpgme.h>
+#include <locale.h>
#include <stdio.h>
-#define ELEKTRA_GEN_GPG_TESTKEY_UNUSED __attribute__ ((unused))
#define ELEKTRA_GEN_GPG_TESTKEY_DESCRIPTION "elektra testkey (gen-gpg-testkey)"
-gpgme_error_t passphrase_cb (void * hook ELEKTRA_GEN_GPG_TESTKEY_UNUSED, const char * uid_hint ELEKTRA_GEN_GPG_TESTKEY_UNUSED,
- const char * passphrase_info ELEKTRA_GEN_GPG_TESTKEY_UNUSED, int prev_was_bad ELEKTRA_GEN_GPG_TESTKEY_UNUSED,
- int fd)
-{
- gpgme_io_writen (fd, "\n", 2);
- return 0;
-}
-
int main (void)
{
gpgme_error_t err;
@@ -29,6 +21,13 @@ int main (void)
gpgme_genkey_result_t res;
gpgme_check_version (NULL);
+
+ setlocale (LC_ALL, "");
+ gpgme_set_locale (NULL, LC_CTYPE, setlocale (LC_CTYPE, NULL));
+#ifndef HAVE_W32_SYSTEM
+ gpgme_set_locale (NULL, LC_MESSAGES, setlocale (LC_MESSAGES, NULL));
+#endif
+
err = gpgme_engine_check_version (GPGME_PROTOCOL_OpenPGP);
if (err)
{
@@ -45,28 +44,28 @@ int main (void)
}
// configure gpgme
- gpgme_set_pinentry_mode (ctx, GPGME_PINENTRY_MODE_LOOPBACK);
- gpgme_set_passphrase_cb (ctx, passphrase_cb, NULL);
+ gpgme_set_protocol (ctx, GPGME_PROTOCOL_OpenPGP);
+ gpgme_set_armor (ctx, 1);
// look for the elektra key
err = gpgme_op_keylist_start (ctx, ELEKTRA_GEN_GPG_TESTKEY_DESCRIPTION, 1 /* secret keys only! */);
if (err)
{
- fprintf (stderr, "gpgme error: %s\n", gpgme_strerror (err));
+ fprintf (stderr, "error while looking for the key: %s\n", gpgme_strerror (err));
goto cleanup;
}
err = gpgme_op_keylist_next (ctx, &key);
if (err && gpg_err_code (err) != GPG_ERR_EOF)
{
- fprintf (stderr, "gpgme error: %s\n", gpgme_strerror (err));
+ fprintf (stderr, "error while looking for the key: %s\n", gpgme_strerror (err));
goto cleanup;
}
if (err && gpg_err_code (err) == GPG_ERR_EOF)
{
- err = gpgme_op_createkey (ctx, ELEKTRA_GEN_GPG_TESTKEY_DESCRIPTION, NULL, 0, 0, NULL,
- GPGME_CREATE_SIGN | GPGME_CREATE_ENCR);
+ // generate a new key
+ err = gpgme_op_createkey (ctx, ELEKTRA_GEN_GPG_TESTKEY_DESCRIPTION, NULL, 0, 0, NULL, GPGME_CREATE_NOPASSWD);
if (err)
{
@@ -80,7 +79,7 @@ int main (void)
}
else
{
- // display the key ID
+ // display the ID of the existing test key
fprintf (stdout, "%s", key->subkeys->fpr);
gpgme_key_release (key);
gpgme_op_keylist_end (ctx);
|
put run_epsdb into aomp-dev branch, so aomp_clone_test will succeed | @@ -9,6 +9,8 @@ aompdir="$(dirname "$parentdir")"
set -x
+(cd $aompdir/bin ; git checkout aomp-dev )
+
# we have a new Target memory manager appearing soon in aomp 12
# it seems to either cause or reveal double free or corruption
# in lots of tests. This set to 0, disables the new TMM.
|
add profil log | //#define MAP_DEBUG
+//#define MAP_PROFIL
// we don't want to share them
@@ -199,6 +200,10 @@ static void setMapColumn(Map *map, u16 column, u16 x, u16 y)
static void setMapColumnEx(Map *map, u16 column, u16 y, u16 h, u16 xm, u16 ym)
{
+#ifdef MAP_PROFIL
+ u16 start = GET_VCOUNTER;
+#endif
+
const u16 addr = VDP_getPlaneAddress(map->plane, column * 2, y * 2);
const u16 pw = planeWidth;
@@ -207,6 +212,11 @@ static void setMapColumnEx(Map *map, u16 column, u16 y, u16 h, u16 xm, u16 ym)
// get temp buffer for second tile column and schedule DMA
u16* bufCol2 = DMA_allocateAndQueueDma(DMA_VRAM, addr + 2, h * 2, pw * 2);
+#ifdef MAP_PROFIL
+ u16 end = GET_VCOUNTER;
+ KLog_S2("DMA_allocateAndQueueDma - duration=", end-start, " h=", h);
+#endif
+
#if (LIB_LOG_LEVEL >= LOG_LEVEL_ERROR)
if (!bufCol1 || !bufCol2)
{
@@ -271,6 +281,10 @@ static void setMapRowEx(Map *map, u16 row, u16 x, u16 w, u16 xm, u16 ym)
static void prepareMapDataColumn(const MapDefinition *mapDef, u16 baseTile, u16 *bufCol1, u16 *bufCol2, u16 xm, u16 ym, u16 height)
{
+#ifdef MAP_PROFIL
+ u16 start = GET_VCOUNTER;
+#endif
+
// we can add both base index and base palette
const u16 baseAttr = baseTile & (TILE_INDEX_MASK | TILE_ATTR_PALETTE_MASK);
@@ -371,6 +385,11 @@ static void prepareMapDataColumn(const MapDefinition *mapDef, u16 baseTile, u16
block += blockFixedOffset;
}
}
+
+#ifdef MAP_PROFIL
+ u16 end = GET_VCOUNTER;
+ KLog_S3("prepareMapDataColumn - start=", start, " end=", end, " h=", height);
+#endif
}
static void prepareMapDataRow(const MapDefinition *mapDef, u16 baseTile, u16 *bufRow1, u16 *bufRow2, u16 xm, u16 ym, u16 width)
|
Change Binance Smart Chain to BSC | @@ -210,7 +210,7 @@ APPNAME = "Theta"
else ifeq ($(CHAIN),bsc)
APP_LOAD_PARAMS += --path "44'/60'"
DEFINES += CHAINID_UPCASE=\"BSC\" CHAINID_COINNAME=\"BNB\" CHAIN_KIND=CHAIN_KIND_BSC CHAIN_ID=56
-APPNAME = "Binance Smart Chain"
+APPNAME = "BSC"
else
ifeq ($(filter clean,$(MAKECMDGOALS)),)
$(error Unsupported CHAIN - use ethereum, ropsten, ethereum_classic, expanse, poa, artis_sigma1, artis_tau1, rsk, rsk_testnet, ubiq, wanchain, kusd, musicoin, pirl, akroma, atheios, callisto, ethersocial, ellaism, ether1, ethergem, gochain, mix, reosc, hpb, tomochain, tobalaba, dexon, volta, ewc, webchain, thundercore, flare, flare_coston, theta)
|
Ensure protocol is properly parsed given a full request string.
This attempts to extract the protocol given the full request string
(METHOD REQ PROTO) by first finding the last space character and
comparing the remaining string against a valid HTTP protocol. | @@ -796,14 +796,16 @@ contains_usecs (void)
*
* If not valid, 1 is returned.
* If valid, 0 is returned. */
-static int
+static const char *
invalid_protocol (const char *token)
{
const char *lookfor;
- return !((lookfor = "HTTP/1.0", !strncmp (token, lookfor, 8)) ||
+ if ((lookfor = "HTTP/1.0", !strncmp (token, lookfor, 8)) ||
(lookfor = "HTTP/1.1", !strncmp (token, lookfor, 8)) ||
- (lookfor = "HTTP/2", !strncmp (token, lookfor, 6)));
+ (lookfor = "HTTP/2", !strncmp (token, lookfor, 6)))
+ return lookfor;
+ return NULL;
}
/* Parse a request containing the method and protocol.
@@ -814,8 +816,8 @@ invalid_protocol (const char *token)
static char *
parse_req (char *line, char **method, char **protocol)
{
- char *req = NULL, *request = NULL, *proto = NULL, *dreq = NULL;
- const char *meth;
+ char *req = NULL, *request = NULL, *dreq = NULL, *ptr = NULL;
+ const char *meth, *proto;
ptrdiff_t rlen;
meth = extract_method (line);
@@ -827,14 +829,11 @@ parse_req (char *line, char **method, char **protocol)
/* method found, attempt to parse request */
else {
req = line + strlen (meth);
- if ((proto = strstr (line, " HTTP/1.0")) == NULL &&
- (proto = strstr (line, " HTTP/1.1")) == NULL &&
- (proto = strstr (line, " HTTP/2")) == NULL) {
+ if (!(ptr = strrchr (req, ' ')) || !(proto = invalid_protocol (++ptr)))
return alloc_string ("-");
- }
req++;
- if ((rlen = proto - req) <= 0)
+ if ((rlen = ptr - req) <= 0)
return alloc_string ("-");
request = xmalloc (rlen + 1);
@@ -845,15 +844,18 @@ parse_req (char *line, char **method, char **protocol)
(*method) = strtoupper (xstrdup (meth));
if (conf.append_protocol)
- (*protocol) = strtoupper (xstrdup (++proto));
+ (*protocol) = strtoupper (xstrdup (proto));
}
- if ((dreq = decode_url (request)) && *dreq != '\0') {
- free (request);
- return dreq;
+ if (!(dreq = decode_url (request)))
+ return request;
+ else if (*dreq == '\0') {
+ free (dreq);
+ return request;
}
- return request;
+ free (request);
+ return dreq;
}
/* Extract the next delimiter given a log format and copy the
|
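The approach described above presumably helps because a malformed or unescaped request path can itself contain spaces, so the protocol token is most reliably found after the *last* space of the request line. A stand-alone toy version of the idea (the request string is made up):

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *request = "GET /search?q=a b c HTTP/1.1";   /* made-up line */
    const char *ptr = strrchr(request, ' ');

    if (ptr == NULL)
        return 1;
    ptr++;                                   /* text after the last space  */

    if (strncmp(ptr, "HTTP/1.0", 8) == 0 ||
        strncmp(ptr, "HTTP/1.1", 8) == 0 ||
        strncmp(ptr, "HTTP/2", 6) == 0)
        printf("protocol: %s\n", ptr);       /* prints "protocol: HTTP/1.1" */
    else
        printf("invalid protocol\n");

    return 0;
}
```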
openssl ca: open the output file as late as possible
Fixes | @@ -726,10 +726,6 @@ end_of_options:
output_der = 1;
batch = 1;
}
- Sout = bio_open_default(outfile, 'w',
- output_der ? FORMAT_ASN1 : FORMAT_TEXT);
- if (Sout == NULL)
- goto end;
}
if (md == NULL && (md = lookup_conf(conf, section, ENV_DEFAULT_MD)) == NULL)
@@ -1025,6 +1021,11 @@ end_of_options:
if (verbose)
BIO_printf(bio_err, "writing %s\n", new_cert);
+ Sout = bio_open_default(outfile, 'w',
+ output_der ? FORMAT_ASN1 : FORMAT_TEXT);
+ if (Sout == NULL)
+ goto end;
+
Cout = BIO_new_file(new_cert, "w");
if (Cout == NULL) {
perror(new_cert);
@@ -1033,6 +1034,8 @@ end_of_options:
write_new_certificate(Cout, xi, 0, notext);
write_new_certificate(Sout, xi, output_der, notext);
BIO_free_all(Cout);
+ BIO_free_all(Sout);
+ Sout = NULL;
}
if (sk_X509_num(cert_sk)) {
@@ -1181,6 +1184,11 @@ end_of_options:
if (!do_X509_CRL_sign(crl, pkey, dgst, sigopts))
goto end;
+ Sout = bio_open_default(outfile, 'w',
+ output_der ? FORMAT_ASN1 : FORMAT_TEXT);
+ if (Sout == NULL)
+ goto end;
+
PEM_write_bio_X509_CRL(Sout, crl);
if (crlnumberfile != NULL) /* Rename the crlnumber file */
|
unit-test: make a fresh coverage report every time
It is easier to use if, every time you run all tests or a specific test,
the coverage report only shows the result of the last run, and
not the result of all previous runs combined. | @@ -416,7 +416,7 @@ else()
add_custom_command(
OUTPUT gcovr/coverage.html __dummy
COMMAND ${CMAKE_COMMAND} -E make_directory gcovr
- COMMAND ${GCOVR} --gcov-executable gcov-8 --html-details -o gcovr/coverage.html -r ${CMAKE_SOURCE_DIR} -f ${CMAKE_SOURCE_DIR}/src
+ COMMAND ${GCOVR} --gcov-executable gcov-8 --delete --html-details -o gcovr/coverage.html -r ${CMAKE_SOURCE_DIR} -f ${CMAKE_SOURCE_DIR}/src
)
add_custom_target(
coverage
|
fix expedited typo | @@ -27,7 +27,7 @@ TO build the toolchains, you should run:
./scripts/build-toolchains.sh
.. Note:: If you are planning to use the Hwacha vector unit, or other RoCC-based accelerators, you should build the esp-tools toolchains by adding the ``esp-tools`` argument to the script above.
- If you are running on an Amazon Web Services EC2 instance, intending to use FireSim, you can also use the ``--ec2fast`` flag for an expediated installation of a pre-compiled toolchain.
+ If you are running on an Amazon Web Services EC2 instance, intending to use FireSim, you can also use the ``--ec2fast`` flag for an expedited installation of a pre-compiled toolchain.
What's Next?
|
Markdown Shell Recorder: Fix typo in CMake file | @@ -14,7 +14,7 @@ add_s_test (conditionals "${CMAKE_SOURCE_DIR}/src/plugins/conditionals/README.md
add_s_test (hosts "${CMAKE_SOURCE_DIR}/src/plugins/hosts/README.md")
add_s_test (line "${CMAKE_SOURCE_DIR}/src/plugins/line/README.md")
add_s_test (mathcheck "${CMAKE_SOURCE_DIR}/src/plugins/mathcheck/README.md")
-add_s_test (mozpref "${CMAKE_SOURCE_DIR}/src/plugins/mozpref/README.md")
+add_s_test (mozpref "${CMAKE_SOURCE_DIR}/src/plugins/mozprefs/README.md")
add_s_test (tutorial_cascading "${CMAKE_SOURCE_DIR}/doc/tutorials/cascading.md")
add_s_test (tutorial_mount "${CMAKE_SOURCE_DIR}/doc/tutorials/mount.md")
add_s_test (kdb-complete "${CMAKE_SOURCE_DIR}/doc/help/kdb-complete.md")
|
apps/netutils/netinit/netinit.c: Improve cleanup, removing 1 of 2 warnings. Unhook PHY notification signal handler when cleaning up, if an error occurs after the signal handler is put into place. | @@ -555,6 +555,7 @@ static int netinit_monitor(void)
struct timespec reltime;
struct ifreq ifr;
struct sigaction act;
+ struct sigaction oact;
bool devup;
int ret;
int sd;
@@ -584,7 +585,7 @@ static int netinit_monitor(void)
act.sa_sigaction = netinit_signal;
act.sa_flags = SA_SIGINFO;
- ret = sigaction(CONFIG_NETINIT_SIGNO, &act, NULL);
+ ret = sigaction(CONFIG_NETINIT_SIGNO, &act, &oact);
if (ret < 0)
{
ret = -errno;
@@ -758,7 +759,7 @@ static int netinit_monitor(void)
errout_with_notification:
# warning Missing logic
errout_with_sigaction:
-# warning Missing logic
+ (void)sigaction(CONFIG_NETINIT_SIGNO, &oact, NULL);
errout_with_socket:
close(sd);
errout:
|
Add anchors in generated docs
This allows us to link to specific functions. | :ref ref
:source-map sm
:doc docstring} env-entry
+ html-key (html-escape key)
binding-type (cond
macro :macro
ref (string :var " (" (type (get ref 0)) ")")
source-ref (if-let [[path start end] sm]
(string "<span class=\"source-map\">" path " (" start ":" end ")</span>")
"")]
- (string "<h2 class=\"binding\">" (html-escape key) "</h2>\n"
+ (string "<h2 class=\"binding\"><a id=\"" key "\">" html-key "</a></h2>\n"
"<span class=\"binding-type\">" binding-type "</span>\n"
"<p class=\"docstring\">" (trim-lead (html-escape docstring)) "</p>\n"
source-ref)))
|
Remove unused llContainer value from ctx.tx | @@ -24,7 +24,6 @@ typedef struct
{
volatile uint8_t lock; // Transmit lock state
uint8_t *data; // data to compare for collision detection
- ll_container_t llContainer; // Container sending the message
volatile transmitStatus_t status; // data to compare for collision detection
volatile uint8_t collision; // true is a collision occure during this transmission.
} TxCom_t;
|
fix config reload | @@ -84,7 +84,9 @@ od_instance_main(od_instance_t *instance, int argc, char **argv)
od_router_free(&router);
return 0;
}
- instance->config_file = argv[1];
+
+ instance->config_file = malloc(sizeof(char) * strlen(argv[1]));
+ strcpy(instance->config_file, argv[1]);
/* read config file */
od_error_t error;
|
fix error in basic plugin | @@ -25,7 +25,10 @@ protoop_arg_t update_rtt(picoquic_cnx_t *cnx)
if (ack_delay < PICOQUIC_ACK_DELAY_MAX) {
/* if the ACK is reasonably recent, use it to update the RTT */
/* find the stored copy of the largest acknowledged packet */
- uint64_t sequence_number = get_pkt(packet, PKT_AK_SEQUENCE_NUMBER);
+ uint64_t sequence_number = 0;
+ if (packet != NULL) {
+ sequence_number = get_pkt(packet, PKT_AK_SEQUENCE_NUMBER);
+ }
while (packet != NULL && sequence_number > largest) {
packet = (picoquic_packet_t *) get_pkt(packet, PKT_AK_NEXT_PACKET);
|
Change node port proxy by overriding require directly in Module. | @@ -43,19 +43,16 @@ const addon = (() => {
return addon;
})();
-/* Monkey patch require for simplifying load */
-Module.prototype.require = new Proxy(Module.prototype.require, {
- apply(target, module, args) {
-
- const node_require = () => {
- return Reflect.apply(target, module, args);
- };
+const node_require = Module.require;
const metacall_require = (tag, name) => {
// TODO: Inspect the current handle and append it to an object mocking the function calls with metacall
return addon.metacall_load_from_file(tag, [ name ]);
};
+/* Monkey patch require for simplifying load */
+Module.prototype.require = (id) => {
+
const tags = {
mock: 'mock',
py: 'py',
@@ -64,24 +61,22 @@ Module.prototype.require = new Proxy(Module.prototype.require, {
/*dll: 'cs',*/
};
- const name = args[0];
- const index = name.lastIndexOf('.');
+ const index = id.lastIndexOf('.');
if (index === -1) {
- return node_require();
+ return node_require(id);
} else {
// Load the module
- const ext = name.substr(index + 1);
- const tag = tags[ext];
+ const extension = id.substr(index + 1);
+ const tag = tags[extension];
if (tag) {
- return metacall_require(tag, name);
+ return metacall_require(tag, id);
} else {
- return node_require();
- }
+ return node_require(id);
}
}
-});
+};
/* Export the API */
module.exports = {
|
update with latest Makefile | @@ -9,15 +9,16 @@ OBJS := $(addprefix $(OBJDIR)/, $(_OBJS))
LIBDISCORD_CFLAGS := -I./
LIBDISCORD_LDFLAGS := -L./$(LIBDIR) -ldiscord -lcurl
+
LIBDISCORD_LDFLAGS += -lbearssl -static
+
LIBS_CFLAGS := $(LIBDISCORD_CFLAGS)
LIBS_LDFLAGS := $(LIBDISCORD_LDFLAGS)
LIBDISCORD_SLIB := $(LIBDIR)/libdiscord.a
-CFLAGS := -Wall -Wextra -pedantic -std=c11 -O0 -g -DLIBDISCORD_DEBUG
-
+CFLAGS := -Wall -Wextra -pedantic -std=c11 -O0 -g -DLIBDISCORD_DEBUG -D__stensal__
CFLAGS += -D_DEFAULT_SOURCE
@@ -28,12 +29,25 @@ PREFIX ?= /usr/local
all : mkdir $(OBJS) $(LIBDISCORD_SLIB)
mkdir :
- echo "mkdir"
mkdir -p $(OBJDIR) $(LIBDIR)
+test : all test-api.c test-ws.c test-json-scanf.c
+ $(CC) $(CFLAGS) $(LIBS_CFLAGS) \
+ test-api.c -o test-api.exe $(LIBS_LDFLAGS)
+ $(CC) $(CFLAGS) $(LIBS_CFLAGS) \
+ test-ws.c -o test-ws.exe $(LIBS_LDFLAGS)
+ $(CC) $(CFLAGS) $(LIBS_CFLAGS) \
+ test-json-scanf.c -o test-json-scanf.exe $(LIBS_LDFLAGS)
+
echo-bot : all echo-bot.c
$(CC) $(CFLAGS) $(LIBS_CFLAGS) \
echo-bot.c -o echo-bot.exe $(LIBS_LDFLAGS)
+pin-bot : all pin-bot.c
+ $(CC) $(CFLAGS) $(LIBS_CFLAGS) \
+ pin-bot.c -o pin-bot.exe $(LIBS_LDFLAGS)
+ping-pong-bot : all ping-pong-bot.c
+ $(CC) $(CFLAGS) $(LIBS_CFLAGS) \
+ ping-pong-bot.c -o ping-pong-bot.exe $(LIBS_LDFLAGS)
$(OBJDIR)/discord-%.o : discord-%.c
$(CC) $(CFLAGS) $(LIBS_CFLAGS) -c -o $@ $<
|
Removes MBEDTLS_ECDH_LEGACY_CONTEXT from check_config.h
Commit removes MBEDTLS_ECDH_LEGACY_CONTEXT
checks from check_config.h. | #error "MBEDTLS_ECP_RESTARTABLE defined, but it cannot coexist with an alternative or PSA-based ECP implementation"
#endif
-#if defined(MBEDTLS_ECP_RESTARTABLE) && \
- ! defined(MBEDTLS_ECDH_LEGACY_CONTEXT)
-#error "MBEDTLS_ECP_RESTARTABLE defined, but not MBEDTLS_ECDH_LEGACY_CONTEXT"
-#endif
-
-#if defined(MBEDTLS_ECDH_VARIANT_EVEREST_ENABLED) && \
- defined(MBEDTLS_ECDH_LEGACY_CONTEXT)
-#error "MBEDTLS_ECDH_VARIANT_EVEREST_ENABLED defined, but MBEDTLS_ECDH_LEGACY_CONTEXT not disabled"
-#endif
-
#if defined(MBEDTLS_ECDSA_DETERMINISTIC) && !defined(MBEDTLS_HMAC_DRBG_C)
#error "MBEDTLS_ECDSA_DETERMINISTIC defined, but not all prerequisites"
#endif
|
state what we want to achieve | # A project to support using C for high-level programming
+The project is spawned from our internal projects to replace our C++ backend
+with C code. We hope it serves the following goals:
+
+1. Demonstrate how to implement/use containers in C
+
+2. Make C easy to use for developing "high-level" programs
+
+3. Create more opportunities for C lovers who want to use C but cannot
+(or don't want) get into embedded system developments
+
+
Software development is a process, developers have different primary
concerns at different phases of the process. We need to provide
-sufficient support for using C in each phase. The following are the primary
-phases:
+sufficient support for using C in each phase. The following are the
+primary phases:
* Proof of Concept (POC):
The default settings should be easy to use to support a quick POC development.
|
Use check_call for ffmpeg so it can throw errors | @@ -10,7 +10,7 @@ def concat_videos(settings):
# if os.name == 'nt':
# command = '"' + command + '"'
# os.system(command)
- subprocess.call([settings.ffmpeg, '-safe', '0', '-f', 'concat', '-i', settings.temp + 'listvideo.txt', '-c', 'copy', f, '-y'])
+ subprocess.check_call([settings.ffmpeg, '-safe', '0', '-f', 'concat', '-i', settings.temp + 'listvideo.txt', '-c', 'copy', f, '-y'])
def cleanup(settings):
@@ -23,7 +23,7 @@ def cleanup(settings):
def mix_video_audio(settings):
_, file_extension = os.path.splitext(settings.output)
f = settings.temp + "outputf" + file_extension
- subprocess.call([settings.ffmpeg, '-i', f, '-i', settings.temp + 'audio.mp3', '-c:v', 'copy', '-c:a', 'aac', settings.output, '-y'])
+ subprocess.check_call([settings.ffmpeg, '-i', f, '-i', settings.temp + 'audio.mp3', '-c:v', 'copy', '-c:a', 'aac', settings.output, '-y'])
def convert_tomp4(settings, output="output.mp4"):
|
naive: another spawn proxy test | spawn-proxy.own:(~(got by points.state) ~sambud)
::
++ test-l2-sambud-spawn-proxy-predeposit-spawn ^- tang
+ =/ l2-sproxy [[~sambud %spawn] %set-spawn-proxy (addr %sambud-skey-1)]
=/ lf-spawn [[~sambud %spawn] %spawn ~lisdur-fodrys (addr %lf-key-0)]
+ ;: weld
%+ expect-eq
!> [`@ux`(addr %lf-key-0) 0]
::
!>
=| =^state:naive
=^ f state (init-sambud state)
- =^ f state (n state (changed-spawn-proxy:l1 ~sambud (addr %sambud-skey)))
+ =^ f state (n state (changed-spawn-proxy:l1 ~sambud (addr %sambud-skey-0)))
+ =^ f state (n state (changed-spawn-proxy:l1 ~sambud deposit-address:naive))
+ =^ f state (n state %bat q:(gen-tx 0 lf-spawn %sambud-skey-0))
+ transfer-proxy.own:(~(got by points.state) ~lisdur-fodrys)
+ ::
+ %+ expect-eq
+ !> [`@ux`(addr %lf-key-0) 0]
+ ::
+ !>
+ =| =^state:naive
+ =^ f state (init-sambud state)
+ =^ f state (n state (changed-spawn-proxy:l1 ~sambud (addr %sambud-skey-0)))
=^ f state (n state (changed-spawn-proxy:l1 ~sambud deposit-address:naive))
- =^ f state (n state %bat q:(gen-tx 0 lf-spawn %sambud-skey))
+ =^ f state (n state %bat q:(gen-tx 0 l2-sproxy %sambud-skey-0))
+ =^ f state (n state %bat q:(gen-tx 1 lf-spawn %sambud-skey-1))
transfer-proxy.own:(~(got by points.state) ~lisdur-fodrys)
+ ==
::
++ test-linnup-torsyx-spawn ^- tang
:: try to spawn a L2 planet with a L2 planet
|
Doc: Update the documentation for spilled transaction statistics.
Reported-by: Sawada Masahiko
Author: Sawada Masahiko, Amit Kapila
Discussion: | @@ -2518,6 +2518,12 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
mechanism will simply display NULL lag.
</para>
+ <para>
+ Tracking of spilled transactions works only for logical replication. In
+ physical replication, the tracking mechanism will display 0 for spilled
+ statistics.
+ </para>
+
<note>
<para>
The reported lag times are not predictions of how long it will take for
|
README.md: Use version 2.04.99 | @@ -8,7 +8,11 @@ As hardware the [RaspBee](https://www.dresden-elektronik.de/raspbee?L=1&ref=gh)
To learn more about the REST API itself please visit the [REST API Documentation](http://dresden-elektronik.github.io/deconz-rest-doc/) page.
### Phoscon App Beta
-The *Phoscon App* is the successor of the current WebApp (Wireless Light Control), it's browser based too and in open beta state, for more information check out:
+The *Phoscon App* is the successor of the current WebApp (Wireless Light Control), it's browser based too and in open beta state, for more information and screenshots check out:
+
+https://www.dresden-elektronik.de/funktechnik/solutions/wireless-light-control/phoscon-app?L=1
+
+Development updates are posted here:
https://github.com/dresden-elektronik/phoscon-app-beta
@@ -24,11 +28,11 @@ Raspbian Wheezy and Qt4 is no longer maintained.
### Install deCONZ
1. Download deCONZ package
- wget http://www.dresden-elektronik.de/rpi/deconz/beta/deconz-2.04.97-qt5.deb
+ wget http://www.dresden-elektronik.de/rpi/deconz/beta/deconz-2.04.99-qt5.deb
2. Install deCONZ package
- sudo dpkg -i deconz-2.04.97-qt5.deb
+ sudo dpkg -i deconz-2.04.99-qt5.deb
**Important** this step might print some errors *that's ok* and will be fixed in the next step.
@@ -43,11 +47,11 @@ The deCONZ package already contains the REST API plugin, the development package
1. Download deCONZ development package
- wget http://www.dresden-elektronik.de/rpi/deconz-dev/deconz-dev-2.04.97.deb
+ wget http://www.dresden-elektronik.de/rpi/deconz-dev/deconz-dev-2.04.99.deb
2. Install deCONZ development package
- sudo dpkg -i deconz-dev-2.04.97.deb
+ sudo dpkg -i deconz-dev-2.04.99.deb
3. Install missing dependencies
@@ -62,7 +66,7 @@ The deCONZ package already contains the REST API plugin, the development package
2. Checkout related version tag
cd deconz-rest-plugin
- git checkout -b mybranch V2_04_97
+ git checkout -b mybranch V2_04_99
3. Compile the plugin
|
Fix old cascade format function to work with Python3. | @@ -223,7 +223,7 @@ def cascade_binary_old(path, n_stages, name):
n_stages = max_stages
# read stages
- stages = [len(t.childNodes)/2 for t in trees][0:n_stages]
+ stages = [len(t.childNodes)//2 for t in trees][0:n_stages]
stage_threshold = xmldoc.getElementsByTagName('stage_threshold')[0:n_stages]
# total number of features
|
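The fix above switches / to // because division changed in Python 3: / on two integers now yields a float, which breaks code expecting an integer count of stages. A standalone illustration (values are made up):

    children = 7
    print(children / 2)     # Python 3: 3.5 (a float); Python 2 would print 3
    print(children // 2)    # floor division: 3, and always an int

    # Downstream code that expects an integer then breaks on the float, e.g.
    # range(children / 2) raises TypeError under Python 3.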
missing negation | @@ -805,7 +805,7 @@ static void encoder_state_encode_leaf(encoder_state_t * const state)
// Very spesific bug that happens when owf length is longer than the
// gop length. Takes care of that.
- if(state->encoder_control->cfg.gop_lowdelay &&
+ if(!state->encoder_control->cfg.gop_lowdelay &&
!state->encoder_control->cfg.open_gop &&
state->encoder_control->cfg.owf > state->encoder_control->cfg.gop_len &&
ref_state->frame->slicetype == KVZ_SLICE_I &&
|
dill: "downcast" +call error notification to %crud | wrapped-task=(hobo task:able)
==
^+ [*(list move) ..^$]
- ?< ?=(^ dud)
+ ~| wrapped-task
=/ task=task:able ((harden task:able) wrapped-task)
+ ::
+ :: error notifications "downcast" to %crud
+ ::
+ =? task ?=(^ dud)
+ ~| %crud-in-crud
+ ?< ?=(%crud -.task)
+ [%crud -.task tang.u.dud]
+ ::
:: the boot event passes thru %dill for initial duct distribution
::
?: ?=(%boot -.task)
|
symbolic link from history to tmp | @@ -100,27 +100,31 @@ func (rc *Config) createWorkDir(cmd string, attach bool) {
sessionID := getSessionID()
tmpDirName := path.Base(cmd) + "_" + sessionID + "_" + pid + "_" + ts
+ // History directory
+ histDir := HistoryDir()
+ err := os.MkdirAll(histDir, 0755)
+ util.CheckErrSprintf(err, "error creating history dir: %v", err)
+
+ // Working directory
if attach {
- // "History" directory (/tmp for attach)
// Validate /tmp exists
if !util.CheckDirExists("/tmp") {
util.ErrAndExit("/tmp directory does not exist")
}
- // Working directory (0777 permissions)
+ // Create working directory in /tmp (0777 permissions)
rc.WorkDir = filepath.Join("/tmp", tmpDirName)
oldmask := syscall.Umask(0)
err := os.Mkdir(rc.WorkDir, 0777)
syscall.Umask(oldmask)
util.CheckErrSprintf(err, "error creating workdir dir: %v", err)
- } else {
- // History directory
- histDir := HistoryDir()
- err := os.MkdirAll(histDir, 0755)
- util.CheckErrSprintf(err, "error creating history dir: %v", err)
+ // Symbolic link between /tmp/tmpDirName and /history/tmpDirName
+ rootHistDir := filepath.Join(histDir, tmpDirName)
+ os.Symlink(rc.WorkDir, rootHistDir)
- // Working directory
+ } else {
+ // Create working directory in history/
rc.WorkDir = filepath.Join(HistoryDir(), tmpDirName)
err = os.Mkdir(rc.WorkDir, 0755)
util.CheckErrSprintf(err, "error creating workdir dir: %v", err)
@@ -128,7 +132,7 @@ func (rc *Config) createWorkDir(cmd string, attach bool) {
// Cmd directory
cmdDir := filepath.Join(rc.WorkDir, "cmd")
- err := os.Mkdir(cmdDir, 0755)
+ err = os.Mkdir(cmdDir, 0755)
util.CheckErrSprintf(err, "error creating cmd dir: %v", err)
// Payloads directory
|
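The Go change above creates the working directory under /tmp when attaching and records it in the history directory through a symbolic link, so both paths resolve to the same session folder. A POSIX-style Python sketch of that layout (paths are illustrative only):

    import os
    import tempfile

    base = tempfile.mkdtemp()
    hist_dir = os.path.join(base, "history")
    work_dir = os.path.join(base, "tmp", "session_1234")
    os.makedirs(hist_dir)
    os.makedirs(work_dir)

    link = os.path.join(hist_dir, "session_1234")
    os.symlink(work_dir, link)  # the history entry points at the tmp workdir

    print(os.path.realpath(link) == os.path.realpath(work_dir))  # True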
wamr: Add a new option to enable semaphore support | @@ -89,6 +89,11 @@ config INTERPRETERS_WAMR_LIB_PTHREAD
bool "Enable lib pthread"
default n
+config INTERPRETERS_WAMR_LIB_PTHREAD_SEMAPHORE
+ bool "Enable semaphore"
+ depends on INTERPRETERS_WAMR_LIB_PTHREAD
+ default n
+
config INTERPRETERS_WAMR_SHARED_MEMORY
bool "Enable shared memory"
default n
|
pg_upgrade: add a test to catch VACUUM FREEZE failures
VACUUM FREEZE must function correctly during binary upgrade so that the
new cluster's catalogs don't contain bogus transaction IDs. Do a simple
check on the QD in our test script, by querying the age of all the rows
in gp_segment_configuration. | @@ -91,6 +91,43 @@ restore_cluster()
fi
}
+# Test for a nasty regression -- if VACUUM FREEZE doesn't work correctly during
+# upgrade, things fail later in mysterious ways. As a litmus test, check to make
+# sure that catalog tables have been frozen. (We use gp_segment_configuration
+# because the upgrade shouldn't have touched it after the freeze.)
+check_vacuum_worked()
+{
+ local datadir=$1
+ local contentid=$2
+
+ echo "Verifying VACUUM FREEZE using gp_segment_configuration xmins..."
+
+ # Start the instance using the same pg_ctl invocation used by pg_upgrade.
+ "${NEW_BINDIR}/pg_ctl" -w -l /dev/null -D "${datadir}" \
+ -o "-p 5432 --gp_dbid=1 --gp_num_contents_in_cluster=0 --gp_contentid=${contentid} --xid_warn_limit=10000000 -b" \
+ start
+
+ # Query for the xmin ages.
+ local xmin_ages=$( \
+ PGOPTIONS='-c gp_session_role=utility' \
+ psql -c 'SELECT age(xmin) FROM pg_catalog.gp_segment_configuration GROUP BY age(xmin);' \
+ -p 5432 -t -A template1 \
+ )
+
+ # Stop the instance.
+ "${NEW_BINDIR}/pg_ctl" -l /dev/null -D "${datadir}" stop
+
+ # Check to make sure all the xmins are frozen (maximum age).
+ while read age; do
+ if [ "$age" -ne 2147483647 ]; then
+ echo "ERROR: gp_segment_configuration has an entry of age $age"
+ return 1
+ fi
+ done <<< "$xmin_ages"
+
+ return 0
+}
+
upgrade_qd()
{
mkdir -p $1
@@ -104,6 +141,11 @@ upgrade_qd()
fi
popd
+ if ! check_vacuum_worked "$3" -1; then
+ echo "ERROR: VACUUM FREEZE appears to have failed during QD upgrade"
+ exit 1
+ fi
+
# Remember where we were when we upgraded the QD node. pg_upgrade generates
# some files there that we need to copy to QE nodes.
qddir=$1
@@ -124,6 +166,10 @@ upgrade_segment()
exit 1
fi
popd
+
+ # TODO: run check_vacuum_worked on each segment, too, once we have a good
+ # candidate catalog table (gp_segment_configuration doesn't exist on
+ # segments).
}
usage()
|
ames: don't skip closing flows in +on-stir
Yes, there is a global timer for closing flows, but all that does is
enqueue a cork message. +on-stir needs to set _pump_ timers for all
flows that might still have messages to send, which includes closing
flows. | =/ snds=(list (list [ship bone message-pump-state]))
%+ turn states
|= [=ship peer-state]
- %+ murn ~(tap by snd)
+ %+ turn ~(tap by snd)
|= [=bone =message-pump-state]
- ?: (~(has in closing) bone) ~
- `[ship bone message-pump-state]
+ [ship bone message-pump-state]
=/ next-wakes
%+ turn `(list [ship bone message-pump-state])`(zing snds)
|= [=ship =bone message-pump-state]
|
naive: use gen-tx-octs from naive-transactions library | ::
!!
[raw tx]:-.u.batch
- :: %don [(encode-tx:naive +.part-tx) +.part-tx]
+ ::
+ %don [(gen-tx-octs:lib +.part-tx) +.part-tx]
%ful +.part-tx
==
:: +pending-state
|
doc(coder) Improve documentation comments in coder.lua | @@ -102,8 +102,11 @@ local function c_float(n)
return string.format("%f", n)
end
+-- @param ctype: (string) C datatype, as produced by ctype()
+-- @param varname: (string) C variable name
+-- @returns A syntactically valid variable declaration
local function c_declaration(ctyp, varname)
- -- This simple concatenation won't work with function pointers
+ -- This would be harder if we also allowed array or function pointers...
return ctyp .. " " .. varname
end
@@ -123,6 +126,10 @@ end
-- @param type Type of the titan value
-- @returns type of the corresponding C variable
+--
+-- We currently represent C types as strings. This suffices for primitive types
+-- and pointers to primitive types but we might need to switch to a full ADT if
+-- we decide to also support array and function pointer types.
local function ctype(typ)
local tag = typ._tag
if tag == types.T.Nil then return "int"
@@ -185,6 +192,9 @@ local function toplevel_is_value_declaration(tl_node)
end
end
+-- @param prog: (ast) Annotated AST for the whole module
+-- @param modname: (string) Lua module name (for luaopen)
+-- @return (string) C code for the whole module
generate_program = function(prog, modname)
-- Find where each global variable gets stored in the global table
@@ -275,8 +285,8 @@ generate_program = function(prog, modname)
end
-- Construct the values in the toplevel
- -- This needs to happen inside a C closure with all the same upvalues that a
- -- titan function has, because the initializer expressions might rely on
+ -- This needs to happen inside a C closure with all the same upvalues that
+ -- a titan function has, because the initializer expressions might rely on
-- that.
local initialize_toplevel
do
@@ -366,7 +376,8 @@ generate_program = function(prog, modname)
return pretty.reindent_c(code)
end
-
+-- @param stat: (ast.Stat)
+-- @return (string) C statements
generate_stat = function(stat)
local tag = stat._tag
if tag == ast.Stat.Block then
@@ -528,7 +539,15 @@ generate_stat = function(stat)
end
end
--- @returns (statements, c_lvalue)
+-- @param var: (ast.Var)
+-- @returns (string, string) C Statements, and a C lvalue
+--
+-- The lvalue should not not contain side-effects. Anything that could care
+-- about evaluation order should be returned as part of the first argument.
+--
+-- TODO: Rethink what this function should return once we add arrays and
+-- records (the "lvalue" might be a slot, which requires setting a tag when
+-- writing to it)
generate_var = function(var)
local tag = var._tag
if tag == ast.Var.Name then
@@ -560,7 +579,11 @@ generate_var = function(var)
end
end
--- @returns (statements, cvalue)
+-- @param exp: (ast.Exp)
+-- @returns (string, string) C statements, C rvalue
+--
+-- The rvalue should not not contain side-effects. Anything that could care
+-- about evaluation order should be returned as part of the first argument.
generate_exp = function(exp) -- TODO
local tag = exp._tag
if tag == ast.Exp.Nil then
|
Implement RADIO_CONST_MAX_PAYLOAD_LEN: cooja-radio | #include "dev/radio.h"
#include "dev/cooja-radio.h"
-#define COOJA_RADIO_BUFSIZE cooja_radio_driver_max_payload_len
+/*
+ * The maximum number of bytes this driver can accept from the MAC layer for
+ * transmission or will deliver to the MAC layer after reception. Includes
+ * the MAC header and payload, but not the FCS.
+ */
+#define COOJA_RADIO_BUFSIZE 125
+
#define CCA_SS_THRESHOLD -95
const struct simInterface radio_interface;
@@ -337,6 +343,9 @@ get_value(radio_param_t param, radio_value_t *value)
/* return a fixed value depending on the channel */
*value = -90 + simRadioChannel - 11;
return RADIO_RESULT_OK;
+ case RADIO_CONST_MAX_PAYLOAD_LEN:
+ *value = (radio_value_t)COOJA_RADIO_BUFSIZE;
+ return RADIO_RESULT_OK;
default:
return RADIO_RESULT_NOT_SUPPORTED;
}
|
Rename macosx package before installation | @@ -535,6 +535,7 @@ jobs:
elif [ "$RUNNER_OS" == "Windows" ]; then
find . -name "*$(python -c "import platform; print(''.join(platform.python_version_tuple()[0:2]))")*win*.whl" -exec python -m pip install {} \;
elif [ "$RUNNER_OS" == "macOS" ]; then
+ find . -name "*$(python -c "import platform; print(''.join(platform.python_version_tuple()[0:2]))")*macosx*.whl" | while read f; do mv -v "$f" "${f/11_0/10_15}"; done
find . -name "*$(python -c "import platform; print(''.join(platform.python_version_tuple()[0:2]))")*macosx*.whl" -exec python -m pip install {} \;
fi
|
lv_img_decoder: fix incorrect debug assert | @@ -392,9 +392,9 @@ lv_res_t lv_img_decoder_built_in_open(lv_img_decoder_t * decoder, lv_img_decoder
user_data->opa = lv_mem_alloc(palette_size * sizeof(lv_opa_t));
if(user_data->palette == NULL || user_data->opa == NULL) {
LV_LOG_ERROR("img_decoder_built_in_open: out of memory");
-#if LV_USE_FILESYSTEM
- LV_ASSERT_MEM(user_data->f);
-#endif
+
+ LV_ASSERT_MEM(user_data->palette);
+ LV_ASSERT_MEM(user_data->opa);
}
if(dsc->src_type == LV_IMG_SRC_FILE) {
|
Check for thread handle | @@ -61,7 +61,7 @@ Thread* lovrThreadCreate(int (*runner)(void*), Blob* body) {
void lovrThreadDestroy(void* ref) {
Thread* thread = ref;
mtx_destroy(&thread->lock);
- thrd_detach(thread->handle);
+ if (thread->handle) thrd_detach(thread->handle);
lovrRelease(thread->body, lovrBlobDestroy);
free(thread->error);
free(thread);
@@ -85,7 +85,7 @@ void lovrThreadStart(Thread* thread, Variant* arguments, uint32_t argumentCount)
}
void lovrThreadWait(Thread* thread) {
- thrd_join(thread->handle, NULL);
+ if (thread->handle) thrd_join(thread->handle, NULL);
}
bool lovrThreadIsRunning(Thread* thread) {
|
Adding IDE Proj location for Mediatek | @@ -34,6 +34,9 @@ afr_set_board_metadata(IDE_uVision_NAME "uVision")
afr_set_board_metadata(IDE_uVision_COMPILER "Keil-ARM")
afr_set_board_metadata(IS_ACTIVE "TRUE")
+afr_set_board_metadata(IDE_uVision_PROJECT_LOCATION "${CMAKE_CURRENT_LIST_DIR}/aws_demos/uvision")
+afr_set_board_metadata(AWS_DEMOS_CONFIG_FILES_LOCATION "${CMAKE_CURRENT_LIST_DIR}/aws_demos/common/config_files")
+
# -------------------------------------------------------------------------------------------------
# Compiler settings
# -------------------------------------------------------------------------------------------------
|
test_case.py: add new line between test cases | @@ -81,7 +81,7 @@ class TestCase:
out.write(self.description + '\n')
if self.dependencies:
out.write('depends_on:' + ':'.join(self.dependencies) + '\n')
- out.write(self.function + ':' + ':'.join(self.arguments))
+ out.write(self.function + ':' + ':'.join(self.arguments) + '\n')
def write_data_file(filename: str,
test_cases: Iterable[TestCase],
|
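Without the trailing '\n' added above, the next test case written to the data file would begin on the same line as the previous one's arguments. A standalone sketch of the effect (function names are invented for the example):

    import io

    def write_case(out, function, arguments, with_newline):
        out.write(function + ':' + ':'.join(arguments))
        if with_newline:
            out.write('\n')

    buf = io.StringIO()
    for func, args in [("hash_test", ["1", "2"]), ("mac_test", ["3"])]:
        write_case(buf, func, args, with_newline=False)
    print(repr(buf.getvalue()))  # 'hash_test:1:2mac_test:3' -- the records run together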
Fix doc example code to follow coding style | @@ -89,14 +89,13 @@ Code that looked like this:
{
int rv;
char *stmp, *vtmp = NULL;
+
stmp = OPENSSL_strdup(value);
- if (!stmp)
+ if (stmp == NULL)
return -1;
vtmp = strchr(stmp, ':');
- if (vtmp) {
- *vtmp = 0;
- vtmp++;
- }
+ if (vtmp != NULL)
+ *vtmp++ = '\0';
rv = EVP_MAC_ctrl_str(ctx, stmp, vtmp);
OPENSSL_free(stmp);
return rv;
@@ -105,9 +104,9 @@ Code that looked like this:
...
- char *macopt;
for (i = 0; i < sk_OPENSSL_STRING_num(macopts); i++) {
- macopt = sk_OPENSSL_STRING_value(macopts, i);
+ char *macopt = sk_OPENSSL_STRING_value(macopts, i);
+
if (pkey_ctrl_string(mac_ctx, macopt) <= 0) {
BIO_printf(bio_err,
"MAC parameter error \"%s\"\n", macopt);
@@ -119,37 +118,40 @@ Code that looked like this:
Can be written like this instead:
OSSL_PARAM *params =
- OPENSSL_zalloc(sizeof(OSSL_PARAM)
+ OPENSSL_zalloc(sizeof(*params)
* (sk_OPENSSL_STRING_num(opts) + 1));
const OSSL_PARAM *paramdefs = EVP_MAC_CTX_set_param_types(mac);
size_t params_n;
+ char *opt = "<unknown>";
for (params_n = 0; params_n < (size_t)sk_OPENSSL_STRING_num(opts);
params_n++) {
- char *opt = sk_OPENSSL_STRING_value(opts, (int)params_n);
char *stmp, *vtmp = NULL;
+ opt = sk_OPENSSL_STRING_value(opts, (int)params_n);
if ((stmp = OPENSSL_strdup(opt)) == NULL
- || (vtmp = strchr(stmp, ':')) == NULL
- || (*vtmp++ = '\0') /* Always zero */
- || !OSSL_PARAM_allocate_from_text(¶ms[params_n],
- paramdefs,
- stmp, vtmp, strlen(vtmp))) {
- BIO_printf(bio_err, "MAC parameter error '%s'\n", opt);
- ERR_print_errors(bio_err);
+ || (vtmp = strchr(stmp, ':')) == NULL)
+ goto err;
+
+ *vtmp++ = '\0';
+ if (!OSSL_PARAM_allocate_from_text(¶ms[params_n],
+ paramdefs, stmp,
+ vtmp, strlen(vtmp)))
goto err;
- }
}
params[params_n] = OSSL_PARAM_construct_end();
- if (!EVP_MAC_CTX_set_params(ctx, params)) {
- BIO_printf(bio_err, "MAC parameter error\n");
- ERR_print_errors(bio_err);
+ if (!EVP_MAC_CTX_set_params(ctx, params))
goto err;
- }
- for (; params_n-- > 0;) {
+ while (params_n-- > 0)
OPENSSL_free(params[params_n].data);
- }
OPENSSL_free(params);
+ /* ... */
+ return;
+
+ err:
+ BIO_printf(bio_err, "MAC parameter error '%s'\n", opt);
+ ERR_print_errors(bio_err);
+
=head1 SEE ALSO
|
Drop wrong front() - spotted by elric@ | @@ -197,12 +197,7 @@ public:
return Ptr()[Len() - 1];
}
- inline const TCharType& front() const noexcept {
- Y_ASSERT(!empty());
- return Ptr()[0];
- }
-
- inline TCharType& front() noexcept {
+ inline const TCharType front() const noexcept {
Y_ASSERT(!empty());
return Ptr()[0];
}
|
ls2k1000 cpu frequency 1GHz | extern unsigned char __bss_end;
-#define CPU_HZ (100 * 1000 * 1000)
+#define CPU_HZ (1000 * 1000 * 1000) //QEMU 200*1000*1000
#define RT_HW_HEAP_BEGIN KSEG1BASE//(void*)&__bss_end
#define RT_HW_HEAP_END (void*)(RT_HW_HEAP_BEGIN + 64 * 1024 * 1024)
|
add MinGW build guideline, add new version of Visual Studio | @@ -84,6 +84,11 @@ Generating make files on unix:
cd build
cmake .. # or ccmake .. for a GUI.
+.. note::
+
+ If you don't want to build docs or ``Sphinx`` is not installed, you should add ``"-DJANSSON_BUILD_DOCS=OFF"`` in the ``cmake`` command.
+
+
Then to build::
make
@@ -115,6 +120,7 @@ Creating Visual Studio project files from the command line:
- ``Visual Studio 12 2013``
- ``Visual Studio 14 2015``
- ``Visual Studio 15 2017``
+ - ``Visual Studio 16 2019``
Any later version should also work.
@@ -136,6 +142,21 @@ for the project, run::
cmake -LH ..
+Windows (MinGW)
+^^^^^^^^^^^^^^^^^^^^^^^
+If you prefer using MinGW on Windows, make sure MinGW installed and ``{MinGW}/bin`` has been added to ``PATH``, then do the following commands:
+
+.. parsed-literal::
+
+ <unpack>
+ cd jansson-|release|
+
+ md build
+ cd build
+ cmake -G "MinGW Makefiles" ..
+ mingw32-make
+
+
Mac OSX (Xcode)
^^^^^^^^^^^^^^^
If you prefer using Xcode instead of make files on OSX,
|
TINC: Stop sleeping, why? Let the system work. | @@ -62,7 +62,6 @@ class GPDBStorageBaseTestCase():
self.filereputil.inject_fault(f=fault_name, y='reset', r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
self.filereputil.inject_fault(f=fault_name, y=type, r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
tinctest.logger.info('Successfully injected fault_name : %s fault_type : %s occurence : %s ' % (fault_name, type, occurence))
- sleep(30)
def start_db(self):
'''Gpstart '''
|
sched/spawn: Fix minor typo errors | @@ -218,10 +218,10 @@ int task_spawnattr_getstacksize(FAR const posix_spawnattr_t *attr,
int task_spawnattr_setstacksize(FAR posix_spawnattr_t *attr,
size_t stacksize);
#else
-# define task_spawnattr_getstackaddr(fa, addr) (*(addr) = NULL, 0)
-# define task_spawnattr_setstackaddr(fa) (0)
-# define task_spawnattr_getstacksize(fa, size) (*(size) = 0, 0)
-# define task_spawnattr_setstacksize(fa) (0)
+# define task_spawnattr_getstackaddr(attr, addr) (*(addr) = NULL, 0)
+# define task_spawnattr_setstackaddr(attr, addr) (0)
+# define task_spawnattr_getstacksize(attr, size) (*(size) = 0, 0)
+# define task_spawnattr_setstacksize(attr, size) (0)
#endif
/* Non standard debug functions */
@@ -232,7 +232,7 @@ void posix_spawn_file_actions_dump(
void posix_spawnattr_dump(FAR posix_spawnattr_t *attr);
#else
# define posix_spawn_file_actions_dump(fa)
-# define posix_spawnattr_dump(a)
+# define posix_spawnattr_dump(attr)
#endif
#ifdef __cplusplus
|
docs: fix broken link to lcd example | @@ -225,9 +225,9 @@ Application Example
LCD examples are located under: :example:`peripherals/lcd`:
+* Universal SPI LCD example with SPI touch - :example:`peripherals/lcd/spi_lcd_touch`
* Jpeg decoding and LCD display - :example:`peripherals/lcd/tjpgd`
* i80 controller based LCD and LVGL animation UI - :example:`peripherals/lcd/i80_controller`
-* GC9A01 user customized driver and dash board UI - :example:`peripherals/lcd/gc9a01`
* RGB panel example with scatter chart UI - :example:`peripherals/lcd/rgb_panel`
* I2C interfaced OLED display scrolling text - :example:`peripherals/lcd/i2c_oled`
|
parser: json: reset time lookup, do not use current time | @@ -165,7 +165,7 @@ int flb_parser_json_do(struct flb_parser *parser,
tmp[len] = '\0';
flb_warn("[parser:%s] invalid time format %s for '%s'",
parser->name, parser->time_fmt_full, tmp);
- time_lookup = time(NULL);
+ time_lookup = 0;
}
else {
time_lookup = flb_parser_tm2time(&tm);
|
input_chunk: fix variable definitions if metrics are disabled | @@ -66,10 +66,12 @@ int flb_input_chunk_write_at(void *data, off_t offset,
struct flb_input_chunk *flb_input_chunk_map(struct flb_input_instance *in,
void *chunk)
{
+#ifdef FLB_HAVE_METRICS
int ret;
int records;
char *buf_data;
size_t buf_size;
+#endif
struct flb_input_chunk *ic;
/* Create context for the input instance */
|
Free up regexes/strings after benchmark.
It's not important to do, but we want to be a good
example for newbies. | @@ -6,12 +6,11 @@ var str
var dotstar, hello, world
const main = {
- str = std.sldup("hello world!")
- str = std.strcat(str, str)
- str = std.strcat(str, str)
- str = std.strcat(str, str)
- str = std.strcat(str, str)
- str = std.strcat(str, "x")
+ str = ""
+ for var i = 0; i < 16; i++
+ std.sljoin(&str, "hello world!")
+ ;;
+ std.sljoin(&str, "x")
dotstar = std.try(regex.compile(".*"))
hello = std.try(regex.compile("hel*o"))
@@ -22,6 +21,11 @@ const main = {
[.name="searchhello", .fn=searchhello],
[.name="searchworld", .fn=searchworld],
][:])
+
+ regex.free(dotstar)
+ regex.free(hello)
+ regex.free(world)
+ std.slfree(str)
}
const matchall = {ctx
|
fix(example) scroll example sqrt types
line 32 of lv_example_scroll_6.c, if LV_USE_LARGE_COORD not configured,
x_sqr will overflow when r is greater than 256. | @@ -29,7 +29,7 @@ static void scroll_event_cb(lv_event_t * e)
x = r;
} else {
/*Use Pythagoras theorem to get x from radius and y*/
- lv_coord_t x_sqr = r * r - diff_y * diff_y;
+ uint32_t x_sqr = r * r - diff_y * diff_y;
lv_sqrt_res_t res;
lv_sqrt(x_sqr, &res, 0x8000); /*Use lvgl's built in sqrt root function*/
x = r - res.i;
|
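The overflow described above comes from storing r * r - diff_y * diff_y in lv_coord_t, a 16-bit type when LV_USE_LARGE_COORD is not configured; the squared radius stops fitting in 16 bits, which is why the fix widens x_sqr to uint32_t. A rough Python illustration of the truncation (values are arbitrary):

    def as_int16(value):
        """Rough stand-in for storing a result in a signed 16-bit variable."""
        value &= 0xFFFF
        return value - 0x10000 if value >= 0x8000 else value

    r, diff_y = 200, 10
    x_sqr = r * r - diff_y * diff_y   # 39900, the intended value
    print(as_int16(x_sqr))            # -25636 after 16-bit truncation
    print(x_sqr < 2**32)              # True: the value fits comfortably in uint32_t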
Removed $glyph case from the talk-report mark. | ?- -.rep
$cabal (cabl +.rep)
$house a+(turn (~(tap by +.rep)) jose)
- $glyph ((jome |=(a/char a) nack) +.rep)
$grams (jobe num+(jone p.rep) tele+[%a (turn q.rep gram)] ~)
$group (jobe local+(grop p.rep) global+%.(q.rep (jome parn grop)) ~)
==
|
Fix assignment to uninitialized pointer
One of these things is not like the others...
Refs | @@ -1132,7 +1132,7 @@ void dotnet_parse_tilde_2(
pe->object, "assembly_refs[%i].version.minor", i);
set_integer(assemblyref_table->BuildNumber,
pe->object, "assembly_refs[%i].version.build_number", i);
- set_integer(assembly_table->RevisionNumber,
+ set_integer(assemblyref_table->RevisionNumber,
pe->object, "assembly_refs[%i].version.revision_number", i);
blob_offset = pe->data + metadata_root + streams->blob->Offset;
|
Make sure the dpb is more than max_num_reorder_pics | @@ -146,6 +146,7 @@ static void encoder_state_write_bitstream_vid_parameter_set(bitstream_t* stream,
int max_buffer = max_required_dpb_size(encoder);
int max_reorder = max_num_reorder_pics(encoder);
+ if (max_buffer - 1 < max_reorder) max_buffer = max_reorder + 1;
WRITE_UE(stream, max_buffer - 1, "vps_max_dec_pic_buffering_minus1");
WRITE_UE(stream, max_reorder, "vps_max_num_reorder_pics");
@@ -417,6 +418,7 @@ static void encoder_state_write_bitstream_seq_parameter_set(bitstream_t* stream,
//for each layer
int max_buffer = max_required_dpb_size(encoder);
int max_reorder = max_num_reorder_pics(encoder);
+ if (max_buffer - 1 < max_reorder) max_buffer = max_reorder + 1;
WRITE_UE(stream, max_buffer - 1, "sps_max_dec_pic_buffering_minus1");
WRITE_UE(stream, max_reorder, "sps_max_num_reorder_pics");
|
Add warning message if name or architecture is unknown. | @@ -300,12 +300,18 @@ elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Emscripten")
set(TINYSPLINE_PLATFORM_NAME "wasm" CACHE INTERNAL "")
set(TINYSPLINE_PLATFORM_IS_WINDOWS False CACHE INTERNAL "")
else()
+ message(WARNING "Undefined target name: ${CMAKE_SYSTEM_NAME}"
+ " -- Builds might be broken")
set(TINYSPLINE_PLATFORM_NAME "undefined" CACHE INTERNAL "")
set(TINYSPLINE_PLATFORM_IS_WINDOWS False CACHE INTERNAL "")
endif()
include(TargetArch)
target_architecture(TINYSPLINE_PLATFORM_ARCH)
+if(${TINYSPLINE_PLATFORM_ARCH} STREQUAL "unknown")
+ message(WARNING "Unknown target architecture"
+ " -- Builds might be broken")
+endif()
set(TINYSPLINE_PLATFORM
"${TINYSPLINE_PLATFORM_NAME}-${TINYSPLINE_PLATFORM_ARCH}"
|
esp_http_client: fix issue where http parser was not invoking `message_complete` callback | @@ -849,7 +849,16 @@ int esp_http_client_read(esp_http_client_handle_t client, char *buffer, int len)
if (rlen <= 0) {
if (errno != 0) {
- ESP_LOGW(TAG, "esp_transport_read returned : %d and errno : %d ", rlen, errno);
+ esp_log_level_t sev = ESP_LOG_WARN;
+ /* On connection close from server, recv should ideally return 0 but we have error conversion
+ * in `tcp_transport` SSL layer which translates it `-1` and hence below additional checks */
+ if (rlen == -1 && errno == ENOTCONN && client->response->is_chunked) {
+ /* Explicit call to parser for invoking `message_complete` callback */
+ http_parser_execute(client->parser, client->parser_settings, res_buffer->data, 0);
+ /* ...and lowering the message severity, as closed connection from server side is expected in chunked transport */
+ sev = ESP_LOG_DEBUG;
+ }
+ ESP_LOG_LEVEL(sev, TAG, "esp_transport_read returned:%d and errno:%d ", rlen, errno);
}
return ridx;
}
|
Error handling: helpful message when concept fails | @@ -356,13 +356,17 @@ static int grib_concept_apply(grib_accessor* a, const char* name)
err = nofail ? GRIB_SUCCESS : GRIB_CONCEPT_NO_MATCH;
if (err) {
size_t i = 0, concept_count = 0;
- long editionNumber = 0;
+ long dummy = 0, editionNumber = 0;
char* all_concept_vals[MAX_NUM_CONCEPT_VALUES] = {
NULL,
}; /* sorted array containing concept values */
grib_concept_value* pCon = concepts;
grib_context_log(h->context, GRIB_LOG_ERROR, "concept: no match for %s=%s", act->name, name);
+ if (strcmp(act->name, "paramId")==0 && string_to_long(name, &dummy)==GRIB_SUCCESS) {
+ grib_context_log(h->context, GRIB_LOG_ERROR,
+ "Please check the Parameter Database 'https://apps.ecmwf.int/codes/grib/param-db/?id=%s'", name);
+ }
if (grib_get_long(h, "edition", &editionNumber) == GRIB_SUCCESS) {
grib_context_log(h->context, GRIB_LOG_ERROR, "concept: input handle edition=%ld", editionNumber);
}
|
[viostor] introduce SENSE_INFO structure | @@ -135,6 +135,12 @@ typedef struct virtio_bar {
BOOLEAN bPortSpace;
} VIRTIO_BAR, *PVIRTIO_BAR;
+typedef struct _SENSE_INFO {
+ UCHAR senseKey;
+ UCHAR additionalSenseCode;
+ UCHAR additionalSenseCodeQualifier;
+} SENSE_INFO, *PSENSE_INFO;
+
typedef struct _ADAPTER_EXTENSION {
VirtIODevice vdev;
@@ -172,11 +178,15 @@ typedef struct _ADAPTER_EXTENSION {
ULONG perfFlags;
PSTOR_DPC dpc;
BOOLEAN dpc_ok;
+ BOOLEAN check_condition;
+ SENSE_INFO sense_info;
+#if (NTDDI_VERSION > NTDDI_WIN7)
+ STOR_ADDR_BTL8 device_address;
+#endif
#ifdef DBG
ULONG srb_cnt;
ULONG inqueue_cnt;
#endif
- BOOLEAN check_condition;
}ADAPTER_EXTENSION, *PADAPTER_EXTENSION;
typedef struct _VRING_DESC_ALIAS
|
BugID:21372370:[hal tcp]improve hal tcp read | @@ -208,8 +208,6 @@ int32_t HAL_TCP_Read(uintptr_t fd, char *buf, uint32_t len, uint32_t timeout_ms)
do {
t_left = aliot_platform_time_left(t_end, HAL_UptimeMs());
if (0 == t_left) {
- PLATFORM_LOG_D("%s no time left", __func__);
- err_code = -1;
break;
}
FD_ZERO(&sets);
|
Improve text for events | @@ -417,11 +417,18 @@ typedef enum lwesp_evt_type_t {
Optionally enabled with \ref LWESP_CFG_KEEP_ALIVE */
#if LWESP_CFG_MODE_STATION || __DOXYGEN__
- LWESP_EVT_WIFI_CONNECTED, /*!< Station just connected to AP */
+ LWESP_EVT_WIFI_CONNECTED, /*!< Station just connected to access point.
+ When received, station may not have yet valid IP hence new connections
+ cannot be started in this mode */
LWESP_EVT_WIFI_GOT_IP, /*!< Station has valid IP.
- When this event is received to application, no IP has been read from device.
- Stack will proceed with IP read from device and will later send \ref LWESP_EVT_WIFI_IP_ACQUIRED event */
- LWESP_EVT_WIFI_DISCONNECTED, /*!< Station just disconnected from AP */
+ When this event is received to application, ESP has got IP from access point,
+ but no IP has been read from device and at this moment it is still being unknown to application.
+ Stack will proceed with IP read from device and will later send \ref LWESP_EVT_WIFI_IP_ACQUIRED event.
+
+ Note: When IPv6 is enabled, this event may be called multiple times during single connection to access point,
+ as device may report "got IP" several times.
+ Application must take care when starting new conection from this event, not to start it multiple times */
+ LWESP_EVT_WIFI_DISCONNECTED, /*!< Station just disconnected from access point */
LWESP_EVT_WIFI_IP_ACQUIRED, /*!< Station IP address acquired.
At this point, valid IP address has been received from device.
Application may use \ref lwesp_sta_copy_ip function to read it */
|
fix cmake options for building rocprofiler, especially the CMAKE_PREFIX_PATH to find includes | @@ -86,18 +86,17 @@ if [ "$1" != "nocmake" ] && [ "$1" != "install" ] ; then
fi
BUILD_TYPE="release"
export CMAKE_BUILD_TYPE=$BUILD_TYPE
- CMAKE_PREFIX_PATH="$ROCM_DIR/include/platform;$ROCM_DIR/include;$ROCM_DIR/lib;$ROCM_DIR"
+ CMAKE_PREFIX_PATH="$ROCM_DIR/roctracer/include/ext;$ROCM_DIR/include/platform;$ROCM_DIR/include;$ROCM_DIR/lib;$ROCM_DIR"
export CMAKE_PREFIX_PATH
#export HSA_RUNTIME_INC=$ROCM_DIR/include
#export HSA_RUNTIME_LIB=$ROCM_DIR/include/lib
#export HSA_KMT_LIB=$ROCM_DIR/lib
#export HSA_KMT_LIB_PATH=$ROCM_DIR/lib
- export HIP_PATH=$ROCM_DIR
GFXSEMICOLONS=`echo $GFXLIST | tr ' ' ';' `
mkdir -p $BUILD_AOMP/build/rocprofiler
cd $BUILD_AOMP/build/rocprofiler
echo " -----Running rocprofiler cmake ---- "
- ${AOMP_CMAKE} -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_INSTALL_PREFIX=$INSTALL_ROCPROF -DCMAKE_PREFIX_PATH="""$CMAKE_PREFIX_PATH""" -DHIP_VDI=1 -DHIP_PATH=$ROCM_DIR $CMAKE_WITH_EXPERIMENTAL $AOMP_ORIGIN_RPATH -DGPU_TARGETS="""$GFXSEMICOLONS""" -DPROF_API_HEADER_PATH=$INSTALL_ROCPROF/include/roctracer/ext $AOMP_REPOS/$AOMP_PROF_REPO_NAME
+ ${AOMP_CMAKE} -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_INSTALL_PREFIX=$INSTALL_ROCPROF -DCMAKE_PREFIX_PATH="""$CMAKE_PREFIX_PATH""" $CMAKE_WITH_EXPERIMENTAL $AOMP_ORIGIN_RPATH -DGPU_TARGETS="""$GFXSEMICOLONS""" -DPROF_API_HEADER_PATH=$INSTALL_ROCPROF/include/roctracer/ext $AOMP_REPOS/$AOMP_PROF_REPO_NAME
if [ $? != 0 ] ; then
echo "ERROR rocprofiler cmake failed. cmake flags"
echo " $MYCMAKEOPTS"
|