message | diff |
---|---|
Only set palettes on gbc | @@ -26,6 +26,7 @@ void ApplyPaletteChange(UBYTE index) {
UWORD *col = BkgPalette;
UWORD *col_s = SprPalette;
+#ifdef CGB
for (pal = 0; pal < 8; pal++) {
for (c = 0; c < 4; ++c, ++col, ++col_s) {
palette[c] = UpdateColor(index, *col);
@@ -34,6 +35,7 @@ void ApplyPaletteChange(UBYTE index) {
set_bkg_palette(pal, 1, palette);
set_sprite_palette(pal, 1, palette_s);
}
+#endif
OBP0_REG = obj_fade_vals[index];
BGP_REG = bgp_fade_vals[index];
|
docs: update the description of the indirect approach | @@ -62,7 +62,7 @@ The direct approach is suitable for IoT devices capable of direct access to a bl
The indirect approach accomodates IoT devices that can not otherwise directly access the blockchain node, due to various possible reasons such as an IP whitelist restriction and unmatched cryptographic algorithm capability.
-By calling *BoAT-Engine* APIs, the IoT device signs the data with the device's cryptographic key. It then may pack the signature and the data in a datagram and send the datagram to an intermediate edge gateway. The gateway which runs the *BoAT-Anchor* splits the datagram, and then sends the data to the IoT platform and the signature (i.e. digital fingerprint) to the blockchain. It also requests *BoAT-Mast* to check if the data come from a registered device.
+By calling *BoAT-Engine* APIs, the IoT device signs the data with the device's cryptographic key. It then send the signature to an intermediate edge gateway. The gateway which runs the *BoAT-Anchor* splits the datagram, and then sends the data to the IoT platform and the signature (i.e. digital fingerprint) to the blockchain. It also requests *BoAT-Mast* to check if the data come from a registered device.
The data consumer could later verify the data stored on the IoT platform against the on-chain signature.
|
Fix WKTGeometryWriter to NOT use scientific encoding | @@ -35,6 +35,12 @@ namespace carto {
typedef std::function<std::shared_ptr<Geometry>(bool)> Func;
+ template <typename Num>
+ struct RealPolicy : karma::real_policies<Num> {
+ static unsigned int precision(Num n) { return 15; }
+ static int floatfield(Num n) { return karma::real_policies<Num>::fmtflags::fixed; }
+ };
+
template <typename Iterator>
struct Grammar : karma::grammar<Iterator, std::shared_ptr<Geometry>(bool)> {
Grammar() : Grammar::base_type(geometry) {
@@ -45,7 +51,8 @@ namespace carto {
using karma::_3;
using karma::_r1;
- pos = (karma::double_ << ' ' << karma::double_ << -(karma::lit(" ") << karma::double_)) [_pass = phx::bind(&GetMapPos, _val, _r1, _1, _2, _3)];
+ coord = karma::real_generator<double, RealPolicy<double>>();
+ pos = (coord << ' ' << coord << -(karma::lit(" ") << coord)) [_pass = phx::bind(&GetMapPos, _val, _r1, _1, _2, _3)];
ring = pos(_r1) % ',';
rings = '(' << (ring(_r1) % ',') << ')';
type = karma::lit(" Z") [_pass = _r1];
@@ -62,6 +69,7 @@ namespace carto {
;
}
+ karma::rule<Iterator, double()> coord;
karma::rule<Iterator, MapPos(bool)> pos;
karma::rule<Iterator, std::vector<MapPos>(bool)> ring;
karma::rule<Iterator, std::vector<std::vector<MapPos> >(bool)> rings;
|
OcAppleDiskImageLib: Fix cleanup on load failure | @@ -322,15 +322,19 @@ OcAppleDiskImageInstallBlockIo (
Status = gBS->ConnectController (BlockIoHandle, NULL, NULL, TRUE);
if (EFI_ERROR (Status)) {
- gBS->UninstallMultipleProtocolInterfaces (
+ Status = gBS->UninstallMultipleProtocolInterfaces (
BlockIoHandle,
- &gEfiBlockIoProtocolGuid,
- &DiskImageData->BlockIo,
&gEfiDevicePathProtocolGuid,
&DiskImageData->DevicePath,
+ &gEfiBlockIoProtocolGuid,
+ &DiskImageData->BlockIo,
NULL
);
+ if (!EFI_ERROR (Status)) {
FreePool (DiskImageData);
+ } else {
+ DiskImageData->Signature = 0;
+ }
gBS->FreePages (RamDmgAddress, NumRamDmgPages);
return NULL;
}
|
Add HEX_EDNSDATA documentation | <RRs, one per line>
SECTION ADDITIONAL
<RRs, one per line>
+ HEX_EDNSDATA_BEGIN
+ <Hex data of an EDNS option>
+ HEX_EDNSDATA_END
EXTRA_PACKET ; follow with SECTION, REPLY for more packets.
HEX_ANSWER_BEGIN ; follow with hex data
; this replaces any answer packet constructed
@@ -88,6 +91,12 @@ SECTION ANSWER
www.nlnetlabs.nl. IN A 195.169.215.155
SECTION AUTHORITY
nlnetlabs.nl. IN NS www.nlnetlabs.nl.
+HEX_EDNSDATA_BEGIN
+00 03 ; NSID
+00 04 ; LENGTH
+4E 53 ; NS
+49 44 ; ID
+HEX_EDNSDATA_END
ENTRY_END
ENTRY_BEGIN
|
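The hex block above is the wire form of a single EDNS option: a 16-bit option code, a 16-bit length, and the payload. Option code 3 is NSID, and the payload in the example is the ASCII string "NSID". A minimal C sketch of that encoding (the function name and buffer size are illustrative, not part of the test framework):

```c
#include <stdint.h>
#include <string.h>

/* Append one EDNS option (code, length, data) to `out` in wire order. */
static size_t put_edns_option(uint8_t *out, uint16_t code,
                              const uint8_t *data, uint16_t len)
{
    out[0] = (uint8_t) (code >> 8);   /* option code, high byte   */
    out[1] = (uint8_t) (code & 0xff); /* option code, low byte    */
    out[2] = (uint8_t) (len >> 8);    /* option length, high byte */
    out[3] = (uint8_t) (len & 0xff);  /* option length, low byte  */
    memcpy(out + 4, data, len);       /* option data              */
    return 4u + len;
}

int main(void)
{
    uint8_t buf[16];
    size_t n = put_edns_option(buf, 3, (const uint8_t *) "NSID", 4);
    /* buf now holds 00 03 00 04 4e 53 49 44 -- the eight bytes listed above. */
    return n == 8 ? 0 : 1;
}
```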
apps/utils: document command execution methods for history
The history functionality supports two ways to execute previously entered commands:
1. By !number
2. By the up and down arrow keys
This commit adds the above to the history section of the README. | @@ -505,7 +505,8 @@ Heap Allocation Information per User defined Group
## history
-This command shows the history you executed, and you can re-execute it by calling the number with `!`.
+This command shows the history you executed.
+Using this functionality, you can re-execute the command by calling the number with `!` or by up and down keys.
```bash
TASH>>history
TASH command history
@@ -538,6 +539,10 @@ uptime wm_test
TASH>>
```
+>**NOTE**
+>We provide the configuration, *CONFIG_TASH_MAX_STORE_COMMANDS*, to set the maximum number of commands due to memory limitations.
+>When the history has maximum number of commands stored, the oldest command will be removed to store the last command.
+
### How to Enable
Set *CONFIG_TASH_MAX_STORE_COMMANDS* value to use this command on menuconfig as shown below:
```
|
tests: fix buffer overflow in output_warnings | @@ -414,37 +414,37 @@ int output_warnings (Key * warningKey)
buffer[10] = i / 10 % 10 + '0';
buffer[11] = i % 10 + '0';
printf ("buffer is: %s\n", buffer);
- strncat (buffer, "/number", sizeof (buffer) - 1);
+ strncat (buffer, "/number", sizeof (buffer) - strlen(buffer) - 1);
printf ("number: %s\n", keyString (keyGetMeta (warningKey, buffer)));
buffer[12] = '\0';
- strncat (buffer, "/description", sizeof (buffer) - 1);
+ strncat (buffer, "/description", sizeof (buffer) - strlen(buffer) - 1);
printf ("description: %s\n", keyString (keyGetMeta (warningKey, buffer)));
buffer[12] = '\0';
- strncat (buffer, "/ingroup", sizeof (buffer) - 1);
+ strncat (buffer, "/ingroup", sizeof (buffer)- strlen(buffer) - 1);
keyGetMeta (warningKey, buffer);
printf ("ingroup: %s\n", keyString (keyGetMeta (warningKey, buffer)));
buffer[12] = '\0';
- strncat (buffer, "/module", sizeof (buffer) - 1);
+ strncat (buffer, "/module", sizeof (buffer) - strlen(buffer) - 1);
keyGetMeta (warningKey, buffer);
printf ("module: %s\n", keyString (keyGetMeta (warningKey, buffer)));
buffer[12] = '\0';
- strncat (buffer, "/file", sizeof (buffer) - 1);
+ strncat (buffer, "/file", sizeof (buffer) - strlen(buffer) - 1);
keyGetMeta (warningKey, buffer);
printf ("file: %s\n", keyString (keyGetMeta (warningKey, buffer)));
buffer[12] = '\0';
- strncat (buffer, "/line", sizeof (buffer) - 1);
+ strncat (buffer, "/line", sizeof (buffer)- strlen(buffer) - 1);
keyGetMeta (warningKey, buffer);
printf ("line: %s\n", keyString (keyGetMeta (warningKey, buffer)));
buffer[12] = '\0';
- strncat (buffer, "/reason", sizeof (buffer) - 1);
+ strncat (buffer, "/reason", sizeof (buffer) - strlen(buffer) - 1);
keyGetMeta (warningKey, buffer);
printf ("reason: %s\n", keyString (keyGetMeta (warningKey, buffer)));
buffer[12] = '\0';
- strncat (buffer, "/mountpoint", sizeof (buffer) - 1);
+ strncat (buffer, "/mountpoint", sizeof (buffer) - strlen(buffer) - 1);
keyGetMeta (warningKey, buffer);
printf ("reason: %s\n", keyString (keyGetMeta (warningKey, buffer)));
buffer[12] = '\0';
- strncat (buffer, "/configfile", sizeof (buffer) - 1);
+ strncat (buffer, "/configfile", sizeof (buffer) - strlen(buffer) - 1);
keyGetMeta (warningKey, buffer);
printf ("reason: %s\n", keyString (keyGetMeta (warningKey, buffer)));
}
|
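The fix above turns on what strncat's third argument means: it bounds the number of characters appended, not the total buffer size, so the remaining space has to be computed from the current string length. A minimal sketch with an illustrative buffer (the real test builds metadata names of the form `warnings/#XX/...`):

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
    char buffer[32] = "warnings/#00";

    /* Correct bound: append at most (space left - 1) characters so the
     * terminating '\0' always fits inside the buffer. */
    strncat(buffer, "/description", sizeof(buffer) - strlen(buffer) - 1);

    printf("%s\n", buffer); /* prints: warnings/#00/description */
    return 0;
}
```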
Fix: immediately return NULL for CQs whose relation has already been dropped | @@ -656,13 +656,21 @@ GetContQueryForId(Oid id)
Datum tmp;
bool isnull;
Query *query;
+ char *relname;
if (!HeapTupleIsValid(tup))
return NULL;
- cq = palloc0(sizeof(ContQuery));
row = (Form_pipeline_query) GETSTRUCT(tup);
+ relname = get_rel_name(row->relid);
+ if (relname == NULL)
+ {
+ ReleaseSysCache(tup);
+ return NULL;
+ }
+
+ cq = palloc0(sizeof(ContQuery));
cq->id = id;
cq->oid = HeapTupleGetOid(tup);
|
Fix glibc-specific conditional for Mac OS X
MacOS seems to define __GLIBC__ but not __GLIBC_PREREQ. | @@ -231,7 +231,8 @@ static uint64_t get_timer_bits(void)
# if defined(_POSIX_C_SOURCE) \
&& defined(_POSIX_TIMERS) \
&& _POSIX_C_SOURCE >= 199309L \
- && (!defined(__GLIBC__) || __GLIBC_PREREQ(2, 17))
+ && (!defined(__GLIBC__) \
+ || (defined(__GLIBC_PREREQ) && __GLIBC_PREREQ(2, 17)))
{
struct timespec ts;
clockid_t cid;
|
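A standalone sketch of the same feature test, written with nested preprocessor checks so that `__GLIBC_PREREQ` is only expanded when it is actually defined (the commit reports that macOS defines `__GLIBC__` without it); the 2.17 version check mirrors the diff, while the macro name `HAVE_POSIX_TIMER` is illustrative:

```c
#include <time.h>

/* Decide whether the POSIX timer path may be used. If __GLIBC_PREREQ is
 * missing, the feature is simply left disabled instead of failing to
 * preprocess. */
#if defined(__GLIBC__)
# if defined(__GLIBC_PREREQ)
#  if __GLIBC_PREREQ(2, 17)
#   define HAVE_POSIX_TIMER 1
#  endif
# endif
#else
# define HAVE_POSIX_TIMER 1
#endif

int main(void)
{
#ifdef HAVE_POSIX_TIMER
    struct timespec ts;
    return clock_gettime(CLOCK_MONOTONIC, &ts) == 0 ? 0 : 1;
#else
    return 0;
#endif
}
```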
add option to use wget | @@ -39,21 +39,21 @@ if [ ! -w `pwd` ]; then
exit 1
fi
-if [[ $curl_exists == "true" && $wget_exists == "true" ]]; then
- prompt="Detected both curl and wget, which one would you like to use? [default is curl]"
- options=("curl" "wget")
- PS3="$prompt "
- select opt in "${options[@]}" "Quit"; do
- case "$REPLY" in
-
- 1 ) download_command="curl -O "; break;;
- 2 ) download_command="wget "; break;;
-
- $(( ${#options[@]}+1 )) ) echo "Goodbye!"; exit 1;;
- *) download_command="curl -O "; break;;
-
+# Parse all commandline options
+while [[ "$#" -gt 0 ]]; do
+ case $1 in
+ -w|--wget) force_wget="true"; break;;
+ *) echo "Unknown parameter: $1"; exit 1;;
esac
+ shift
done
+
+if [[ $curl_exists == "true" && $wget_exists == "true" ]]; then
+ if [[ $force_wget == "true" ]]; then
+ download_command="wget "
+ else
+ download_command="curl -O "
+ fi
elif [[ $curl_exists == "true" ]]; then
download_command="curl -O "
elif [[ $wget_exists == "true" ]]; then
|
Fix typo in PSA ECC curve config option
Fix SEC to SECP as the curve name. This fixes failing tests that
verified the config option was working. | @@ -406,7 +406,7 @@ extern "C" {
#if defined(PSA_WANT_ECC_SECP_K1_256)
#if !defined(MBEDTLS_PSA_ACCEL_ECC_SECP_K1_256)
-#define MBEDTLS_ECP_DP_SEC256K1_ENABLED
+#define MBEDTLS_ECP_DP_SECP256K1_ENABLED
#define MBEDTLS_PSA_BUILTIN_ECC_SECP_K1_256 1
#endif /* !MBEDTLS_PSA_ACCEL_ECC_SECP_K1_256 */
#endif /* PSA_WANT_ECC_SECP_K1_256 */
@@ -657,7 +657,7 @@ extern "C" {
#define PSA_WANT_ECC_SECP_K1_224
#endif
-#if defined(MBEDTLS_ECP_DP_SEC256K1_ENABLED)
+#if defined(MBEDTLS_ECP_DP_SECP256K1_ENABLED)
#define MBEDTLS_PSA_BUILTIN_ECC_SECP_K1_256 1
#define PSA_WANT_ECC_SECP_K1_256
#endif
|
docs: update vnfs name for leap | @@ -47,6 +47,6 @@ default location for this example is in
[sms](*\#*) mkdir -p -m 755 $CHROOT # create chroot housing dir
[sms](*\#*) mkdir -m 755 $CHROOT/dev # create chroot /dev dir
[sms](*\#*) mknod -m 666 $CHROOT/dev/zero c 1 5 # create /dev/zero device
-[sms](*\#*) wwmkchroot -v sles-12 $CHROOT # create base image
+[sms](*\#*) wwmkchroot -v opensuse-15.1 $CHROOT # create base image
\end{lstlisting}
% end_ohpc_run
|
Add additional orphan logging | @@ -139,6 +139,8 @@ namespace Miningcore.Blockchain.Bitcoin
block.Status = BlockStatus.Orphaned;
block.Reward = 0;
result.Add(block);
+
+ logger.Info(() => $"[{LogCategory}] Block {block.BlockHeight} classified as orphaned due to daemon error {cmdResult.Error.Code}");
}
else
@@ -153,6 +155,8 @@ namespace Miningcore.Blockchain.Bitcoin
block.Status = BlockStatus.Orphaned;
block.Reward = 0;
result.Add(block);
+
+ logger.Info(() => $"[{LogCategory}] Block {block.BlockHeight} classified as orphaned due to missing tx details");
}
else
|
chore(stale) adjust issue closure timeouts | @@ -11,10 +11,10 @@ jobs:
- uses: actions/stale@v3
with:
repo-token: ${{ secrets.LVGL_BOT_TOKEN }}
- stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
- stale-pr-message: 'This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
- close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.'
- days-before-stale: 30
- days-before-close: 5
+ stale-issue-message: 'This issue is stale because it has been open 14 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
+ stale-pr-message: 'This PR is stale because it has been open 14 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
+ close-issue-message: 'This issue was closed because it has been stalled for 7 days with no activity.'
+ days-before-stale: 14
+ days-before-close: 7
exempt-issue-labels: 'pinned'
exempt-pr-labels: 'pinned'
|
FlatOut 2 update
fixes | @@ -89,8 +89,22 @@ void ShowIntroHook()
//Aspect Ratio
auto pattern = hook::pattern("D9 05 ? ? ? ? 8B 44 24 04 8B 4C 24");
+ if (!pattern.empty())
+ {
injector::WriteMemory(pattern.get_first<float>(2), &Screen.fWidth, true);
injector::WriteMemory(pattern.get_first<float>(18), &Screen.fHeight, true);
+ }
+ else
+ {
+ pattern = hook::pattern("8B 44 24 04 8B 4C 24 08 C7 00 00 00 80");
+ if (!pattern.empty())
+ {
+ injector::WriteMemory<float>(pattern.get_first<float>(10 + 0), Screen.fWidth, true);
+ injector::WriteMemory<float>(pattern.get_first<float>(10 + 6), Screen.fHeight, true);
+ injector::WriteMemory<float>(pattern.get_first<float>(23 + 10 + 0), Screen.fWidth, true);
+ injector::WriteMemory<float>(pattern.get_first<float>(23 + 10 + 6), Screen.fHeight, true);
+ }
+ }
//Hud Scale
pattern = hook::pattern("D9 05 ? ? ? ? 56 8B F1 8B 86"); //53BBF3
|
esp32s3: use wdt/systimer HAL ROM implementation as default for esp32s3 | @@ -67,7 +67,7 @@ menu "Hardware Abstraction Layer (HAL) and Low Level (LL)"
config HAL_SYSTIMER_HAS_ROM_IMPL
bool
- default y if IDF_TARGET_ESP32C2
+ default y if IDF_TARGET_ESP32C2 # TODO: IDF-4917
config HAL_SYSTIMER_ROM_IMPL
bool "Use systimer HAL implementation in ROM"
@@ -84,7 +84,7 @@ menu "Hardware Abstraction Layer (HAL) and Low Level (LL)"
config HAL_WDT_HAS_ROM_IMPL
bool
- default y if IDF_TARGET_ESP32C2
+ default y if IDF_TARGET_ESP32C2 || IDF_TARGET_ESP32S3
config HAL_WDT_ROM_IMPL
bool "Use WDT HAL implementation in ROM"
|
Release: Add note about plugin/binding requirement | @@ -175,6 +175,9 @@ Many problems were resolved with the following fixes:
- The [Shell Recorder][] counts the number of executed tests properly again.
- CMake now fails if the required plugins [list](http://libelektra.org/plugins/list) or [spec](http://libelektra.org/plugins/spec) (on
non-[MinGW](http://mingw.org) platforms) are missing from the current build configuration.
+- The [Lua](http://libelektra.org/plugins/lua), [Python 2](http://libelektra.org/plugins/python2),
+ [Python](http://libelektra.org/plugins/python), and [Ruby](http://libelektra.org/plugins/ruby) plugins now require SWIG bindings for
+ the corresponding programming language.
## Outlook
|
acrn-config: add 'xhci' USB mediator for LaaG and WaaG
Enable the xhci USB mediator for whl-ipc-i5/whl-ipc-i7 and the KBL NUC for LaaG
and WaaG.
Acked-by: Victor Sun | @@ -401,6 +401,26 @@ def vboot_arg_set(dm, vmid, config):
print(" $boot_image_option \\",file=config)
+def xhci_args_set(names, vmid, config):
+ board_name = names['board_name']
+ uos_type = names['uos_types'][vmid]
+
+ # Get locate of vmid
+ idx = 0
+ #if board_name not in ("whl-ipc-i5", "whl-ipc-i7"):
+ if uos_type in ("CLEARLINUX", "WINDOWS") and board_name in ("whl-ipc-i5", "whl-ipc-i7", "nuc7i7dnb"):
+ for num in names['uos_types'].keys():
+ idx += 1
+ if num == vmid:
+ break
+
+ # if the vmid is first vm
+ if idx == 1:
+ print(" -s {},xhci,1-2:2-2 \\".format(launch_cfg_lib.virtual_dev_slot("xhci")), file=config)
+ elif idx > 1:
+ print(" -s {},xhci,1-2:2-4 \\".format(launch_cfg_lib.virtual_dev_slot("xhci")), file=config)
+
+
def dm_arg_set(names, sel, dm, vmid, config):
uos_type = names['uos_types'][vmid]
@@ -466,6 +486,9 @@ def dm_arg_set(names, sel, dm, vmid, config):
print("{} \\".format(dm_str), file=config)
print(" --windows \\", file=config)
+ # WA: XHCI args set
+ xhci_args_set(names, vmid, config)
+
# GVT args set
gvt_arg_set(uos_type, config)
@@ -498,7 +521,7 @@ def dm_arg_set(names, sel, dm, vmid, config):
if not is_nuc_whl_clr(names, vmid):
print(" -s {},wdt-i6300esb \\".format(launch_cfg_lib.virtual_dev_slot("wdt-i6300esb")), file=config)
- print(" -s {},xhci,1-1:1-2:1-3:2-1:2-2:2-3:cap=apl \\".format(launch_cfg_lib.virtual_dev_slot("xhci")), file=config)
+ #print(" -s {},xhci,1-1:1-2:1-3:2-1:2-2:2-3:cap=apl \\".format(launch_cfg_lib.virtual_dev_slot("xhci")), file=config)
if dm['vbootloader'][vmid] and dm['vbootloader'][vmid] == "vsbl":
print(" -s {},virtio-blk$boot_dev_flag,/data/{} \\".format(launch_cfg_lib.virtual_dev_slot("virtio-blk"), root_img), file=config)
|
Assign used_msg as soon as the message starts to be read. | @@ -738,9 +738,9 @@ error_return_t MsgAlloc_PullMsgFromLuosTask(uint16_t luos_task_id, msg_t **retur
//find the oldest message allocated to this module
if (luos_task_id < luos_tasks_stack_id)
{
- *returned_msg = luos_tasks[luos_task_id].msg_pt;
+ used_msg = luos_tasks[luos_task_id].msg_pt;
+ *returned_msg = (msg_t *)used_msg;
// Clear the slot by sliding others to the left on it
- used_msg = *returned_msg;
MsgAlloc_ClearLuosTask(luos_task_id);
return SUCCEED;
}
|
Fix assertion triggered when replacing __cl_printf
Fix an LLVM assertion that was triggered when replacing calls to __cl_printf
with __pocl_printf due to a return value type mismatch. LLVM changed the
return value of __cl_printf to void when nothing was using the value,
which led to the issue. | @@ -546,9 +546,15 @@ static void replacePrintfCalls(Value *pb, Value *pbp, Value *pbc, bool isKernel,
CallInst *CI = it.first;
CallInst *newCI = it.second;
+ // LLVM may modify the result type of the called function to void.
+ if (CI->getType()->isVoidTy()) {
+ newCI->insertBefore(CI);
+ CI->eraseFromParent();
+ } else {
CI->replaceAllUsesWith(newCI);
ReplaceInstWithInst(CI, newCI);
}
+ }
replaceCIMap.clear();
|
Docs: incorrect partitioned table example | <p>This example <cmdname>CREATE TABLE</cmdname> command creates a range partitioned table.</p>
<codeblock>CREATE TABLE sales(order_id int, item_id int, amount numeric(15,2),
date date, yr_qtr int)
- <b>range partitioned by yr_qtr</b>;</codeblock>
+ PARTITION BY RANGE (yr_qtr) (start (201501) INCLUSIVE end (201504) INCLUSIVE,
+ start (201601) INCLUSIVE end (201604) INCLUSIVE,
+ start (201701) INCLUSIVE end (201704) INCLUSIVE,
+ start (201801) INCLUSIVE end (201804) INCLUSIVE,
+ start (201901) INCLUSIVE end (201904) INCLUSIVE,
+ start (202001) INCLUSIVE end (202004) INCLUSIVE);</codeblock>
<p>GPORCA improves on these types of queries against partitioned tables:</p>
<ul id="ul_jdl_zwd_gr">
<li>Full table scan. Partitions are not enumerated in
|
build: Makefile clean gcov files | @@ -347,6 +347,8 @@ cscope:
clean:
$(RM) *.[odsa] $(SUBDIRS:%=%/*.[odsa])
+ $(RM) *.gcno $(SUBDIRS:%=%/*.gcno)
+ $(RM) *.gcda $(SUBDIRS:%=%/*.gcda)
$(RM) *.elf $(TARGET).lid *.map $(TARGET).lds $(TARGET).lid.xz
$(RM) include/asm-offsets.h version.c .version
$(RM) skiboot.info external/gard/gard.info external/pflash/pflash.info
|
ipfix-export: don't check the result of pool_get
The code that checks whether exp is set after the call to pool_get()
is marked as unreachable by Coverity. This is because if
pool_get() fails, it panics. Remove the unreachable code.
Type: fix | @@ -107,8 +107,6 @@ vl_api_set_ipfix_exporter_t_internal (
if (pool_elts (frm->exporters) >= IPFIX_EXPORTERS_MAX)
return VNET_API_ERROR_INVALID_VALUE;
pool_get (frm->exporters, exp);
- if (!exp)
- return VNET_API_ERROR_INVALID_VALUE;
}
}
else
|
viofs-svc: fix coding style in VirtFsFuseRequest function. | @@ -245,12 +245,11 @@ static NTSTATUS VirtFsFuseRequest(HANDLE Device,
NTSTATUS Status = STATUS_SUCCESS;
DWORD BytesReturned = 0;
BOOL Result;
- struct fuse_out_header *hdr = OutBuffer;
+ struct fuse_in_header *in_hdr = InBuffer;
+ struct fuse_out_header *out_hdr = OutBuffer;
- DBG(">>req: %d unique: %Iu len: %u",
- ((struct fuse_in_header *)InBuffer)->opcode,
- ((struct fuse_in_header *)InBuffer)->unique,
- ((struct fuse_in_header *)InBuffer)->len);
+ DBG(">>req: %d unique: %Iu len: %u", in_hdr->opcode, in_hdr->unique,
+ in_hdr->len);
Result = DeviceIoControl(Device, IOCTL_VIRTFS_FUSE_REQUEST,
InBuffer, InBufferSize, OutBuffer, OutBufferSize,
@@ -261,9 +260,10 @@ static NTSTATUS VirtFsFuseRequest(HANDLE Device,
return FspNtStatusFromWin32(GetLastError());
}
- DBG("<<len=%u error=%d unique=%Iu", hdr->len, hdr->error, hdr->unique);
+ DBG("<<len: %u error: %d unique: %Iu", out_hdr->len, out_hdr->error,
+ out_hdr->unique);
- if (BytesReturned != hdr->len)
+ if (BytesReturned != out_hdr->len)
{
DBG("BytesReturned != hdr->len");
}
@@ -275,9 +275,9 @@ static NTSTATUS VirtFsFuseRequest(HANDLE Device,
// XXX return STATUS_UNSUCCESSFUL;
}
- if (hdr->error < 0)
+ if (out_hdr->error < 0)
{
- switch (hdr->error)
+ switch (out_hdr->error)
{
case -EPERM:
Status = STATUS_ACCESS_DENIED;
|
mmapstorage: remove unused variable, closes | @@ -193,7 +193,6 @@ static int copyFile (int sourceFd, int destFd)
return -1;
}
- char * pos = buf;
ssize_t writtenBytes = 0;
while (readBytes > 0)
{
@@ -206,7 +205,6 @@ static int copyFile (int sourceFd, int destFd)
return -1;
}
readBytes -= writtenBytes;
- pos += writtenBytes;
}
}
|
less sim time for hls_search | n=0 # count amount of tests executed (exception for subsecond calls)
max_rc=0 # track the maximum RC to return at the end
loops=1;
- rnd10=$((1+RANDOM%10))
- rnd20=$((1+RANDOM%20))
- rnd32=$((1+RANDOM%32))
- rnd1k=$((1+RANDOM%1024))
+ rnd10=$((2+RANDOM%9))
+ rndodd20=$((10+2*RANDOM%5))
+ rnd20=$((2+RANDOM%19))
+ rnd32=$((2+RANDOM%31))
+ rnd1k=$((2+RANDOM%1023))
rnd1k4k=$((1024+RANDOM%3072))
- rnd16k=$((1+RANDOM%16384))
+ rnd16k=$((2+RANDOM%16383))
rnd32k=$((RANDOM))
# export SNAP_TRACE=0xFF
# export SNAP_TRACE=0xF2 # for Sven
#
if [[ "$t0l" == "10140001" || "${env_action}" == "hdl_nvme_example" ]];then echo -e "$del\ntesting hdl_nvme_example"
step "snap_cblk -h" # write max 2blk, read max 32blk a 512B
- options="-n32 -t1" # 512B blocks, one thread
- export CBLK_BUSYTIMEOUT=350
- export CBLK_REQTIMEOUT=360
+ options="-n"${rndodd20}" -t1" # 512B blocks, one thread
+ export CBLK_BUSYTIMEOUT=1500 # used for threads waiting for free slot
+ export CBLK_REQTIMEOUT=1000 # should be smaller than busytimeout
# export SNAP_TRACE=0xFFF
for blk in 1 2;do p8=$((blk*8)); p4k=$((blk*4096)); # no of 512B blocks and pagesize in 4kB blocks
echo "generate data for $blk blocks, $p8 pages, $p4k bytes"
step "snap_cblk $options -b2 --read cblk_read2.bin"
diff cblk_read1.bin cblk_read2.bin
- for blk in 1 2 4 8 16 32;do byte=$((blk*512))
+ for blk in 1 ${rndodd20};do byte=$((blk*512))
step "snap_cblk $options -b2 --write cblk_read2.bin"
step "snap_cblk $options -b${blk} --read cblk_read3.bin"
diff cblk_read2.bin cblk_read3.bin
|
Ensure that asserts and args are effect-free | @@ -523,6 +523,10 @@ func (p *parser) parseAssertNode() (*a.Node, error) {
if err != nil {
return nil, err
}
+ if condition.Effect() != 0 {
+ return nil, fmt.Errorf(`parse: assert-condition %q is not effect-free at %s:%d`,
+ condition.Str(p.tm), p.filename, p.line())
+ }
reason, args := t.ID(0), []*a.Node(nil)
if p.peek1() == t.IDVia {
p.src = p.src[1:]
@@ -891,6 +895,10 @@ func (p *parser) parseArgNode() (*a.Node, error) {
if err != nil {
return nil, err
}
+ if value.Effect() != 0 {
+ return nil, fmt.Errorf(`parse: arg-value %q is not effect-free at %s:%d`,
+ value.Str(p.tm), p.filename, p.line())
+ }
return a.NewArg(name, value).AsNode(), nil
}
|
Document DESTDIR | @@ -97,6 +97,15 @@ muse.
Links with the runtime $MYR_RT instead of the default of
prefix/lib/myr/_myrrt.o.
+.TP
+.B DESTDIR
+Specified that when the files installed by
+.I mbld install
+are put into place, the paths are prefixed with
+.I DESTDIR.
+This fits with the conventions of many package systems,
+allowing Myrddin code to be packaged more easily.
+
.SH FILES
.TP
|
corrects boot comments | @@ -1293,7 +1293,6 @@ _sist_dawn(u3_noun sed)
c3_c* url_c = eth_t ? u3_Host.ops_u.eth_c : "http://localhost:8545";
{
- // XX check parent if moon
// +hull:constitution:ethe: on-chain state
u3_noun hul;
@@ -1572,7 +1571,7 @@ u3_sist_boot(void)
u3z(rac);
}
- // Create the ship directory.
+ // Create the event log
_sist_zest();
}
}
|
taeko: add thermal temperature
BRANCH=main
TEST=make -j BOARD=taeko | @@ -322,26 +322,26 @@ const struct temp_sensor_t temp_sensors[] = {
BUILD_ASSERT(ARRAY_SIZE(temp_sensors) == TEMP_SENSOR_COUNT);
/*
- * TODO(b/180681346): update for Alder Lake/brya
+ * TODO(b/201021109): update for Alder Lake/brya
*
* Tiger Lake specifies 100 C as maximum TDP temperature. THRMTRIP# occurs at
* 130 C. However, sensor is located next to DDR, so we need to use the lower
- * DDR temperature limit (85 C)
+ * DDR temperature limit (100 C)
*/
static const struct ec_thermal_config thermal_cpu = {
.temp_host = {
- [EC_TEMP_THRESH_HIGH] = C_TO_K(70),
- [EC_TEMP_THRESH_HALT] = C_TO_K(80),
+ [EC_TEMP_THRESH_HIGH] = C_TO_K(90),
+ [EC_TEMP_THRESH_HALT] = C_TO_K(100),
},
.temp_host_release = {
- [EC_TEMP_THRESH_HIGH] = C_TO_K(65),
+ [EC_TEMP_THRESH_HIGH] = C_TO_K(85),
},
.temp_fan_off = C_TO_K(35),
- .temp_fan_max = C_TO_K(50),
+ .temp_fan_max = C_TO_K(60),
};
/*
- * TODO(b/180681346): update for Alder Lake/brya
+ * TODO(b/201021109): update for Alder Lake/brya
*
* Inductor limits - used for both charger and PP3300 regulator
*
@@ -351,18 +351,18 @@ static const struct ec_thermal_config thermal_cpu = {
* PP3300 regulator: operating range -40 C to 145 C
*
* Inductors: limit of 125c
- * PCB: limit is 80c
+ * PCB: limit is 100c
*/
static const struct ec_thermal_config thermal_fan = {
.temp_host = {
- [EC_TEMP_THRESH_HIGH] = C_TO_K(75),
- [EC_TEMP_THRESH_HALT] = C_TO_K(80),
+ [EC_TEMP_THRESH_HIGH] = C_TO_K(90),
+ [EC_TEMP_THRESH_HALT] = C_TO_K(100),
},
.temp_host_release = {
- [EC_TEMP_THRESH_HIGH] = C_TO_K(65),
+ [EC_TEMP_THRESH_HIGH] = C_TO_K(85),
},
- .temp_fan_off = C_TO_K(40),
- .temp_fan_max = C_TO_K(55),
+ .temp_fan_off = C_TO_K(35),
+ .temp_fan_max = C_TO_K(60),
};
/* this should really be "const" */
|
Fix build on FreeBSD/powerpc64le | @@ -21,6 +21,8 @@ ifeq ($(ARCH), amd64)
override ARCH=x86_64
else ifeq ($(ARCH), powerpc64)
override ARCH=power
+else ifeq ($(ARCH), powerpc64le)
+override ARCH=power
else ifeq ($(ARCH), powerpc)
override ARCH=power
else ifeq ($(ARCH), i386)
|
docs(bar) fix default range
Related: | @@ -22,7 +22,7 @@ Not only the end, but also the start value of the bar can be set, which changes
### Value and range
A new value can be set by `lv_bar_set_value(bar, new_value, LV_ANIM_ON/OFF)`.
The value is interpreted in a range (minimum and maximum values) which can be modified with `lv_bar_set_range(bar, min, max)`.
-The default range is 1..100.
+The default range is 0..100.
The new value in `lv_bar_set_value` can be set with or without an animation depending on the last parameter (`LV_ANIM_ON/OFF`).
|
Enable compiling with older versions of GCC | * express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
+#include <stdlib.h>
#include <sys/param.h>
#include "pq-crypto/pq-random.h"
|
Avoid erasing entire flash and add progress bar | #include <stdlib.h>
using namespace blit;
+constexpr uint32_t qspi_flash_sector_size = 64 * 1024;
+
extern QSPI_HandleTypeDef hqspi;
extern CDCCommandStream g_commandStream;
@@ -103,9 +105,16 @@ bool FlashLoader::Flash(const char *pszFilename)
return false;
}
- // quick and dirty erase
- QSPI_WriteEnable(&hqspi);
- qspi_chip_erase();
+ // erase the sectors needed to write the image
+ uint32_t sector_count = (bytes_total / qspi_flash_sector_size) + 1;
+
+ progress.show("Erasing flash sectors...", sector_count);
+
+ for(uint32_t sector = 0; sector < sector_count; sector++) {
+ qspi_sector_erase(sector * qspi_flash_sector_size);
+
+ progress.update(sector);
+ }
progress.show("Copying from SD card to flash...", bytes_total);
|
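For reference, a worked example of the sector arithmetic introduced above, using the 64 KiB sector size added as a constant; the image size is illustrative:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t sector_size = 64 * 1024;   /* qspi_flash_sector_size */
    const uint32_t bytes_total = 150000;      /* illustrative image size */

    /* 150000 / 65536 = 2, +1 covers the partially used last sector. */
    uint32_t sector_count = (bytes_total / sector_size) + 1;

    for (uint32_t s = 0; s < sector_count; s++)
        printf("erase sector %u at offset 0x%06x\n",
               (unsigned) s, (unsigned) (s * sector_size));
    /* erases sectors 0, 1 and 2 at offsets 0x000000, 0x010000, 0x020000 */
    return 0;
}
```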
sharpen the support section | @@ -55,6 +55,22 @@ works in the same way as described for OpenID Connect above. See the [Wiki](http
For an exhaustive description of all configuration options, see the file `auth_openidc.conf`
in this directory. This file can also serve as an include file for `httpd.conf`.
+Support
+-------
+
+#### Community Support
+For generic questions, see the Wiki pages with Frequently Asked Questions at:
+ [https://github.com/zmartzone/mod_auth_openidc/wiki](https://github.com/zmartzone/mod_auth_openidc/wiki)
+There is a Google Group/mailing list at:
+ [[email protected]](mailto:[email protected])
+The corresponding forum/archive is at:
+ [https://groups.google.com/forum/#!forum/mod_auth_openidc](https://groups.google.com/forum/#!forum/mod_auth_openidc)
+Any questions/issues should go to the mailing list. The Github issues tracker should be used only for bugs reports and feature requests.
+
+#### Commercial Services
+For commercial Support contracts, Professional Services, Training and use-case specific support you can contact:
+ [[email protected]](mailto:[email protected])
+
How to Use It
-------------
@@ -156,22 +172,8 @@ OIDCOAuthVerifySharedKeys plain##<shared-secret-to-validate-symmetric-jwt-signat
</Location>
```
-Support
--------
-
-See the Wiki pages with Frequently Asked Questions at:
- https://github.com/zmartzone/mod_auth_openidc/wiki
-There is a Google Group/mailing list at:
- [[email protected]](mailto:[email protected])
-The corresponding forum/archive is at:
- https://groups.google.com/forum/#!forum/mod_auth_openidc
-For commercial support and consultancy you can contact:
- [[email protected]](mailto:[email protected])
-
-Any questions/issues should go to the mailing list. The Github issues tracker should be used only for bugs reports and feature requests.
-
Disclaimer
----------
-*This software is open sourced by ZmartZone IAM. For commercial support
-you can contact [ZmartZone IAM](https://www.zmartzone.eu) as described above.*
+*This software is open sourced by ZmartZone IAM. For commercial services
+you can contact [ZmartZone IAM](https://www.zmartzone.eu) as described above in the [Support](#support) section.*
|
proc: allow for clearing locks set by another thread
JIRA: | @@ -1539,11 +1539,11 @@ static void proc_lockUnlock(lock_t *lock)
static int _proc_lockClear(lock_t *lock)
{
- thread_t *current = proc_current();
+ thread_t *owner = lock->owner;
spinlock_ctx_t sc;
int ret;
- if (lock->owner != current) {
+ if (owner == NULL) {
return -EPERM;
}
@@ -1551,7 +1551,7 @@ static int _proc_lockClear(lock_t *lock)
if (ret) {
hal_spinlockSet(&threads_common.spinlock, &sc);
- current->priority = _proc_threadGetPriority(current);
+ owner->priority = _proc_threadGetPriority(owner);
hal_spinlockClear(&threads_common.spinlock, &sc);
}
|
Add required libfl-dev in Ubuntu Bionic
Otherwise `make` fails with a missing `FlexLexer.h` error. | @@ -335,7 +335,7 @@ sudo apt-get update
# For Bionic (18.04 LTS)
sudo apt-get -y install bison build-essential cmake flex git libedit-dev \
- libllvm6.0 llvm-6.0-dev libclang-6.0-dev python zlib1g-dev libelf-dev
+ libllvm6.0 llvm-6.0-dev libclang-6.0-dev python zlib1g-dev libelf-dev libfl-dev
# For Eoan (19.10) or Focal (20.04.1 LTS)
sudo apt install -y bison build-essential cmake flex git libedit-dev \
|
Propagated NXT_RUBY_CFLAGS to Ruby checks.
This fixes an issue addressed in on FreeBSD 12.
The problem manifested itself as:
configuring Ruby module
checking for -fdeclspec ... found
checking for Ruby library ... not found
checking for Ruby library in /usr/local/lib ... not found
./configure: error: no Ruby found. | @@ -101,7 +101,7 @@ if /bin/sh -c "$NXT_RUBY -v" >> $NXT_AUTOCONF_ERR 2>&1; then
nxt_feature="Ruby library"
nxt_feature_name=""
nxt_feature_run=value
- nxt_feature_incs="${NXT_RUBY_INCPATH}"
+ nxt_feature_incs="${NXT_RUBY_INCPATH} ${NXT_RUBY_CFLAGS}"
nxt_feature_libs="${NXT_RUBY_LIBS}"
nxt_feature_test="
#include <ruby.h>
@@ -125,7 +125,7 @@ if /bin/sh -c "$NXT_RUBY -v" >> $NXT_AUTOCONF_ERR 2>&1; then
nxt_feature="Ruby library in $NXT_RUBY_LIBPATH"
nxt_feature_name=""
nxt_feature_run=no
- nxt_feature_incs="${NXT_RUBY_INCPATH}"
+ nxt_feature_incs="${NXT_RUBY_INCPATH} ${NXT_RUBY_CFLAGS}"
nxt_feature_libs="${NXT_RUBY_LIBS}"
nxt_feature_test="
#include <ruby.h>
@@ -153,7 +153,7 @@ fi
nxt_feature="Ruby version"
nxt_feature_name=""
nxt_feature_run=value
-nxt_feature_incs="${NXT_RUBY_INCPATH}"
+nxt_feature_incs="${NXT_RUBY_INCPATH} ${NXT_RUBY_CFLAGS}"
nxt_feature_libs="${NXT_RUBY_LIBS}"
nxt_feature_test="
#include <ruby.h>
|
[apps] Fix default -march for LLVM compilation in CI | @@ -32,7 +32,11 @@ COMPILER ?= llvm
RISCV_XLEN ?= 32
-# Compiler -march
+RISCV_ABI ?= ilp32
+RISCV_TARGET ?= riscv$(RISCV_XLEN)-unknown-elf
+ifeq ($(COMPILER),gcc)
+ # Use GCC
+ # GCC compiler -march
ifeq ($(xpulpimg),1)
RISCV_ARCH ?= rv$(RISCV_XLEN)imaXpulpimg
RISCV_ARCH_AS ?= $(RISCV_ARCH)
@@ -40,17 +44,16 @@ else
RISCV_ARCH ?= rv$(RISCV_XLEN)ima
RISCV_ARCH_AS ?= $(RISCV_ARCH)Xpulpv2
endif
-
-RISCV_ABI ?= ilp32
-RISCV_TARGET ?= riscv$(RISCV_XLEN)-unknown-elf
-ifeq ($(COMPILER),gcc)
- # Use GCC
+ # GCC Toolchain
RISCV_PREFIX ?= $(GCC_INSTALL_DIR)/bin/$(RISCV_TARGET)-
RISCV_CC ?= $(RISCV_PREFIX)gcc
RISCV_CXX ?= $(RISCV_PREFIX)g++
RISCV_OBJDUMP ?= $(RISCV_PREFIX)objdump
else
# Use LLVM by default
+ # LLVM compiler -march
+ RISCV_ARCH ?= rv$(RISCV_XLEN)ima
+ # GCC Toolchain
RISCV_PREFIX ?= $(LLVM_INSTALL_DIR)/bin/llvm-
RISCV_CC ?= $(LLVM_INSTALL_DIR)/bin/clang
RISCV_CXX ?= $(LLVM_INSTALL_DIR)/bin/clang++
|
Add Fujitsu compiler | @@ -65,6 +65,7 @@ $compiler = OPEN64 if ($data =~ /COMPILER_OPEN64/);
$compiler = SUN if ($data =~ /COMPILER_SUN/);
$compiler = IBM if ($data =~ /COMPILER_IBM/);
$compiler = DEC if ($data =~ /COMPILER_DEC/);
+$compiler = FUJITSU if ($data =~ /COMPILER_FUJITSU/);
$compiler = GCC if ($compiler eq "");
$os = Linux if ($data =~ /OS_LINUX/);
@@ -189,6 +190,10 @@ if ($compiler eq "GCC" || $compiler eq "LSB") {
$openmp = "-fopenmp";
}
+if ($compiler eq "FUJITSU") {
+ $openmp = "-Kopenmp";
+}
+
if ($defined == 0) {
$compiler_name .= " -m32" if ($binary eq "32");
$compiler_name .= " -m64" if ($binary eq "64");
|
Removed leftover from 939 | @@ -936,7 +936,6 @@ static void inter_recon_bipred_avx2(const int hi_prec_luma_rec0,
break;
case 16:
- _MM_SHUFFLE
temp_epi8 = _mm256_permute4x64_epi64(_mm256_packus_epi16(temp_y_epi16, temp_y_epi16), _MM_SHUFFLE(0, 2, 1, 3));
_mm_storeu_si128((__m128i*)&(lcu->rec.y[(y_in_lcu)* LCU_WIDTH + x_in_lcu]), _mm256_castsi256_si128(temp_epi8));
|
config_tools: remove the assumption about the virtio-net device name
Since the PR has removed the assumption about the virtio-net device name,
we also remove it in the launch script generation logic. | @@ -128,7 +128,7 @@ function add_virtual_device() {
if [ "${kind}" = "virtio-net" ]; then
# Create the tap device
tap_conf=${options%,*}
- create_tap "tap_${tap_conf#tap=}" >> /dev/stderr
+ create_tap "${tap_conf#tap=}" >> /dev/stderr
fi
echo -n "-s ${slot},${kind}"
|
fortuna reorg | @@ -2749,8 +2749,6 @@ bool CBlock::ConnectBlock(CTxDB& txdb, CBlockIndex* pindex, bool fJustCheck)
}
}
- if (FortunaReorgBlock) FortunaReorgBlock = false;
-
// ppcoin: track money supply and mint amount info
pindex->nMint = nValueOut - nValueIn + nFees;
pindex->nMoneySupply = (pindex->pprev? pindex->pprev->nMoneySupply : 0) + nValueOut - nValueIn;
@@ -2836,6 +2834,8 @@ bool static Reorganize(CTxDB& txdb, CBlockIndex* pindexNew)
{
printf("REORGANIZE\n");
+ FortunaReorgBlock = true;
+
// Find the fork
CBlockIndex* pfork = pindexBest;
CBlockIndex* plonger = pindexNew;
@@ -2930,6 +2930,7 @@ bool static Reorganize(CTxDB& txdb, CBlockIndex* pindexNew)
mempool.removeConflicts(tx);
}
+ FortunaReorgBlock = false;
printf("REORGANIZE: done\n");
return true;
@@ -2999,11 +3000,6 @@ bool CBlock::SetBestChain(CTxDB& txdb, CBlockIndex* pindexNew)
if (!vpindexSecondary.empty())
printf("Postponing %"PRIszu" reconnects\n", vpindexSecondary.size());
-
- if (vpindexSecondary.size() > nCoinbaseMaturity) {
- // printf("Disabling Fortuna stake checks to allow reorganization of matured blocks without block-current MN list.");
- FortunaReorgBlock = true;
- }
// Switch to new best branch
if (!Reorganize(txdb, pindexIntermediate))
{
@@ -3029,6 +3025,8 @@ bool CBlock::SetBestChain(CTxDB& txdb, CBlockIndex* pindexNew)
if (!block.SetBestChainInner(txdb, pindex))
break;
}
+
+
}
// Update best block in wallet (so we can detect restored wallets)
|
Adding ota image generator script | @@ -328,7 +328,9 @@ add_custom_command(
COMMAND "${xc32_bin2hex}" "$<TARGET_FILE:${exe_target}>"
COMMAND "echo" "Running xc32-objcopy"
COMMAND "echo" ${XC32_BIN}/xc32_objcopy
- COMMAND "${xc32_objcopy}" -I ihex $<TARGET_FILE_DIR:${exe_target}>/${exe_target}.hex -O binary $<TARGET_FILE_DIR:${exe_target}>/${exe_target}.intermediate.hex
+ COMMAND "${xc32_objcopy}" -I ihex $<TARGET_FILE_DIR:${exe_target}>/${exe_target}.hex -O binary $<TARGET_FILE_DIR:${exe_target}>/${exe_target}.intermediate.bin
+ COMMAND "echo" "Creating binary image"
+ COMMAND "python" ${AFR_DEMOS_DIR}/ota/bootloader/utility/ota_image_generator.py -b $<TARGET_FILE_DIR:${exe_target}>/${exe_target}.intermediate.bin -p MCHP-Curiosity-PIC32MZEF
)
# COMMAND "${xc32_bin2hex}" "$<TARGET_FILE:${exe_target}>"
|
armv7: To support domain spanning, use a different vregion per core
This should fix the proc_mgmt_test on armv7 | #include <stdio.h>
// Location of VSpace managed by this system.
-#define VSPACE_BEGIN ((lvaddr_t)1UL*1024*1024*1024) //0x40000000
+#define VSPACE_BEGIN ((lvaddr_t)(256UL << 20) * (disp_get_core_id() + 1))
// Amount of virtual address space reserved for mapping frames
// backing refill_slabs.
@@ -855,8 +855,7 @@ static errval_t do_single_unmap(struct pmap_arm *pmap, genvaddr_t vaddr,
}
} else if (pt) {
#ifdef LIBBARRELFISH_DEBUG_PMAP
- debug_printf("section unmap: entry = %zu, pte_count = %zu\n",
- pt->entry, pt->u.frame.kernel_pte_count);
+ debug_printf("section unmap: entry = %zu\n", pt->entry);
#endif
err = vnode_unmap(pmap->root.u.vnode.cap, pt->mapping);
if (err_is_fail(err)) {
|
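A worked example of the per-core layout defined by the new macro above (the core id is passed in explicitly here for illustration; in the diff it comes from disp_get_core_id()):

```c
#include <stdio.h>

/* Mirrors (256UL << 20) * (disp_get_core_id() + 1) from the diff. */
static unsigned long vspace_begin(unsigned core_id)
{
    return (256UL << 20) * (core_id + 1);
}

int main(void)
{
    /* core 0 -> 0x10000000 (256 MiB), core 1 -> 0x20000000 (512 MiB),
     * core 2 -> 0x30000000, core 3 -> 0x40000000 (the old fixed base). */
    for (unsigned c = 0; c < 4; c++)
        printf("core %u: VSPACE_BEGIN = 0x%08lx (%lu MiB)\n",
               c, vspace_begin(c), vspace_begin(c) >> 20);
    return 0;
}
```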
Fix bottle upload problem & typo | on:
push:
paths:
- - '**/nightlyHomebrew-build.yml'
+ - '**/nightly-Homebrew-build.yml'
pull_request:
branches:
- develop
@@ -51,13 +51,16 @@ jobs:
# the HEAD flags tell Homebrew to build the develop branch fetch via git
- name: Create bottle
- run: brew bottle -v openblas
+ run: |
+ brew bottle -v openblas
+ mkdir bottles
+ mv *.bottle.tar.gz bottles
- name: Upload bottle
uses: actions/upload-artifact@v1
with:
name: openblas--HEAD.catalina.bottle.tar.gz
- paht: ./*.bottle.*
+ path: bottles
- name: Show linkage
run: brew linkage -v openblas
|
clay: print stacktrace on build failure | !:
=- ?: ?=(%& -<) p.-
%. [[~ ~] fod]
- (slog leaf+"clay: read-at-aeon fail {<[desk=syd mun]>}" ~)
+ (slog leaf+"clay: read-at-aeon fail {<[desk=syd mun]>}" p.-)
%- mule |.
?- care.mun
%d
|
VOM: routes support multipath so set is_multipath in route update | @@ -49,7 +49,7 @@ update_cmd::issue(connection& con)
payload.table_id = m_id;
payload.is_add = 1;
- payload.is_multipath = 0;
+ payload.is_multipath = 1;
m_prefix.to_vpp(&payload.is_ipv6, payload.dst_address,
&payload.dst_address_length);
|
docs/esp32: Fix machine.Timer quickref to specify HW timers.
Also remove trailing spaces on other lines. | @@ -118,17 +118,21 @@ Use the :mod:`time <utime>` module::
Timers
------
-Virtual (RTOS-based) timers are supported. Use the :ref:`machine.Timer <machine.Timer>` class
-with timer ID of -1::
+The ESP32 port has four hardware timers. Use the :ref:`machine.Timer <machine.Timer>` class
+with a timer ID from 0 to 3 (inclusive)::
from machine import Timer
- tim = Timer(-1)
- tim.init(period=5000, mode=Timer.ONE_SHOT, callback=lambda t:print(1))
- tim.init(period=2000, mode=Timer.PERIODIC, callback=lambda t:print(2))
+ tim0 = Timer(0)
+ tim0.init(period=5000, mode=Timer.ONE_SHOT, callback=lambda t:print(0))
+
+ tim1 = Timer(1)
+ tim1.init(period=2000, mode=Timer.PERIODIC, callback=lambda t:print(1))
The period is in milliseconds.
+Virtual timers are not currently supported on this port.
+
.. _Pins_and_GPIO:
Pins and GPIO
|
Minor fix of code duplication.
Removes ~3 lines of code that didn't need to be restated. | @@ -186,11 +186,7 @@ static void log_kernel(void) {
static bool drop_permissions(void) {
if (getuid() != geteuid() || getgid() != getegid()) {
- if (setgid(getgid()) != 0) {
- sway_log(SWAY_ERROR, "Unable to drop root, refusing to start");
- return false;
- }
- if (setuid(getuid()) != 0) {
+ if (setuid(getuid()) != 0 || setgid(getgid()) != 0) {
sway_log(SWAY_ERROR, "Unable to drop root, refusing to start");
return false;
}
|
decision: add some ideas by | @@ -108,6 +108,26 @@ This allows notification and change tracking functionality to work determine and
A similiar thing was already attempted for values, i.e. `meta:/origvalue`.
+### Create an API for transformations
+
+Plugins must use a special function to transform key names, e.g.:
+
+```c
+typedef const char * (*ElektraNameTransform)(const Key *);
+
+int elektraApplyNameTransform (KeySet * ks, ElektraNameTransform transform);
+```
+
+How the `elektraApplyNameTransform` function marks the original name is an internal implementation detail.
+May be as `meta:/` or something else entireley.
+
+Something similar could be done for the value of a key as well.
+
+
+### Introduce a new phase for transformations
+
+We could also introduce a new phase between before/after storage exclusively for transformations.
+Then we can just do a "fake" call to that phase to get back the runtime names for change tracking.
## Decision
|
odissey: refactor logger function | @@ -46,16 +46,22 @@ typedef struct
int msg_len;
} od_logger_msg_t;
-static char *od_logger_event_tab[] =
+typedef struct {
+ od_logsystem_prio_t syslog_prio;
+ char *ident;
+ char *ident_short;
+} od_logger_ident_t;
+
+static od_logger_ident_t od_logger_ident_tab[] =
{
- [OD_LOG] = "info",
- [OD_LOG_ERROR] = "error",
- [OD_LOG_CLIENT] = "client_info",
- [OD_LOG_CLIENT_ERROR] = "client_error",
- [OD_LOG_CLIENT_DEBUG] = "client_debug",
- [OD_LOG_SERVER] = "server_info",
- [OD_LOG_SERVER_ERROR] = "server_error",
- [OD_LOG_SERVER_DEBUG] = "server_debug"
+ [OD_LOG] = { OD_LOGSYSTEM_INFO, "info", "" },
+ [OD_LOG_ERROR] = { OD_LOGSYSTEM_ERROR, "error", "error" },
+ [OD_LOG_CLIENT] = { OD_LOGSYSTEM_INFO, "client_info", "" },
+ [OD_LOG_CLIENT_ERROR] = { OD_LOGSYSTEM_ERROR, "client_error", "error" },
+ [OD_LOG_CLIENT_DEBUG] = { OD_LOGSYSTEM_DEBUG, "client_debug", "debug" },
+ [OD_LOG_SERVER] = { OD_LOGSYSTEM_INFO, "server_info", "" },
+ [OD_LOG_SERVER_ERROR] = { OD_LOGSYSTEM_ERROR, "server_error", "error" },
+ [OD_LOG_SERVER_DEBUG] = { OD_LOGSYSTEM_DEBUG, "server_debug", "debug" }
};
void od_logger_init(od_logger_t *logger, od_pid_t *pid)
@@ -113,32 +119,15 @@ void od_loggerv(od_logger_t *logger,
(signed)tv.tv_usec / 1000);
msg.timestamp_len = buf_len - msg.timestamp_len;
- /* event */
- od_logsystem_prio_t prio;
- char *ident;
- msg.event = od_logger_event_tab[event];
+ /* ident */
+ od_logger_ident_t *ident;
+ ident = &od_logger_ident_tab[event];
+
+ msg.event = ident->ident;
msg.event_len = strlen(msg.event);
- switch (event) {
- case OD_LOG:
- case OD_LOG_CLIENT:
- case OD_LOG_SERVER:
- ident = NULL;
- prio = OD_LOGSYSTEM_INFO;
- break;
- case OD_LOG_ERROR:
- case OD_LOG_CLIENT_ERROR:
- case OD_LOG_SERVER_ERROR:
- ident = "error: ";
- prio = OD_LOGSYSTEM_ERROR;
- break;
- case OD_LOG_CLIENT_DEBUG:
- case OD_LOG_SERVER_DEBUG:
- ident = "debug: ";
- prio = OD_LOGSYSTEM_DEBUG;
- break;
- }
- if (ident) {
- buf_len += snprintf(buf + buf_len, sizeof(buf) - buf_len, "%s", ident);
+ if (*ident->ident_short) {
+ buf_len += snprintf(buf + buf_len, sizeof(buf) - buf_len,
+ "%s: ", ident->ident_short);
}
/* id */
@@ -175,7 +164,7 @@ void od_loggerv(od_logger_t *logger,
/* write log message */
od_logfile_write(&logger->log, buf, buf_len);
- od_logsystem(&logger->log_system, prio, buf, buf_len);
+ od_logsystem(&logger->log_system, ident->syslog_prio, buf, buf_len);
write(0, buf, buf_len);
}
|
Scroll to bottom after printing | @@ -34,6 +34,7 @@ class ConsoleViewController: UIViewController, UITextViewDelegate {
DispatchQueue.main.async {
self.textView?.text.append(output)
self.textViewDidChange(self.textView)
+ self.textView?.scrollToBottom()
}
}
}
|
README.md: Add downloads counter badge | @@ -2,11 +2,12 @@ Open source version of the STMicroelectronics Stlink Tools
==========================================================
[](https://github.com/texane/stlink/releases/latest)
+[](https://raw.githubusercontent.com/hyperium/hyper/master/LICENSE)
[](https://github.com/texane/stlink/compare/1.3.0...master)
+[](https://github.com/texane/stlink/releases)
[](https://travis-ci.org/texane/stlink)
[](https://jenkins.ncrmnt.org/job/GithubCI/job/stlink/)
[](https://ci.appveyor.com/project/xor-gate/stlink)
-[](https://raw.githubusercontent.com/hyperium/hyper/master/LICENSE)
## HOWTO
|
chat-js: adjust line height in chat input
Set the line height of the input to be the same as a sent message.
Additionally fixes an issue that would cause unnecessary scrollbars to
be shown. | @@ -8,7 +8,11 @@ import { Sigil } from '/components/lib/icons/sigil';
import { uuid, uxToHex, hexToRgba } from '/lib/util';
-const DEFAULT_INPUT_HEIGHT = 28;
+
+// line height
+const INPUT_LINE_HEIGHT = 28;
+
+const INPUT_TOP_PADDING = 3;
function getAdvance(a, b) {
@@ -103,7 +107,7 @@ export class ChatInput extends Component {
this.state = {
message: '',
- textareaHeight: DEFAULT_INPUT_HEIGHT,
+ textareaHeight: INPUT_LINE_HEIGHT + INPUT_TOP_PADDING + 1,
patpSuggestions: [],
selectedSuggestion: null
};
@@ -309,9 +313,8 @@ export class ChatInput extends Component {
}
textareaInput() {
- const newHeight = this.textareaRef.current.scrollHeight < DEFAULT_INPUT_HEIGHT * 8
- ? `${this.textareaRef.current.scrollHeight}px`
- : `${DEFAULT_INPUT_HEIGHT * 8}px`
+ const maxHeight = INPUT_LINE_HEIGHT * 8 + INPUT_TOP_PADDING;
+ const newHeight = `${Math.min(maxHeight, this.textareaRef.current.scrollHeight)}px`;
this.setState({
textareaHeight: newHeight
@@ -418,7 +421,7 @@ export class ChatInput extends Component {
this.setState({
message: '',
- textareaHeight: DEFAULT_INPUT_HEIGHT
+ textareaHeight: INPUT_LINE_HEIGHT + INPUT_TOP_PADDING + 1
});
}
@@ -459,8 +462,8 @@ export class ChatInput extends Component {
</div>
<div className="fr h-100 flex bg-gray0-d" style={{ flexGrow: 1 }}>
<textarea
- className={"pl3 bn bg-gray0-d white-d"}
- style={{ flexGrow: 1, height: state.textareaHeight, paddingTop: 6, resize: "none" }}
+ className={"pl3 bn bg-gray0-d white-d lh-copy"}
+ style={{ flexGrow: 1, height: state.textareaHeight, paddingTop: INPUT_TOP_PADDING, resize: "none" }}
autoCapitalize="none"
autoFocus={(
/Android|webOS|iPhone|iPad|iPod|BlackBerry/i.test(
|
Add missing PSA_HASH_BLOCK_LENGTH macro. | PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA3_512 ? 64 : \
0)
+/** The input block size of a hash algorithm, in bytes.
+ *
+ * Hash algorithms process their input data in blocks. Hash operations will
+ * retain any partial blocks until they have enough input to fill the block or
+ * until the operation is finished.
+ * This affects the output from psa_hash_suspend().
+ *
+ * \param alg A hash algorithm (\c PSA_ALG_XXX value such that
+ * PSA_ALG_IS_HASH(\p alg) is true).
+ *
+ * \return The block size in bytes for the specified hash algorithm.
+ * If the hash algorithm is not recognized, return 0.
+ * An implementation can return either 0 or the correct size for a
+ * hash algorithm that it recognizes, but does not support.
+ */
+#define PSA_HASH_BLOCK_LENGTH(alg) \
+ ( \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_MD5 ? 64 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_RIPEMD160 ? 64 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA_1 ? 64 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA_224 ? 64 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA_256 ? 64 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA_384 ? 128 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA_512 ? 128 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA_512_224 ? 128 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA_512_256 ? 128 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA3_224 ? 144 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA3_256 ? 136 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA3_384 ? 104 : \
+ PSA_ALG_HMAC_GET_HASH(alg) == PSA_ALG_SHA3_512 ? 72 : \
+ 0)
+
/** \def PSA_HASH_MAX_SIZE
*
* Maximum size of a hash.
|
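A small usage sketch of the macro added above, assuming an Mbed TLS build whose `psa/crypto.h` provides the PSA_ALG_SHA_* identifiers. Because the macro expands to an integer constant expression, it can size a buffer at compile time; the printed values follow the table in the diff (64 for SHA-256, 128 for SHA-512):

```c
#include <psa/crypto.h>
#include <stdio.h>

int main(void)
{
    /* 64-byte buffer sized from the SHA-256 input block length. */
    unsigned char block[PSA_HASH_BLOCK_LENGTH(PSA_ALG_SHA_256)];

    printf("SHA-256 block length: %u bytes\n", (unsigned) sizeof(block));
    printf("SHA-512 block length: %u bytes\n",
           (unsigned) PSA_HASH_BLOCK_LENGTH(PSA_ALG_SHA_512));
    return 0;
}
```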
Fix AFR_ENABLE_DEMOS in CMake GUI | @@ -63,7 +63,8 @@ option(AFR_ENABLE_DEMOS "Build demos for Amazon FreeRTOS." ON)
# Provide an option to enable tests. Also set an helper variable to use in generator expression.
option(AFR_ENABLE_TESTS "Build tests for Amazon FreeRTOS. Requires recompiling whole library." OFF)
if(AFR_ENABLE_TESTS)
- set(AFR_ENABLE_DEMOS 0 CACHE INTERNAL "") # Turning off demo when tests are enabled.
+ # Turning off demo when tests are enabled.
+ set(AFR_ENABLE_DEMOS 0 CACHE BOOL "Build demos for Amazon FreeRTOS." FORCE)
add_compile_definitions(AMAZON_FREERTOS_ENABLE_UNIT_TESTS)
add_compile_definitions(IOT_BUILD_TESTS=1)
set(AFR_IS_TESTING 1 CACHE INTERNAL "")
|
Driver: Struct order matters
Otherwise you get a nontrivial runtime error when creating instances. | @@ -15,6 +15,7 @@ struct _mp_obj_type_id_t {
uint16_t flags;
uint16_t name;
mp_print_fun_t print;
+ mp_make_new_fun_id_t make_new; // Modified type compared to MicroPython
mp_call_fun_t call;
mp_unary_op_fun_t unary_op;
mp_binary_op_fun_t binary_op;
@@ -26,7 +27,6 @@ struct _mp_obj_type_id_t {
const void *protocol;
const void *parent;
struct _mp_obj_dict_t *locals_dict;
- // The following two are modified/added
- mp_make_new_fun_id_t make_new;
+ // The following was added
pbio_id_t device_id;
};
|
hw/mcu/cmac: Dispatch RF calibration req
Handled in mynewt-nimble. | #include "cmac_priv.h"
#include "CMAC.h"
+extern void ble_rf_calibrate_req(void);
+
void
SYS2CMAC_IRQHandler(void)
{
@@ -41,6 +43,10 @@ SYS2CMAC_IRQHandler(void)
cmac_sleep_recalculate();
}
+ if (pending_ops & CMAC_PENDING_OP_RF_CAL) {
+ ble_rf_calibrate_req();
+ }
+
CMAC->CM_EXC_STAT_REG = CMAC_CM_EXC_STAT_REG_EXC_SYS2CMAC_Msk;
}
|
oc_oscore_engine: refine secure mcast logic | @@ -161,6 +161,14 @@ oc_oscore_recv_message(oc_message_t *message)
goto oscore_recv_error;
}
+ oc_sec_cred_t *c = (oc_sec_cred_t *)oscore_ctx->cred;
+ if (!(message->endpoint.flags & MULTICAST) &&
+ c->credtype != OC_CREDTYPE_OSCORE) {
+ OC_ERR("***unicast message protected using group OSCORE context; "
+ "silently ignore***");
+ goto oscore_recv_error;
+ }
+
/* Copy "subjectuuid" of cred with OSCORE context to oc_endpoint_t */
oc_sec_cred_t *oscore_cred = (oc_sec_cred_t *)oscore_ctx->cred;
memcpy(message->endpoint.di.id, oscore_cred->subjectuuid.id, 16);
@@ -271,6 +279,13 @@ oc_oscore_recv_message(oc_message_t *message)
OC_DBG("### successfully parsed inner message ###");
+ if (c->credtype == OC_CREDTYPE_OSCORE_MCAST_SERVER &&
+ coap_pkt->code != OC_POST) {
+ OC_ERR("***non-UPDATE multicast request protected using group OSCORE "
+ "context; silently ignore***");
+ goto oscore_recv_error;
+ }
+
/* Copy type, version, mid, token, observe fields from OSCORE packet to
* CoAP Packet */
coap_pkt->transport_type = oscore_pkt->transport_type;
|
IO GLib: Add workaround to fix compilation error | @@ -93,6 +93,17 @@ if (FOUND_NAME GREATER -1)
set (SRC_FILES notificationReload.c)
set (SOURCES ${SRC_FILES} ${HDR_FILES})
if (BUILD_FULL OR BUILD_STATIC)
+ # ~~~
+ # Work around an error that occurs if only `BUILD_FULL`, but not `BUILD_SHARED` is enabled:
+ #
+ # > src/include/kdbio/glib.h:11:10: fatal error: glib.h: No such file or directory
+ #
+ # . See also: https://travis-ci.org/sanssecours/elektra/jobs/445784708.
+ # ~~~
+ find_package (PkgConfig QUIET)
+ pkg_check_modules (GLIB QUIET glib-2.0)
+ include_directories (${GLIB_INCLUDE_DIRS})
+
list (APPEND SOURCES
$<TARGET_OBJECTS:OBJ_elektra-io-glib>) # add sources for elektra-io-uv for static and full builds
endif ()
|
refactor(examples) drop JS-specific code from header.py
This logic was moved into the JS simulator itself | import lvgl as lv
import usys as sys
-# JS requires a special import
-if sys.platform == 'javascript':
- import imp
- sys.path.append('https://raw.githubusercontent.com/lvgl/lv_binding_micropython/4c04dba836a5affcf86cef107b538e45278117ae/lib')
-
import display_driver
|
doc: add short def of hole/non-leaf value | # Holes and Non-leaf values in KeySets
+A hole is the absence of a key, which has keys below it, e.g. if `some/key` is missing in a property file:
+
+```ini
+some = value
+some/key/below = value
+```
+
+`some` has a non-leaf value.
+
+
## Problem
Config files ideally do not copy any structure if they only want to
|
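A hedged sketch of the same situation using Elektra's C KeySet API (assuming the usual `kdb.h` constructors; the key names mirror the property-file example above): the set holds `some` and `some/key/below` but no `some/key`, so `some/key` is a hole and `some` carries a non-leaf value.

```c
#include <kdb.h>

int main(void)
{
    KeySet * ks = ksNew (2,
        keyNew ("user:/some", KEY_VALUE, "value", KEY_END),
        keyNew ("user:/some/key/below", KEY_VALUE, "value", KEY_END),
        KS_END);

    /* `user:/some/key` is absent even though a key exists below it. */
    ksDel (ks);
    return 0;
}
```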
Remove superfluous casts in LMS and LMOTS | @@ -393,9 +393,7 @@ int mbedtls_lmots_calculate_public_key_candidate( const mbedtls_lmots_parameters
return ( ret );
}
- ret = public_key_from_hashed_digit_array( params,
- ( const unsigned char( *)[MBEDTLS_LMOTS_N_HASH_LEN] )y_hashed_digits,
- out );
+ ret = public_key_from_hashed_digit_array( params, y_hashed_digits, out );
if ( ret )
{
return ( ret );
@@ -569,27 +567,21 @@ int mbedtls_lmots_calculate_public_key( mbedtls_lmots_public_t *ctx,
unsigned char y_hashed_digits[MBEDTLS_LMOTS_P_SIG_DIGIT_COUNT][MBEDTLS_LMOTS_N_HASH_LEN];
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
- if( ctx == NULL )
- {
- return( MBEDTLS_ERR_LMS_BAD_INPUT_DATA );
- }
-
/* Check that a private key is loaded */
if ( !priv_ctx->have_private_key )
{
return( MBEDTLS_ERR_LMS_BAD_INPUT_DATA );
}
- ret = hash_digit_array( &priv_ctx->params,
- ( const unsigned char( *)[MBEDTLS_LMOTS_N_HASH_LEN] )(priv_ctx->private_key),
- NULL, NULL, y_hashed_digits );
+ ret = hash_digit_array( &priv_ctx->params, priv_ctx->private_key, NULL,
+ NULL, y_hashed_digits );
if ( ret )
{
return( ret );
}
ret = public_key_from_hashed_digit_array( &priv_ctx->params,
- ( const unsigned char( *)[MBEDTLS_LMOTS_N_HASH_LEN] )y_hashed_digits,
+ y_hashed_digits,
ctx->public_key );
if ( ret )
{
@@ -683,7 +675,7 @@ int mbedtls_lmots_sign( mbedtls_lmots_private_t *ctx,
}
ret = hash_digit_array( &ctx->params,
- ( const unsigned char( *)[MBEDTLS_LMOTS_N_HASH_LEN] )(ctx->private_key),
+ ctx->private_key,
NULL, tmp_digit_array, tmp_sig );
if ( ret )
{
|
Completions: Update help text for depth options | @@ -418,12 +418,12 @@ __fish_kdb_add_option '__fish_kdb_subcommand_includes merge mount remount smount
__fish_kdb_add_option '__fish_kdb_subcommand_includes info' 'load' 'l' 'Load plugin even if system/elektra is available'
# --max-depth -M
-set -l description 'Specify the maximum depth of completion suggestions (unlimited by default, 1 to show only the next level), inclusive'
+set -l description 'Specify the maximum depth (unlimited by default, 1 to show only the next level), exclusive and relative to the name'
set -l argument_function '__fish_kdb_print_option_depth_arguments most 1'
__fish_kdb_add_option '__fish_kdb_subcommand_includes complete ls' 'max-depth' 'M' "$description" "($argument_function)"
# --min-depth -m
-set -l description 'Specify the minimum depth of completion suggestions (0 by default), exclusive'
+set -l description 'Specify the minimum depth (0 by default), inclusive and relative to the name'
set -l argument_function '__fish_kdb_print_option_depth_arguments least 0'
__fish_kdb_add_option '__fish_kdb_subcommand_includes complete ls' 'min-depth' 'm' "$description" "($argument_function)"
|
py/obj.h: Remove obsolete mp_obj_new_fun_viper() declaration. | @@ -649,7 +649,6 @@ mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const char *msg
mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char *fmt, ...); // counts args by number of % symbols in fmt, excluding %%; can only handle void* sizes (ie no float/double!)
mp_obj_t mp_obj_new_fun_bc(mp_obj_t def_args, mp_obj_t def_kw_args, const byte *code, const mp_uint_t *const_table);
mp_obj_t mp_obj_new_fun_native(mp_obj_t def_args_in, mp_obj_t def_kw_args, const void *fun_data, const mp_uint_t *const_table);
-mp_obj_t mp_obj_new_fun_viper(size_t n_args, void *fun_data, mp_uint_t type_sig);
mp_obj_t mp_obj_new_fun_asm(size_t n_args, void *fun_data, mp_uint_t type_sig);
mp_obj_t mp_obj_new_gen_wrap(mp_obj_t fun);
mp_obj_t mp_obj_new_closure(mp_obj_t fun, size_t n_closed, const mp_obj_t *closed);
|
AFR NimBLE: Save pusHandlesBuffer from AFR service locally | @@ -529,6 +529,7 @@ void vESPBTGATTServerCleanup( void )
for( index = 0; index < serviceCnt; index++ )
{
prvCleanupService( &espServices[ index ] );
+ vPortFree( ( void * ) afrServices[ index ]->pusHandlesBuffer );
vPortFree( ( void * ) afrServices[ index ] );
}
@@ -884,6 +885,7 @@ BTStatus_t prvAddServiceBlob( uint8_t ucServerIf,
uint16_t handle = 0;
uint16_t attributeCount = 0;
BTService_t * afrFullService = NULL;
+ uint16_t * afrHandlesBuffer = NULL;
if( ( pxService == 0 ) || ( pxService->xNumberOfAttributes == 0 ) )
{
@@ -907,6 +909,16 @@ BTStatus_t prvAddServiceBlob( uint8_t ucServerIf,
}
afrServices[ serviceCnt ] = afrFullService;
+
+ afrHandlesBuffer = pvPortCalloc( pxService->xNumberOfAttributes, sizeof( uint16_t ) );
+ if ( afrHandlesBuffer == NULL )
+ {
+ xStatus = eBTStatusNoMem;
+ }
+ else
+ {
+ afrServices[ serviceCnt ]->pusHandlesBuffer = afrHandlesBuffer;
+ }
}
else
{
@@ -931,6 +943,7 @@ BTStatus_t prvAddServiceBlob( uint8_t ucServerIf,
}
/* Fill in field for ESP service. */
+ afrHandlesBuffer[ attributeCount ] = handle;
pxService->pusHandlesBuffer[ attributeCount++ ] = handle;
if( pxService->pxBLEAttributes[ 0 ].xAttributeType == eBTDbPrimaryService )
@@ -1010,6 +1023,7 @@ BTStatus_t prvAddServiceBlob( uint8_t ucServerIf,
/* Update handle for AFR. */
handle += 2;
+ afrHandlesBuffer[ attributeCount ] = handle;
pxService->pusHandlesBuffer[ attributeCount++ ] = handle;
charCount++;
@@ -1018,6 +1032,7 @@ BTStatus_t prvAddServiceBlob( uint8_t ucServerIf,
case eBTDbIncludedService:
handle += 1;
+ afrHandlesBuffer[ attributeCount ] = handle;
pxService->pusHandlesBuffer[ attributeCount++ ] = handle;
pIncludedServices[ IncludedSvcCount ] = prvAFRtoESPIncludedServices( pxService->pxBLEAttributes[ index ].xIncludedService );
IncludedSvcCount++;
@@ -1026,6 +1041,7 @@ BTStatus_t prvAddServiceBlob( uint8_t ucServerIf,
case eBTDbDescriptor:
uuid = prvCopytoESPUUID( &pxService->pxBLEAttributes[ index ].xCharacteristicDescr.xUuid, NULL );
handle += 1;
+ afrHandlesBuffer[ attributeCount ] = handle;
pxService->pusHandlesBuffer[ attributeCount++ ] = handle;
if( uuid == NULL )
|
direct-controls: mambo fix for multiple chips | @@ -29,21 +29,27 @@ extern unsigned long callthru_tcl(const char *str, int len);
static void mambo_sreset_cpu(struct cpu_thread *cpu)
{
+ uint32_t chip_id = pir_to_chip_id(cpu->pir);
uint32_t core_id = pir_to_core_id(cpu->pir);
uint32_t thread_id = pir_to_thread_id(cpu->pir);
char tcl_cmd[50];
- snprintf(tcl_cmd, sizeof(tcl_cmd), "mysim cpu 0:%i:%i start_thread 0x100", core_id, thread_id);
+ snprintf(tcl_cmd, sizeof(tcl_cmd),
+ "mysim cpu %i:%i:%i start_thread 0x100",
+ chip_id, core_id, thread_id);
callthru_tcl(tcl_cmd, strlen(tcl_cmd));
}
static void mambo_stop_cpu(struct cpu_thread *cpu)
{
+ uint32_t chip_id = pir_to_chip_id(cpu->pir);
uint32_t core_id = pir_to_core_id(cpu->pir);
uint32_t thread_id = pir_to_thread_id(cpu->pir);
char tcl_cmd[50];
- snprintf(tcl_cmd, sizeof(tcl_cmd), "mysim cpu 0:%i:%i stop_thread", core_id, thread_id);
+ snprintf(tcl_cmd, sizeof(tcl_cmd),
+ "mysim cpu %i:%i:%i stop_thread",
+ chip_id, core_id, thread_id);
callthru_tcl(tcl_cmd, strlen(tcl_cmd));
}
|
Fix dead code and reduce number of return points in _pe_iterate_resources | @@ -328,7 +328,8 @@ int _pe_iterate_resources(
{
if (!struct_fits_in_pe(pe, entry, IMAGE_RESOURCE_DIRECTORY_ENTRY))
{
- return RESOURCE_ITERATOR_ABORTED;
+ result = RESOURCE_ITERATOR_ABORTED;
+ break;
}
switch(rsrc_tree_level)
@@ -352,11 +353,8 @@ int _pe_iterate_resources(
PIMAGE_RESOURCE_DIRECTORY directory = (PIMAGE_RESOURCE_DIRECTORY) \
(rsrc_data + RESOURCE_OFFSET(entry));
- if (!struct_fits_in_pe(pe, directory, IMAGE_RESOURCE_DIRECTORY))
+ if (struct_fits_in_pe(pe, directory, IMAGE_RESOURCE_DIRECTORY))
{
- return RESOURCE_ITERATOR_ABORTED;
- }
-
result = _pe_iterate_resources(
pe,
directory,
@@ -370,21 +368,20 @@ int _pe_iterate_resources(
lang_string,
callback,
callback_data);
-
- if (result == RESOURCE_ITERATOR_ABORTED)
- return RESOURCE_ITERATOR_ABORTED;
+ }
+ else
+ {
+ result = RESOURCE_ITERATOR_ABORTED;
+ }
}
else
{
PIMAGE_RESOURCE_DATA_ENTRY data_entry = (PIMAGE_RESOURCE_DATA_ENTRY) \
(rsrc_data + RESOURCE_OFFSET(entry));
- if (!struct_fits_in_pe(pe, data_entry, IMAGE_RESOURCE_DATA_ENTRY))
+ if (struct_fits_in_pe(pe, data_entry, IMAGE_RESOURCE_DATA_ENTRY))
{
- return RESOURCE_ITERATOR_ABORTED;
- }
-
- result = callback(
+ if (callback(
data_entry,
*type,
*id,
@@ -392,19 +389,24 @@ int _pe_iterate_resources(
type_string,
name_string,
lang_string,
- callback_data);
-
- if (result == RESOURCE_CALLBACK_ABORT)
- return RESOURCE_ITERATOR_ABORTED;
+ callback_data) == RESOURCE_CALLBACK_ABORT)
+ {
+ result = RESOURCE_ITERATOR_ABORTED;
+ }
+ }
+ else
+ {
+ result = RESOURCE_ITERATOR_ABORTED;
+ }
}
if (result == RESOURCE_ITERATOR_ABORTED)
- return result;
+ break;
entry++;
}
- return RESOURCE_ITERATOR_FINISHED;
+ return result;
}
|
Actor move event uses actor speed | @@ -33,7 +33,6 @@ UINT16 actor_move_dest_x = 0;
UINT16 actor_move_dest_y = 0;
BYTE actor_move_dir_x = 0;
BYTE actor_move_dir_y = 0;
-BYTE actor_move_speed = 1;
UBYTE scene_stack_ptr = 0;
SCENE_STATE scene_stack[MAX_SCENE_STATES] = {{0}};
UBYTE wait_time = 0;
@@ -145,21 +144,33 @@ UBYTE ScriptUpdate_MoveActor() {
// Actor reached destination
if (actors[script_actor].pos.x == actor_move_dest_x &&
actors[script_actor].pos.y == actor_move_dest_y) {
+ actors[script_actor].moving = FALSE;
+ actors[script_actor].vel.x = 0;
+ actors[script_actor].vel.y = 0;
return TRUE;
}
+ actors[script_actor].moving = TRUE;
// Actor not at horizontal destination
if (actors[script_actor].pos.x != actor_move_dest_x) {
+ actors[script_actor].vel.y = 0;
+ actors[script_actor].dir.y = 0;
if (Lt16(actors[script_actor].pos.x, actor_move_dest_x)) {
- actors[script_actor].pos.x += actor_move_speed;
+ actors[script_actor].vel.x = 1;
+ actors[script_actor].dir.x = 1;
} else if (Gt16(actors[script_actor].pos.x, actor_move_dest_x)) {
- actors[script_actor].pos.x -= actor_move_speed;
+ actors[script_actor].vel.x = -1;
+ actors[script_actor].dir.x = -1;
}
} else {
// Actor not at vertical destination
+ actors[script_actor].vel.x = 0;
+ actors[script_actor].dir.x = 0;
if (Lt16(actors[script_actor].pos.y, actor_move_dest_y)) {
- actors[script_actor].pos.y += actor_move_speed;
+ actors[script_actor].vel.y = 1;
+ actors[script_actor].dir.y = 1;
} else if (Gt16(actors[script_actor].pos.y, actor_move_dest_y)) {
- actors[script_actor].pos.y -= actor_move_speed;
+ actors[script_actor].vel.y = -1;
+ actors[script_actor].dir.y = -1;
}
}
return FALSE;
|
board/eve/usb_pd_policy.c: Format with clang-format
BRANCH=none
TEST=none | @@ -50,8 +50,8 @@ static void board_vbus_update_source_current(int port)
* is controlled by GPIO_USB_C0/1_5V_EN. Both of these signals
* can remain outputs.
*/
- gpio_set_level(gpio_3a_en, vbus_rp[port] == TYPEC_RP_3A0 ?
- 1 : 0);
+ gpio_set_level(gpio_3a_en,
+ vbus_rp[port] == TYPEC_RP_3A0 ? 1 : 0);
gpio_set_level(gpio_5v_en, vbus_en[port]);
} else {
/*
@@ -120,15 +120,13 @@ int pd_check_vconn_swap(int port)
return gpio_get_level(GPIO_PMIC_SLP_SUS_L);
}
-void pd_execute_data_swap(int port,
- enum pd_data_role data_role)
+void pd_execute_data_swap(int port, enum pd_data_role data_role)
{
/* Only port 0 supports device mode. */
if (port != 0)
return;
- gpio_set_level(GPIO_USB2_OTG_ID,
- (data_role == PD_ROLE_UFP) ? 1 : 0);
+ gpio_set_level(GPIO_USB2_OTG_ID, (data_role == PD_ROLE_UFP) ? 1 : 0);
gpio_set_level(GPIO_USB2_OTG_VBUSSENSE,
(data_role == PD_ROLE_UFP) ? 1 : 0);
}
|
doc: clarify that auth. names are lower case and case-sensitive
This is true even for acronyms that are usually upper case, like LDAP.
Reported-by: Alvaro Herrera
Discussion:
Backpatch-through: 10 | @@ -413,7 +413,9 @@ hostnogssenc <replaceable>database</replaceable> <replaceable>user</replaceabl
<para>
Specifies the authentication method to use when a connection matches
this record. The possible choices are summarized here; details
- are in <xref linkend="auth-methods"/>.
+ are in <xref linkend="auth-methods"/>. All the options
+ are lower case and treated case sensitively, so even acronyms like
+ <literal>ldap</literal> must be specified as lower case.
<variablelist>
<varlistentry>
|
add survival dataset generating code | @@ -184,6 +184,34 @@ def generate_dataset_with_num_and_cat_features(
return (DataFrame(feature_columns), labels)
+def generate_survival_dataset(seed=20201015):
+ np.random.seed(seed)
+
+ X = np.random.rand(200, 20)*10
+
+ mean_y = np.sin(X[:, 0])
+
+ y = np.random.randn(200, 10) * 0.3 + mean_y[:, None]
+
+ y_lower = np.min(y, axis=1)
+ y_upper = np.max(y, axis=1)
+ y_upper = np.where(y_upper >= 1.4, -1, y_upper+abs(np.min(y_lower)))
+ y_lower += abs(np.min(y_lower))
+
+ right_censored_ids = np.where(y_upper == -1)[0]
+ interval_censored_ids = np.where(y_upper != -1)[0]
+
+ train_ids = np.hstack(
+ [right_censored_ids[::2], interval_censored_ids[:140]])
+ test_ids = np.hstack(
+ [right_censored_ids[1::2], interval_censored_ids[140:]])
+
+ X_train, y_lower_train, y_upper_train = X[train_ids], y_lower[train_ids], y_upper[train_ids]
+ X_test, y_lower_test, y_upper_test = X[test_ids], y_lower[test_ids], y_upper[test_ids]
+
+ return [(X_train, y_lower_train, y_upper_train), (X_test, y_lower_test, y_upper_test)]
+
+
BY_CLASS_METRICS = ['AUC', 'Precision', 'Recall', 'F1']
|
[chainmaker][#435] modify url and name storage method
memcpy(wallet_config.user_cert_content.content, chainmaker_user_cert, wallet_config.user_cert_content.length);
//set url and name
- wallet_config.node_cfg.node_url = chainmaker_node_url;
- wallet_config.node_cfg.host_name = chainmaker_host_name;
+ if (((strlen(chainmaker_node_url) > BAOT_CHAINMAKER_URL_HOSTNAME_LEN) ||
+ strlen(chainmaker_host_name) > BAOT_CHAINMAKER_URL_HOSTNAME_LEN))
+ {
+ return BOAT_ERROR;
+ }
+ strncpy(wallet_config.node_url_arry, chainmaker_node_url, strlen(chainmaker_node_url));
+ strncpy(wallet_config.host_name_arry, chainmaker_host_name, strlen(chainmaker_host_name));
//tls ca cert
- wallet_config.node_cfg.org_tls_ca_cert.length = strlen(chainmaker_tls_ca_cert);
- memcpy(wallet_config.node_cfg.org_tls_ca_cert.content, chainmaker_tls_ca_cert, wallet_config.node_cfg.org_tls_ca_cert.length);
+ wallet_config.org_tls_ca_cert.length = strlen(chainmaker_tls_ca_cert);
+ memcpy(wallet_config.org_tls_ca_cert.content, chainmaker_tls_ca_cert, wallet_config.org_tls_ca_cert.length);
// create wallet
#if defined(USE_ONETIME_WALLET)
@@ -139,8 +144,6 @@ int main(int argc, char *argv[])
return -1;
}
- tx_ptr.wallet_ptr->node_info.node_url = chainmaker_node_url;
- tx_ptr.wallet_ptr->node_info.host_name = chainmaker_host_name;
result = BoatHlchainmakerAddTxParam(&tx_ptr, 6, "time", "6543235", "file_hash", "ab3456df5799b87c77e7f85", "file_name", "name005");
if (result != BOAT_SUCCESS)
{
|
Update header guards openrandom.h + linter pass | @@ -23,7 +23,9 @@ typedef struct {
//=========================== prototypes ======================================
void openrandom_init(void);
+
uint16_t openrandom_get16b(void);
+
uint16_t openrandom_getRandomizePeriod(uint16_t period, uint16_t range);
/**
|
fix cuda tool finding | @@ -45,6 +45,8 @@ function main(toolname, parse, opt)
opt = opt or {}
opt.parse = opt.parse or parse
+ local program = nil
+
-- always keep consistency with cuda cache
local toolchains = find_cuda()
if toolchains and toolchains.bindir then
@@ -52,8 +54,7 @@ function main(toolname, parse, opt)
end
-- not found? attempt to find program only
- local program = nil
- if opt.program then
+ if not program then
program = find_program(opt.program or toolname, opt)
end
|
Update contributor for | <github-issue id="766"/>
<github-pull-request id="1562"/>
</commit>
+ <commit subject="Update contributor for 6e635764."/>
<release-item-contributor-list>
- <release-item-ideator id="mahomed.h"/>
+ <release-item-ideator id="mahomed.hussein"/>
<release-item-contributor id="reid.thompson"/>
<release-item-reviewer id="david.steele"/>
</release-item-contributor-list>
<contributor-id type="github">mhagander</contributor-id>
</contributor>
- <contributor id="mahomed.h">
- <contributor-name-display>Mahomed</contributor-name-display>
+ <contributor id="mahomed.hussein">
+ <contributor-name-display>Mahomed Hussein</contributor-name-display>
<contributor-id type="github">mahomedh</contributor-id>
</contributor>
|
Ignoring HUP signal in main process. | @@ -52,6 +52,8 @@ static void nxt_main_process_sigusr1_handler(nxt_task_t *task, void *obj,
void *data);
static void nxt_main_process_sigchld_handler(nxt_task_t *task, void *obj,
void *data);
+static void nxt_main_process_signal_handler(nxt_task_t *task, void *obj,
+ void *data);
static void nxt_main_cleanup_worker_process(nxt_task_t *task, nxt_pid_t pid);
static void nxt_main_stop_worker_processes(nxt_task_t *task, nxt_runtime_t *rt);
static void nxt_main_port_socket_handler(nxt_task_t *task,
@@ -68,6 +70,7 @@ static void nxt_main_port_access_log_handler(nxt_task_t *task,
const nxt_sig_event_t nxt_main_process_signals[] = {
+ nxt_event_signal(SIGHUP, nxt_main_process_signal_handler),
nxt_event_signal(SIGINT, nxt_main_process_sigterm_handler),
nxt_event_signal(SIGQUIT, nxt_main_process_sigquit_handler),
nxt_event_signal(SIGTERM, nxt_main_process_sigterm_handler),
@@ -888,6 +891,14 @@ nxt_main_process_sigchld_handler(nxt_task_t *task, void *obj, void *data)
}
+static void
+nxt_main_process_signal_handler(nxt_task_t *task, void *obj, void *data)
+{
+ nxt_trace(task, "signal signo:%d (%s) recevied, ignored",
+ (int) (uintptr_t) obj, data);
+}
+
+
static void
nxt_main_cleanup_worker_process(nxt_task_t *task, nxt_pid_t pid)
{
|
Make test_alloc_pi_in_epilog() robust vs allocation pattern changes | @@ -9508,26 +9508,18 @@ START_TEST(test_alloc_pi_in_epilog)
"<doc></doc>\n"
"<?pi in epilog?>";
int i;
-#define MAX_ALLOC_COUNT 10
- int repeat = 0;
+#define MAX_ALLOC_COUNT 15
for (i = 0; i < MAX_ALLOC_COUNT; i++) {
- /* Repeat certain counts to allow for cached allocations */
- if (i == 3 && repeat == 1) {
- i -= 2;
- repeat++;
- }
- else if (i == 2 && repeat < 4 && repeat != 1) {
- i--;
- repeat++;
- }
allocation_count = i;
XML_SetProcessingInstructionHandler(parser, dummy_pi_handler);
dummy_handler_flags = 0;
if (_XML_Parse_SINGLE_BYTES(parser, text, strlen(text),
XML_TRUE) != XML_STATUS_ERROR)
break;
- XML_ParserReset(parser, NULL);
+ /* See comment in test_alloc_parse_xdecl() */
+ alloc_teardown();
+ alloc_setup();
}
if (i == 0)
fail("Parse completed despite failing allocator");
|
Analysis workflow, remove usage from Configure. | @@ -250,10 +250,10 @@ jobs:
cd ..
export prepath=`pwd`
echo prepath=${prepath}
- echo "curl cpanm"
- curl -L -k -s -S -o cpanm https://cpanmin.us/
- echo "perl cpanm Pod::Usage"
- perl cpanm Pod::Usage || echo whatever
+ #echo "curl cpanm"
+ #curl -L -k -s -S -o cpanm https://cpanmin.us/
+ #echo "perl cpanm Pod::Usage"
+ #perl cpanm Pod::Usage || echo whatever
mkdir openssl
echo "curl openssl"
curl -L -k -s -S -o openssl-1.1.1j.tar.gz https://www.openssl.org/source/openssl-1.1.1j.tar.gz
@@ -266,7 +266,10 @@ jobs:
#cpan POD::Usage
#echo "perl MCPAN"
#perl -MCPAN -e "CPAN::Shell->force(qw(install POD::Usage));"
- ./Configure no-shared no-asm -DOPENSSL_NO_CAPIENG mingw64 --prefix="/$prepath/openssl"
+ # remove pod::Usage because we do not need -help or -man output
+ # from the Configure script
+ sed -e 's/use Pod::Usage//' < Configure > Configure.fix
+ ./Configure.fix no-shared no-asm -DOPENSSL_NO_CAPIENG mingw64 --prefix="/$prepath/openssl"
make
make install_sw
cd ..
|
commented out tests in test_iinq.c to see if it resolves jenkins compilation issues | @@ -233,12 +233,13 @@ iinq_get_suite(
) {
planck_unit_suite_t *suite = planck_unit_new_suite();
+/*
PLANCK_UNIT_ADD_TO_SUITE(suite, iinq_test_create_open_source_intint);
PLANCK_UNIT_ADD_TO_SUITE(suite, iinq_test_create_open_source_string10string20);
PLANCK_UNIT_ADD_TO_SUITE(suite, iinq_test_create_insert_update_delete_drop_dictionary_intint);
PLANCK_UNIT_ADD_TO_SUITE(suite, iinq_test_create_query_select_all_from_where_single_dictionary);
PLANCK_UNIT_ADD_TO_SUITE(suite, iinq_test_create_query_select_all_from_where_two_dictionaries);
-
+*/
return suite;
}
|
Refactor to support Meta in shortcuts
Move the Ctrl and Meta key down checks to each shortcut individually, so
that we can add a shortcut involving Meta. | @@ -155,14 +155,14 @@ void input_manager_process_key(struct input_manager *input_manager,
SDL_bool alt = event->keysym.mod & (KMOD_LALT | KMOD_RALT);
SDL_bool meta = event->keysym.mod & (KMOD_LGUI | KMOD_RGUI);
- if (alt | meta) {
+ if (alt) {
// no shortcut involves Alt or Meta, and they should not be forwarded
// to the device
return;
}
// capture all Ctrl events
- if (ctrl) {
+ if (ctrl | meta) {
SDL_bool shift = event->keysym.mod & (KMOD_LSHIFT | KMOD_RSHIFT);
if (shift) {
// currently, there is no shortcut involving SHIFT
@@ -174,61 +174,65 @@ void input_manager_process_key(struct input_manager *input_manager,
SDL_bool repeat = event->repeat;
switch (keycode) {
case SDLK_h:
- if (!repeat) {
+ if (ctrl && !meta && !repeat) {
action_home(input_manager->controller, action);
}
return;
case SDLK_b: // fall-through
case SDLK_BACKSPACE:
- if (!repeat) {
+ if (ctrl && !meta && !repeat) {
action_back(input_manager->controller, action);
}
return;
case SDLK_s:
- if (!repeat) {
+ if (ctrl && !meta && !repeat) {
action_app_switch(input_manager->controller, action);
}
return;
case SDLK_m:
- if (!repeat) {
+ if (ctrl && !meta && !repeat) {
action_menu(input_manager->controller, action);
}
return;
case SDLK_p:
- if (!repeat) {
+ if (ctrl && !meta && !repeat) {
action_power(input_manager->controller, action);
}
return;
case SDLK_DOWN:
+ if (ctrl && !meta) {
// forward repeated events
action_volume_down(input_manager->controller, action);
+ }
return;
case SDLK_UP:
+ if (ctrl && !meta) {
// forward repeated events
action_volume_up(input_manager->controller, action);
+ }
return;
case SDLK_v:
- if (!repeat && event->type == SDL_KEYDOWN) {
+ if (ctrl && !meta && !repeat && event->type == SDL_KEYDOWN) {
clipboard_paste(input_manager->controller);
}
return;
case SDLK_f:
- if (!repeat && event->type == SDL_KEYDOWN) {
+ if (ctrl && !meta && !repeat && event->type == SDL_KEYDOWN) {
screen_switch_fullscreen(input_manager->screen);
}
return;
case SDLK_x:
- if (!repeat && event->type == SDL_KEYDOWN) {
+ if (ctrl && !meta && !repeat && event->type == SDL_KEYDOWN) {
screen_resize_to_fit(input_manager->screen);
}
return;
case SDLK_g:
- if (!repeat && event->type == SDL_KEYDOWN) {
+ if (ctrl && !meta && !repeat && event->type == SDL_KEYDOWN) {
screen_resize_to_pixel_perfect(input_manager->screen);
}
return;
case SDLK_i:
- if (!repeat && event->type == SDL_KEYDOWN) {
+ if (ctrl && !meta && !repeat && event->type == SDL_KEYDOWN) {
switch_fps_counter_state(input_manager->frames);
}
return;
|
more things to Hoon | @@ -10,20 +10,25 @@ import Untyped.Core
data Hoon a
= HVar a
+ | HAtm Atom
| HCons (Hoon a) (Hoon a)
| BarCen (Cases a)
| BarHep a a (Hoon a) (Hoon a)
| BarTis a (Hoon a)
- | CenBar a (Hoon a)
- | CenGar (Hoon a) (Hoon a)
- | CenGal (Hoon a) (Hoon a)
+ | CenDot (Hoon a) (Hoon a)
+ | CenHep (Hoon a) (Hoon a)
-- | CenKet (Hoon a) (Hoon a) (Hoon a)
-- | CenTar [Hoon a]
| TisFas a (Hoon a) (Hoon a)
+ | DotDot a (Hoon a)
| DotLus (Hoon a)
| DotTis (Hoon a) (Hoon a)
+ | WutBar (Hoon a) (Hoon a)
| WutCol (Hoon a) (Hoon a) (Hoon a)
| WutHep (Hoon a) (Cases a)
+ | WutKet (Hoon a) (Hoon a) (Hoon a)
+ | WutPam (Hoon a) (Hoon a)
+ | WutPat (Hoon a) (Hoon a) (Hoon a)
| ZapZap
type Cases a = [(Pat, Hoon a)]
@@ -37,19 +42,24 @@ desugar = go
where
go = \case
HVar v -> Var v
+ HAtm a -> Atm a
HCons h j -> Cel (go h) (go j)
BarCen cs -> Lam $ Scope $ branch (Var . F . go) (Var (B ())) cs
- BarHep r s i h -> go $ CenGar i $ CenBar r $ BarTis s $ h
+ BarHep r s i h -> go $ CenDot i $ DotDot r $ BarTis s $ h
BarTis v h -> lam v (go h)
- CenBar v h -> fix v (go h)
- CenGar h j -> App (go j) (go h)
- CenGal h j -> App (go h) (go j)
+ CenDot h j -> App (go j) (go h)
+ CenHep h j -> App (go h) (go j)
TisFas v h j -> ledt v (go h) (go j)
+ DotDot v h -> fix v (go h)
DotLus h -> Suc (go h)
DotTis h j -> Eql (go h) (go j)
+ WutBar h j -> Ift (go h) (Atm 0) (go j)
WutCol h j k -> Ift (go h) (go j) (go k)
-- or branch go (go h) cs
WutHep h cs -> Let (go h) $ Scope $ branch (Var . F . go) (Var (B ())) cs
+ WutKet h j k -> Ift (IsC (go h)) (go j) (go k)
+ WutPam h j -> Ift (go h) (go j) (Atm 1)
+ WutPat h j k -> go $ WutKet h k j
ZapZap -> Zap
branch :: (Hoon b -> Exp a) -> Exp a -> Cases b -> Exp a
|
removes u3_sist declarations | void
u3_http_io_poll(void);
- /** Disk persistence.
- **/
- /* u3_sist_boot(): restore or create pier from disk.
- */
- void
- u3_sist_boot(void);
-
- /* u3_sist_pack(): write a log entry to disk.
- **
- ** XX Synchronous.
- **
- ** typ_w is a mote describing the entry type: %ov for Arvo
- ** logs, %ra for Raft events.
- **
- ** Returns the entry's sequence number.
- */
- c3_d
- u3_sist_pack(c3_w tem_w,
- c3_w typ_w,
- c3_w* bob_w,
- c3_w len_w);
-
- /* u3_sist_put(): moronic key-value store put.
- **
- ** u3_sist_put will do its best to associate the passed key with
- ** the passed value in a way that will persist across process
- ** restarts. It will probably do so by writing a file named for
- ** the key with contents identical to the value. To rely on it
- ** for anything heavy-duty would be a mistake.
- **
- ** Why would we even have something like this? Because sometimes
- ** we need to maintain files completely independently of the
- ** noun state.
- */
- void
- u3_sist_put(const c3_c* key_c, const c3_y* val_y, size_t siz_i);
-
- /* u3_sist_nil(): moronic key-value store rm.
- **
- ** Does its best to expunge all records on the given key. Has
- ** no effect if the key doesn't exist.
- */
- void
- u3_sist_nil(const c3_c* key_c);
-
- /* u3_sist_has(): moronic key-value store existence check.
- **
- ** Returns the byte length of the value previously stored via
- ** u3_sist_put, or -1 if it couldn't find one.
- */
- ssize_t
- u3_sist_has(const c3_c* key_c);
-
- /* u3_sist_get(): moronic key-value store get.
- **
- ** u3_sist_get is the mirror of u3_sist_put. It writes to val_y,
- ** which had better be at least as big as the return value from
- ** u3_sist_has, the value that you previously put.
- **
- ** Needless to say, u3_sist_get crashes if it can't find your
- ** value.
- */
- void
- u3_sist_get(const c3_c* key_c, c3_y* val_y);
-
- /* u3_sist_rand(): fill 8 words (32 bytes) with high-quality entropy.
- */
- void
- u3_sist_rand(c3_w* rad_w);
-
-
/** New timer system.
**/
/* u3_behn_io_init(): initialize time timer.
|
group-view: automatically join the group feed if one is present when we first join a group | ?. =(group.update rid) jn-core
=. jn-core (cleanup %done)
?. hidden:(need (scry-group:grp rid))
- :: TODO: join group feed if one is present
+ =/ list-md=(list [=md-resource:metadata =association:metadata])
+ %+ skim ~(tap by associations.update)
+ |= [=md-resource:metadata =association:metadata]
+ =(app-name.md-resource %groups)
+ ?> ?=(^ list-md)
+ =* metadatum metadatum.association.i.list-md
+ ?. ?& ?=(%group -.config.metadatum)
+ ?=(^ feed.config.metadatum)
+ ?=(^ u.feed.config.metadatum)
+ ==
jn-core
+ =* feed resource.u.u.feed.config.metadatum
+ %- emit
+ %+ poke-our:(jn-pass-io /pull-feed) %graph-pull-hook
+ pull-hook-action+!>([%add [entity .]:feed])
%- emit-many
%+ murn ~(tap by associations.update)
|= [=md-resource:metadata =association:metadata]
|
components/screensaver: flip if conditions for better readability | @@ -45,9 +45,9 @@ static void _render(component_t* component)
// if the screensaver is at the edge (or outside e.g. due to screensaver_reset), and moving
// away from the screen, flip the direction so it will always be moving inside or towards
// the screen
- if (((image->position.left + image->dimension.width) >= component->dimension.width &&
- x_direction > 0) ||
- (image->position.left < 0 && x_direction < 0)) {
+ if ((x_direction > 0 &&
+ (image->position.left + image->dimension.width) >= component->dimension.width) ||
+ (x_direction < 0 && image->position.left < 0)) {
x_direction *= -1;
}
}
@@ -56,9 +56,9 @@ static void _render(component_t* component)
// if the screensaver is at the edge (or outside e.g. due to screensaver_reset), and moving
// away from the screen, flip the direction so it will always be moving inside or towards
// the screen
- if (((image->position.top + image->dimension.height) >= component->dimension.height &&
- y_direction > 0) ||
- (image->position.top < 0 && y_direction < 0)) {
+ if ((y_direction > 0 &&
+ (image->position.top + image->dimension.height) >= component->dimension.height) ||
+ (y_direction < 0 && image->position.top < 0)) {
y_direction *= -1;
}
}
|
mmapstorage: small fixes | @@ -183,7 +183,8 @@ static void mmapToKeySet (char * mappedRegion, KeySet * returned)
KeySet * keySet = (KeySet *) (mappedRegion + SIZEOF_MMAPINFO);
returned->array = keySet->array;
returned->size = keySet->size;
- ksRewind(returned);
+ returned->alloc = keySet->alloc;
+ ksRewind(returned); // cursor = 0; current = 0
returned->flags = keySet->flags;
}
@@ -266,12 +267,6 @@ int elektraMmapstorageGet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned, Ke
ELEKTRA_LOG_WARNING ("mappedRegion size: %zu", sbuf.st_size);
ELEKTRA_LOG_WARNING ("mappedRegion ptr: %p", (void *) mappedRegion);
-
-
-
-
-
-
mmapToKeySet (mappedRegion, returned);
//munmap(mappedRegion, sbuf.st_size);
|
py/asmthumb: Detect presence of I-cache using CMSIS macro.
Fixes issue | @@ -51,7 +51,7 @@ void asm_thumb_end_pass(asm_thumb_t *as) {
(void)as;
// could check labels are resolved...
- #if defined(MCU_SERIES_F7)
+ #if __ICACHE_PRESENT == 1
if (as->base.pass == MP_ASM_PASS_EMIT) {
// flush D-cache, so the code emitted is stored in memory
MP_HAL_CLEAN_DCACHE(as->base.code_base, as->base.code_size);
|
fix path to run_dev_env script
LD_LIBRARY_PATH=/home/username/TU/libelektra/cmake-build-debug/lib
-If you want to run built `kdb` outside of CLion, the recommended way is to run this script from your build directory:
+If you want to run built `kdb` outside of CLion, the recommended way is to run this script from your build directory. The script resides in you original directory with project sources.
+
+Example:
```sh
-. /scripts/run_dev_env
+. /PATH/TO/YOUR/PROJECT/scripts/run_dev_env
```
Please keep in mind it sets the variables only in the currently opened shell window/session.
|
viofs: delete SDV files as part of cleanup script | call :rmdir Install
call :rmdir Install_Debug
call :rmdir Release
+call :rmdir pci\sdv
call :rmfiles *.log
call :rmfiles *.err
call :cleandir
+del pci\smvbuild.log
+del pci\smvstats.txt
+del pci\viofs.DVL.XML
pushd pci
call :cleandir
|
increment error count if GPS device went down | @@ -5116,6 +5116,12 @@ static const char *gpgga = "$GPGGA";
static const char *gprmc = "$GPRMC";
nmeatemplen = read(fd_gps, nmeatempsentence, NMEA_MAX -1);
+if(nmeatemplen < 0)
+ {
+ perror("\nfailed to read NMEA sentence");
+ errorcount++;
+ return;
+ }
if(nmeatemplen < 44) return;
nmeatempsentence[nmeatemplen] = 0;
nmeaptr = strstr(nmeatempsentence, gpgga);
|
Fix sslapitest.c if built with no-legacy
We skip a test that uses the no-legacy option. Unfortuantely there is
no OPENSSL_NO_LEGACY to test, so we just check whether we were successful
in loading the legacy provider - and if not we skip the test. | @@ -7972,9 +7972,18 @@ static int test_pluggable_group(int idx)
OSSL_PROVIDER *legacyprov = OSSL_PROVIDER_load(libctx, "legacy");
const char *group_name = idx == 0 ? "xorgroup" : "xorkemgroup";
- if (!TEST_ptr(tlsprov) || !TEST_ptr(legacyprov))
+ if (!TEST_ptr(tlsprov))
goto end;
+ if (legacyprov == NULL) {
+ /*
+ * In this case we assume we've been built with "no-legacy" and skip
+ * this test (there is no OPENSSL_NO_LEGACY)
+ */
+ testresult = 1;
+ goto end;
+ }
+
if (!TEST_true(create_ssl_ctx_pair(libctx, TLS_server_method(),
TLS_client_method(),
TLS1_3_VERSION,
|
Ignore GCC's -Wmaybe-uninitialized warnings
c_compiler.CC = "cc"
c_compiler.CPPFLAGS = "-I./lua/src -I./runtime"
c_compiler.CFLAGS_BASE = "-std=c99 -g -fPIC"
-c_compiler.CFLAGS_WARN = "-Wall -Wundef -Wshadow -Wpedantic -Wno-unused"
+c_compiler.CFLAGS_WARN = "-Wall -Wundef -Wshadow -Wpedantic -Wno-unused " ..
+ "-Wno-maybe-uninitialized -Wno-unknown-warning-option"
c_compiler.CFLAGS_OPT = "-O2"
c_compiler.S_FLAGS = "-fverbose-asm"
|
Fix signature of lua_rawgeti and lua_rawseti
Both were using CInt instead of LuaInteger. | @@ -433,7 +433,7 @@ foreign import ccall unsafe "lua.h lua_rawgeti"
#else
foreign import ccall safe "lua.h lua_rawgeti"
#endif
- lua_rawgeti :: LuaState -> StackIndex -> CInt -> IO ()
+ lua_rawgeti :: LuaState -> StackIndex -> LuaInteger -> IO ()
-- | See <https://www.lua.org/manual/5.3/manual.html#lua_createtable lua_createtable>
#ifdef ALLOW_UNSAFE_GC
@@ -497,7 +497,7 @@ foreign import ccall unsafe "lua.h lua_rawseti"
#else
foreign import ccall safe "lua.h lua_rawseti"
#endif
- lua_rawseti :: LuaState -> StackIndex -> CInt -> IO ()
+ lua_rawseti :: LuaState -> StackIndex -> LuaInteger -> IO ()
-- | See <https://www.lua.org/manual/5.3/manual.html#lua_setmetatable lua_setmetatable>
#ifdef ALLOW_UNSAFE_GC
|
Renamed targets + help | @@ -33,15 +33,38 @@ endif
# Targets
#
+help:
+ @echo
+ @echo "WalletKit Build Targets"
+ @echo
+ @echo " btc-test: Builds Bitcoin 'test.c' directly with all required WalletKit sources"
+ @echo
+ @echo " btc-text-run: Runs Bitcoin tests"
+ @echo
+ @echo " wallet-kit-libs: Builds the WalletKit shared library 'WalletKitCore'"
+ @echo
+ @echo " wallet-kit-test: Builds the WalletKit test applications, linked with 'WalletKitCore' shared library"
+ @echo
+ @echo " wallet-kit-test-run: Builds and runs WalletKit test applications"
+ @echo
+ @echo " clean: Removes the build folder and all build files"
+ @echo
+ @echo " For 'wallet-kit-*' targets, cmake is a prerequisite. Deliverables go to 'build' folder."
+ @echo " The first invokation of a 'wallet-kit-*' target will both execute cmake and do a build."
+ @echo " Thereafter, incremental builds will be done until the next 'clean'."
+ @echo
+ @echo " This Makefile has been tested using make version 4.2.1 and cmake version 3.16.3"
+ @echo " Both Darwin/Clang and GNU/Linux based builds are supported."
+
# Bitcoin Tests
-test: clean
+btc-test: clean
cc -o $@ -I./include -I./src \
-I./vendor -I./vendor/secp256k1 \
$(CFLAGS_OS_BRD) $(CFLAGS_CPU_BRD) \
./src/bitcoin/*.c ./src/bcash/*.c ./src/bsv/*.c ./src/litecoin/*.c ./src/dogecoin/*.c ./src/support/event/*.c ./src/support/*.c ./vendor/sqlite3/sqlite3.c \
-IWalletKitCoreTests/test/include WalletKitCoreTests/test/bitcoin/test.c $(LIBS_OS_BRD)
-run: test
+btc-test-run: test
./test
@@ -58,20 +81,28 @@ run: test
#
########################################################################################
-# Make libcorecrypto.so on Linux with gcc
-libs: FORCE
- (rm -rf build; mkdir -p build/libs; cd build/libs; cmake ../..; make clean; make)
+cmake-walletkit-debug:
+ if [ ! -d "build" ]; then mkdir build; cd build; cmake -DCMAKE_BUILD_TYPE=Debug ..; fi
+
+cmake-walletkit:
+ if [ ! -d "build" ]; then mkdir build; cd build; cmake ..; fi
+
+cmake-installed:
+ @[ -f `which cmake` ] || { echo "cmake is required for WalletKit make"; exit 1; }
+
+wallet-kit-libs: cmake-installed cmake-walletkit FORCE
+ make -C build
-# Make libcorecrypto.so, libcorecryptotests.so, WalletKitCoreTests & test_bitcoin
-# which is an application that runs equivalent of WalletKitCoreTests.swift and links
-# with the 2 shared libs
-tests: FORCE
- (rm -rf build; mkdir -p build/tests; cd build/tests; cmake -DCMAKE_BUILD_TYPE=Debug ../..; make clean; make)
+wallet-kit-test: cmake-installed cmake-walletkit-debug FORCE
+ make -C build
+wallet-kit-test-run: wallet-kit-test FORCE
+ build/WalletKitCoreTests
+ build/bitcoin_test
#################################### E/O Linux/GNU #####################################
clean:
- rm -f *.o */*.o */*/*.o test
- rm -rf build
+ @rm -f *.o */*.o */*/*.o test
+ @rm -rf build
FORCE:
|
turtle: simplify state tracking | @@ -26,9 +26,9 @@ static uint8_t turtle_axis = 0;
static uint8_t turtle_dir = 0;
void turtle_mode_start() {
- if (!turtle_ready && flags.on_ground) {
+ if (!turtle_ready && flags.on_ground && turtle_state == TURTLE_STAGE_IDLE) {
+ // only enable turtle if we are onground and not recovering from a interrupted turtle
turtle_ready = true;
- turtle_state = TURTLE_STAGE_IDLE;
}
}
@@ -43,23 +43,17 @@ void turtle_mode_update() {
flags.turtle = 0;
}
- // turtle can't be initiated without the all clear flag - hold control variables at 0 state
- if (!turtle_ready) {
- if (turtle_state != TURTLE_STAGE_IDLE) {
- flags.arm_safety = 1; // just in case absolutely require that the quad be disarmed when turning off turtle mode with a started sequencer
-
+ // turtle was interrupted
+ if (!turtle_ready && turtle_state != TURTLE_STAGE_IDLE) {
// force motors to forward, in case turtle was interrupted
if (!motor_set_direction(MOTOR_FORWARD)) {
return;
}
- }
- // turtle mode off or flying away from a successful turtle will return here
- // a disarmed quad with turtle mode on will continue past
+ // reset state
turtle_state = TURTLE_STAGE_IDLE;
flags.controls_override = 0;
flags.motortest_override = 0;
-
return;
}
|
l2: l2_patch_main should not be static
Without understanding what is going on,
a pattern from l2_fwd.c is applied to the l2_patch.c file.
Type: fix
Fixes:
Ticket: | @@ -49,7 +49,11 @@ format_l2_patch_trace (u8 * s, va_list * args)
return s;
}
-static l2_patch_main_t l2_patch_main;
+#ifndef CLIB_MARCH_VARIANT
+l2_patch_main_t l2_patch_main;
+#else
+extern l2_patch_main_t l2_patch_main;
+#endif
extern vlib_node_registration_t l2_patch_node;
|
api/backup: fix typo in test variable | @@ -258,7 +258,7 @@ mod tests {
#[test]
pub fn test_list() {
- const EXPECTED_TIMESTMAP: u32 = 1601281809;
+ const EXPECTED_TIMESTAMP: u32 = 1601281809;
const DEVICE_NAME_1: &str = "test device name";
const DEVICE_NAME_2: &str = "another test device name";
@@ -283,7 +283,7 @@ mod tests {
mock_memory();
bitbox02::memory::set_device_name(DEVICE_NAME_1).unwrap();
assert!(block_on(create(&pb::CreateBackupRequest {
- timestamp: EXPECTED_TIMESTMAP,
+ timestamp: EXPECTED_TIMESTAMP,
timezone_offset: 18000,
}))
.is_ok());
@@ -293,7 +293,7 @@ mod tests {
Ok(Response::ListBackups(pb::ListBackupsResponse {
info: vec![pb::BackupInfo {
id: "41233dfbad010723dbbb93514b7b81016b73f8aa35c5148e1b478f60d5750dce".into(),
- timestamp: EXPECTED_TIMESTMAP,
+ timestamp: EXPECTED_TIMESTAMP,
name: DEVICE_NAME_1.into(),
}]
}))
@@ -309,7 +309,7 @@ mod tests {
mock_memory();
bitbox02::memory::set_device_name(DEVICE_NAME_2).unwrap();
assert!(block_on(create(&pb::CreateBackupRequest {
- timestamp: EXPECTED_TIMESTMAP,
+ timestamp: EXPECTED_TIMESTAMP,
timezone_offset: 18000,
}))
.is_ok());
@@ -321,13 +321,13 @@ mod tests {
pb::BackupInfo {
id: "41233dfbad010723dbbb93514b7b81016b73f8aa35c5148e1b478f60d5750dce"
.into(),
- timestamp: EXPECTED_TIMESTMAP,
+ timestamp: EXPECTED_TIMESTAMP,
name: DEVICE_NAME_1.into(),
},
pb::BackupInfo {
id: "4c7005846ffc09f31850201a6fdfff084191164eb318db2c6fe5a39df4a97ba0"
.into(),
- timestamp: EXPECTED_TIMESTMAP,
+ timestamp: EXPECTED_TIMESTAMP,
name: DEVICE_NAME_2.into(),
}
]
|
Change to use blocksize=128 | @@ -104,7 +104,7 @@ PORTABILITY_LIBS = -lm
#submit = mpirun ${MPIRUN_OPTS} -np $ranks $command
submit = mpirun ${MPIRUN_OPTS} -np $ranks gpurun -s $command
OPTIMIZE += -fopenmp -fopenmp-targets=amdgcn-amd-amdhsa -Xopenmp-target=amdgcn-amd-amdhsa -march=%{gputype}
- OPTIMIZE += -fopenmp-target-xteam-reduction-blocksize=512
+ OPTIMIZE += -fopenmp-target-xteam-reduction-blocksize=128
505.lbm_t:
OPTIMIZE += -fno-openmp-target-ignore-env-vars
|
Ensure the --load-from-disk flag is enabled when no data is piped and no file is passed.
FILE *fp = NULL;
/* no data piped, no log passed, load from disk only then */
- if (conf.load_from_disk && !conf.ifile && isatty (STDIN_FILENO)) {
+ if (conf.load_from_disk && !conf.ifile && !isatty (STDIN_FILENO)) {
(*glog)->load_from_disk_only = 1;
return 0;
}
|