message
stringlengths
6
474
diff
stringlengths
8
5.22k
Fix incorrect output with Delete Dimm CMD
@@ -211,7 +211,7 @@ DeleteDimm( if (EFI_ERROR(ReturnCode)) { goto Finish; } - PRINTER_SET_MSG(pPrinterCtx, ReturnCode, L"Erasing DIMM (" FORMAT_STR L").", DimmStr); + PRINTER_PROMPT_MSG(pPrinterCtx, ReturnCode, L"Erasing DIMM " FORMAT_STR L".", DimmStr); ReturnCode = PromptYesNo(&Confirmation); if (!EFI_ERROR(ReturnCode) && Confirmation) { ReturnCode = pNvmDimmConfigProtocol->SetSecurityState(pNvmDimmConfigProtocol,&pDimmIds[Index], 1, @@ -220,7 +220,7 @@ DeleteDimm( goto FinishCommandStatusSet; } } else { - PRINTER_SET_MSG(pPrinterCtx, ReturnCode, L"Skipped erasing data from DIMM (" FORMAT_STR L")\n", DimmStr); + PRINTER_PROMPT_MSG(pPrinterCtx, ReturnCode, L"Skipped erasing data from DIMM " FORMAT_STR L"\n", DimmStr); continue; } }
Ensure that static apps use the proc name of the app and not the scope executable.
#include <sys/wait.h> #include <dlfcn.h> #include <sys/utsname.h> +#include <limits.h> +#include <errno.h> + #include "fn.h" +#include "dbg.h" #include "scopeelf.h" #include "scopetypes.h" -#include <limits.h> -#include <errno.h> #define DEVMODE 0 #define __NR_memfd_create 319 @@ -418,6 +420,8 @@ main(int argc, char **argv, char **env) execve(argv[0], argv, environ); } + program_invocation_short_name = basename(argv[1]); + if ((handle = dlopen(info->path, RTLD_LAZY)) == NULL) { fprintf(stderr, "%s\n", dlerror()); goto err;
HV: Fix modularization vm config code lost CAT code Previous change 'HV: modularization vm config code', commit id lost CAT code when move sanitize_vm_config() from vm.c to vm_config.c
#include <errno.h> #include <acrn_common.h> #include <page.h> +#include <logmsg.h> +#include <cat.h> #ifndef CONFIG_PARTITION_MODE #include <sos_vm.h> @@ -119,6 +121,16 @@ int32_t sanitize_vm_config(void) /* Nothing to do for a UNDEFINED_VM, break directly. */ break; } + + if ((vm_config->guest_flags & CLOS_REQUIRED) != 0U) { + if (cat_cap_info.support && (vm_config->clos <= cat_cap_info.clos_max)) { + cat_cap_info.enabled = true; + } else { + pr_err("%s set wrong CLOS or CAT is not supported\n", __func__); + ret = -EINVAL; + } + } + if (ret != 0) { break; }
Fix Joint:getType returning nil for DistanceJoints;
@@ -20,6 +20,7 @@ int l_lovrPhysicsInit(lua_State* L) { map_init(&JointTypes); map_set(&JointTypes, "ball", JOINT_BALL); + map_set(&JointTypes, "distance", JOINT_DISTANCE); map_set(&JointTypes, "hinge", JOINT_HINGE); map_set(&JointTypes, "slider", JOINT_SLIDER);
hw/mcu/nordic: Add enable/disable ADC channel functions This commit adds two helper functions to simply enable/disable an ADC channel. This main purpose of adding these is to increment the number of active channels and connect/disconnect PSELP/PSELN without having to reconfigure the entire channel.
@@ -627,4 +627,20 @@ void nrfx_saadc_limits_set(uint8_t channel, int16_t limit_low, int16_t limit_hig nrf_saadc_int_enable(int_mask); } } + +void nrfx_enable_adc_chan(int chan, nrf_saadc_input_t pselp, + nrf_saadc_input_t pseln) +{ + NRFX_ASSERT(m_cb.active_channels < NRF_SAADC_CHANNEL_COUNT); + ++m_cb.active_channels; + nrf_saadc_channel_input_set(chan, pselp, pseln); +} + +void nrfx_disable_adc_chan(int chan) +{ + NRFX_ASSERT(m_cb.active_channels != 0); + --m_cb.active_channels; + nrf_saadc_channel_input_set(chan, NRF_SAADC_INPUT_DISABLED, + NRF_SAADC_INPUT_DISABLED); +} #endif // NRFX_CHECK(NRFX_SAADC_ENABLED)
GA: Use python executable to get include and lib.
@@ -83,7 +83,7 @@ jobs: runs-on: windows-latest strategy: matrix: - python: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9] + python: [3.5, 3.6, 3.7, 3.8, 3.9] steps: - uses: actions/checkout@v2 @@ -104,7 +104,7 @@ jobs: - name: Configure CMake shell: bash working-directory: ${{runner.workspace}}/build - run: cmake $GITHUB_WORKSPACE -A x64 -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DTINYSPLINE_ENABLE_PYTHON=True + run: cmake $GITHUB_WORKSPACE -A x64 -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DTINYSPLINE_ENABLE_PYTHON=True -DPYTHON_INCLUDE_DIR="$(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())")" -DPYTHON_LIBRARY="$(python -c "import distutils.sysconfig as sysconfig; import os; print(os.path.join(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY')))")" - name: Build working-directory: ${{runner.workspace}}/build
Update board.h Fix wrong parameters for onboard Flash memory.
PIN_PUPDR_FLOATING(GPIOA_AN4) | \ PIN_PUPDR_FLOATING(GPIOA_AN1) | \ PIN_PUPDR_FLOATING(GPIOA_AN3) | \ - PIN_PUPDR_PULLDOWN(GPIOA_FLASH_HOLD) | \ + PIN_PUPDR_FLOATING(GPIOA_FLASH_HOLD) | \ PIN_PUPDR_FLOATING(GPIOA_TX4) | \ PIN_PUPDR_FLOATING(GPIOA_RX4) | \ PIN_PUPDR_FLOATING(GPIOA_OTG_FS_DM) | \ PIN_ODR_LOW(GPIOA_AN4) | \ PIN_ODR_LOW(GPIOA_AN1) | \ PIN_ODR_LOW(GPIOA_AN3) | \ - PIN_ODR_LOW(GPIOA_FLASH_HOLD) | \ + PIN_ODR_HIGH(GPIOA_FLASH_HOLD) | \ PIN_ODR_LOW(GPIOA_TX4) | \ PIN_ODR_LOW(GPIOA_RX4) | \ PIN_ODR_HIGH(GPIOA_OTG_FS_DM) | \
Remove frametime when using fps_only param
@@ -485,15 +485,6 @@ void HudElements::fps_only(){ ImGui::TableNextRow(); ImGui::TableNextColumn(); ImGui::TextColored(HUDElements.colors.text, "%.0f", HUDElements.sw_stats->fps); } - if (HUDElements.params->enabled[OVERLAY_PARAM_ENABLED_frametime]){ - ImGui::TableNextColumn(); - right_aligned_text(HUDElements.colors.text, HUDElements.ralign_width, "%.1f", 1000 / HUDElements.sw_stats->fps); - ImGui::SameLine(0, 1.0f); - ImGui::PushFont(HUDElements.sw_stats->font1); - ImGui::Text("ms"); - ImGui::PopFont(); - } - } }
When migrating player sprites to default per scene type set the player spritesheet to have an 16x8px bounding box to match GBS2 functionality
@@ -1048,6 +1048,15 @@ const migrateFrom200r7To200r8Sprites = (data) => { const migrateFrom200r6To200r7Settings = (data) => { return { ...data, + spriteSheets: data.spriteSheets.map((spriteSheet) => { + if (spriteSheet.id !== data.settings.playerSpriteSheetId) { + return spriteSheet; + } + return { + ...spriteSheet, + boundsHeight: 8, + }; + }), settings: { ...data.settings, defaultPlayerSprites: {
qlog: update the output format to draft-02
@@ -26,12 +26,16 @@ import json PACKET_LABELS = ["initial", "0rtt", "handshake", "1rtt"] def handle_packet_lost(events, idx): - return [events[idx]["time"], "recovery", "packet_lost", { - "packet_type": PACKET_LABELS[events[idx]["packet-type"]], + return { + "time": events[idx]["time"], + "name": "recovery:packet_lost", + "data": { "header": { + "packet_type": PACKET_LABELS[events[idx]["packet-type"]], "packet_number": events[idx]["pn"] } - }] + } + } def handle_packet_received(events, idx): frames = [] @@ -54,13 +58,17 @@ def handle_packet_received(events, idx): if handler: frames.append(handler(ev)) - return [events[idx]["time"], "transport", "packet_received", { - "packet_type": PACKET_LABELS[events[idx]["packet-type"]], + return { + "time": events[idx]["time"], + "name": "transport:packet_received", + "data": { "header": { + "packet_type": PACKET_LABELS[events[idx]["packet-type"]], "packet_number": events[idx]["pn"] }, "frames": frames - }] + } + } def handle_packet_sent(events, idx): frames = [] @@ -71,13 +79,17 @@ def handle_packet_sent(events, idx): frames.append(handler(events[i])) i -= 1 - return [events[idx]["time"], "transport", "packet_sent", { - "packet_type": PACKET_LABELS[events[idx]["packet-type"]], + return { + "time": events[idx]["time"], + "name": "transport:packet_sent", + "data": { "header": { + "packet_type": PACKET_LABELS[events[idx]["packet-type"]], "packet_number": events[idx]["pn"] }, "frames": frames - }] + } + } def handle_ack_send(event): return render_ack_frame([[event["largest-acked"]]]) @@ -333,28 +345,24 @@ def main(): (_, infile) = sys.argv source_events = load_quicly_events(infile) - trace = { + print(json.dumps({ + "qlog_format": "NDJSON", + "qlog_version": "draft-02", + "title": "h2o/quicly qlog", + "trace": { "vantage_point": { "type": "server" }, - "event_fields": [ - "time", - "category", - "event", - "data" - ], - "events": [] + "common_fields": { + "protocol_type": "QUIC_HTTP3", + "time_format": "absolute" + } } + })) for i, event in enumerate(source_events): handler = QLOG_EVENT_HANDLERS.get(event["type"]) if handler: - trace["events"].append(handler(source_events, i)) - - print(json.dumps({ - "qlog_version": "draft-02-wip", - "title": "h2o/quicly qlog", - "traces": [trace] - })) + print(json.dumps(handler(source_events, i))) if __name__ == "__main__": main()
LPC55xx: fix dumb inverted assert condition in flash_is_readable().
@@ -221,7 +221,7 @@ void hic_power_target(void) bool flash_is_readable(uint32_t addr, uint32_t length) { // Make sure the core clock is less than 100 MHz, or flash commands won't work. - util_assert(SystemCoreClock > 100000000); + util_assert(SystemCoreClock < 100000000); // Return true if the address is within internal flash and the flash sector is not erased. if (!(addr >= DAPLINK_ROM_START && addr < (DAPLINK_ROM_START + DAPLINK_ROM_SIZE))) {
rpi-base: Rename the rpi0w dtb This change is following the rename in the kernel: commit Author: Phil Elwell Date: Tue May 28 16:36:04 2019 +0100
@@ -40,7 +40,7 @@ RPI_KERNEL_DEVICETREE_OVERLAYS ?= " \ " RPI_KERNEL_DEVICETREE ?= " \ - bcm2708-rpi-0-w.dtb \ + bcm2708-rpi-zero-w.dtb \ bcm2708-rpi-b.dtb \ bcm2708-rpi-b-plus.dtb \ bcm2709-rpi-2-b.dtb \
added `shallow` for all the submodules
[submodule "vendor/blip-buf"] path = vendor/blip-buf url = https://github.com/nesbox/blip-buf.git + shallow = true [submodule "vendor/curl"] path = vendor/curl url = https://github.com/curl/curl.git + shallow = true [submodule "vendor/dirent"] path = vendor/dirent url = https://github.com/tronkko/dirent.git + shallow = true [submodule "vendor/duktape"] path = vendor/duktape url = https://github.com/svaarala/duktape-releases.git [submodule "vendor/giflib"] path = vendor/giflib url = https://github.com/nesbox/giflib.git + shallow = true [submodule "vendor/lpeg"] path = vendor/lpeg url = https://github.com/nesbox/lpeg.git + shallow = true [submodule "vendor/lua"] path = vendor/lua url = https://github.com/lua/lua.git + shallow = true [submodule "vendor/sdl-gpu"] path = vendor/sdl-gpu url = https://github.com/grimfang4/sdl-gpu.git + shallow = true [submodule "vendor/sdl2"] path = vendor/sdl2 url = https://github.com/SDL-mirror/SDL.git + shallow = true [submodule "vendor/sokol"] path = vendor/sokol url = https://github.com/floooh/sokol.git + shallow = true [submodule "vendor/squirrel"] path = vendor/squirrel url = https://github.com/albertodemichelis/squirrel.git + shallow = true [submodule "vendor/wren"] path = vendor/wren url = https://github.com/wren-lang/wren.git + shallow = true [submodule "vendor/zlib"] path = vendor/zlib url = https://github.com/madler/zlib.git + shallow = true [submodule "vendor/zip"] path = vendor/zip url = https://github.com/kuba--/zip.git + shallow = true [submodule "vendor/moonscript"] path = vendor/moonscript url = https://github.com/nesbox/moonscript.git + shallow = true [submodule "vendor/argparse"] path = vendor/argparse url = https://github.com/cofyc/argparse.git + shallow = true [submodule "vendor/circle-stdlib"] path = vendor/circle-stdlib url = https://github.com/smuehlst/circle-stdlib.git + shallow = true [submodule "vendor/libuv"] path = vendor/libuv url = https://github.com/libuv/libuv.git + shallow = true [submodule "vendor/http-parser"] path = vendor/http-parser url = https://github.com/nodejs/http-parser.git + shallow = true
Fixup. Clarify distance units in labels
"FIELD_SOURCE": "Source", "FIELD_X_POSITION": "X Position", "FIELD_Y_POSITION": "Y Position", - "FIELD_DISTANCE_PU": "Distance in Pixel Units", + "FIELD_DISTANCE_PU": "Distance in Tile Units", "FIELD_USE_COLLISIONS": "Use Collisions", "FIELD_ORDER": "Order", "FIELD_HORIZONTAL_FIRST": "Horizontal First", "EVENT_IF_ACTOR_AT_POSITION_LABEL": "If {actor} At Position {{x},{y}}", "EVENT_IF_ACTOR_DIRECTION_LABEL": "If {actor} Facing {direction}", "EVENT_IF_ACTOR_RELATIVE_TO_ACTOR_LABEL": "If {actor} {direction} {otherActor}", - "EVENT_IF_ACTOR_DISTANCE_FROM_ACTOR_LABEL": "If {actor} {distance}pu from {otherActor}", + "EVENT_IF_ACTOR_DISTANCE_FROM_ACTOR_LABEL": "If {actor} {distance}tu from {otherActor}", "EVENT_SWITCH_SCENE_LABEL": "Change Scene To {scene} At {{x},{y}}", "EVENT_MENU_LABEL": "Display Menu: Set {variable} With Options {text}", "EVENT_CHOICE_LABEL": "Display Choice: Set {variable} With Options {text}",
requires ethereum gateway (with -e) for non-fake galaxy boot
@@ -221,6 +221,12 @@ _main_getopt(c3_i argc, c3_c** argv) return c3n; } + if ( (0 == u3_Host.ops_u.fak_c) && (0 == u3_Host.ops_u.eth_c) && imp_t ) { + fprintf(stderr, "can't create a new galaxy without specifying " + "the Ethereum gateway with -e\n"); + return c3n; + } + if ( u3_Host.ops_u.arv_c == 0 && imp_t ) { fprintf(stderr, "can't create a new galaxy without specifying " "the initial sync path with -A\n");
docs: saiph-c download does not work, disable it now
@@ -181,9 +181,6 @@ $ gcc myBot.c -o myBot.out -pthread -ldiscord -lcurl -lcrypto -lm ## Debugging Memory Errors -* The recommended method: - Use [SaiphC](docs/SAIPHC.md) to build your bot and run the generated executable. All runtime memory errors will be reported. - * The convenient method: Using valgrind which cannot report all runtime memory errors. ```bash
Make Shortcuts run in full screen on iPhone
@@ -253,6 +253,8 @@ class EditorSplitViewController: SplitViewController { } } + private var willRun: Bool? + // MARK: - Split view controller override func canPerformAction(_ action: Selector, withSender sender: Any?) -> Bool { @@ -379,7 +381,11 @@ class EditorSplitViewController: SplitViewController { return } - if (newCollection.horizontalSizeClass == .compact || UIDevice.current.userInterfaceIdiom == .phone) && !EditorSplitViewController.shouldShowConsoleAtBottom { + if willRun == nil { + willRun = editor.shouldRun + } + + if (newCollection.horizontalSizeClass == .compact || UIDevice.current.userInterfaceIdiom == .phone) && !EditorSplitViewController.shouldShowConsoleAtBottom && !willRun! { arrangement = .vertical } else {
The Quest has thumbsticks, not touchpads. Fix constants.
@@ -165,13 +165,14 @@ static bool vrapi_getVelocity(Device device, vec3 velocity, vec3 angularVelocity return true; } +// Notice: Quest has a thumbstick, Go has a touchpad static bool buttonDown(BridgeLovrButton field, DeviceButton button, bool *result) { if (bridgeLovrMobileData.deviceType == BRIDGE_LOVR_DEVICE_QUEST) { switch (button) { case BUTTON_MENU: *result = field & BRIDGE_LOVR_BUTTON_MENU; break; // Technically "LMENU" but only fires on left controller case BUTTON_TRIGGER: *result = field & BRIDGE_LOVR_BUTTON_SHOULDER; break; case BUTTON_GRIP: *result = field & BRIDGE_LOVR_BUTTON_GRIP; break; - case BUTTON_TOUCHPAD: *result = field & BRIDGE_LOVR_BUTTON_JOYSTICK; break; + case BUTTON_THUMBSTICK: *result = field & BRIDGE_LOVR_BUTTON_JOYSTICK; break; case BUTTON_A: *result = field & BRIDGE_LOVR_BUTTON_A; break; case BUTTON_B: *result = field & BRIDGE_LOVR_BUTTON_B; break; case BUTTON_X: *result = field & BRIDGE_LOVR_BUTTON_X; break; @@ -196,7 +197,7 @@ static bool buttonTouch(BridgeLovrTouch field, DeviceButton button, bool *result switch (button) { case BUTTON_TRIGGER: *result = field & (BRIDGE_LOVR_TOUCH_TRIGGER); break; - case BUTTON_TOUCHPAD: *result = field & (BRIDGE_LOVR_TOUCH_TOUCHPAD | BRIDGE_LOVR_TOUCH_JOYSTICK); break; + case BUTTON_THUMBSTICK: *result = field & (BRIDGE_LOVR_TOUCH_TOUCHPAD | BRIDGE_LOVR_TOUCH_JOYSTICK); break; case BUTTON_A: *result = field & BRIDGE_LOVR_TOUCH_A; break; case BUTTON_B: *result = field & BRIDGE_LOVR_TOUCH_B; break; case BUTTON_X: *result = field & BRIDGE_LOVR_TOUCH_X; break;
Update portserial_m.c add macro to aviod compile error with: rt_pin_mode(MODBUS_MASTER_RT_CONTROL_PIN_INDEX, PIN_MODE_OUTPUT);
@@ -57,7 +57,9 @@ BOOL xMBMasterPortSerialInit(UCHAR ucPORT, ULONG ulBaudRate, UCHAR ucDataBits, * set 485 mode receive and transmit control IO * @note MODBUS_MASTER_RT_CONTROL_PIN_INDEX need be defined by user */ +#if defined(RT_MODBUS_MASTER_USE_CONTROL_PIN) rt_pin_mode(MODBUS_MASTER_RT_CONTROL_PIN_INDEX, PIN_MODE_OUTPUT); +#endif /* set serial name */ if (ucPORT == 1) {
remove FCLK_CLK0 from bare_metal_test
+# Create clk_wiz +cell xilinx.com:ip:clk_wiz:5.3 pll_0 { + PRIMITIVE PLL + PRIM_IN_FREQ.VALUE_SRC USER + PRIM_IN_FREQ 125.0 + PRIM_SOURCE Differential_clock_capable_pin + CLKOUT1_USED true + CLKOUT1_REQUESTED_OUT_FREQ 125.0 + USE_RESET false +} { + clk_in1_p adc_clk_p_i + clk_in1_n adc_clk_n_i +} + # Create processing_system7 cell xilinx.com:ip:processing_system7:5.5 ps_0 { PCW_IMPORT_BOARD_PRESET cfg/red_pitaya.xml } { - M_AXI_GP0_ACLK ps_0/FCLK_CLK0 + M_AXI_GP0_ACLK pll_0/clk_out1 } # Create all required interconnections @@ -12,21 +26,12 @@ apply_bd_automation -rule xilinx.com:bd_rule:processing_system7 -config { Slave Disable } [get_bd_cells ps_0] -# Create proc_sys_reset -cell xilinx.com:ip:proc_sys_reset:5.0 rst_0 +# Create xlconstant +cell xilinx.com:ip:xlconstant:1.1 const_0 -# Create clk_wiz -cell xilinx.com:ip:clk_wiz:5.3 pll_0 { - PRIMITIVE PLL - PRIM_IN_FREQ.VALUE_SRC USER - PRIM_IN_FREQ 125.0 - PRIM_SOURCE Differential_clock_capable_pin - CLKOUT1_USED true - CLKOUT1_REQUESTED_OUT_FREQ 125.0 - USE_RESET false -} { - clk_in1_p adc_clk_p_i - clk_in1_n adc_clk_n_i +# Create proc_sys_reset +cell xilinx.com:ip:proc_sys_reset:5.0 rst_0 {} { + ext_reset_in const_0/dout } # ADC @@ -43,7 +48,7 @@ cell pavel-demin:user:axis_red_pitaya_adc:2.0 adc_0 {} { cell xilinx.com:ip:c_counter_binary:12.0 cntr_0 { Output_Width 32 } { - CLK ps_0/FCLK_CLK0 + CLK pll_0/clk_out1 } # Create xlslice
[io] mechanics_run.py: fix typo
@@ -775,7 +775,7 @@ class MechanicsHdf5Runner(siconos.io.mechanics_hdf5.MechanicsHdf5): siconos.io.mechanics_hdf5.data(self.log_data() [fun.__name__], 'timing', 1), endt) - if (isinstance(output, numbers.Number)) + if (isinstance(output, numbers.Number)): siconos.io.mechanics_hdf5.add_line( siconos.io.mechanics_hdf5.data(self.log_data() [fun.__name__],
Add section about download failures
@@ -96,6 +96,24 @@ Here are some examples. :: opt/spack/cray-sles15-zen2/gcc-11.2.0/hdf5-1.8.22-c3djozhlmrvy7wpu46f36qeakemiactw opt/spack/cray-sles15-zen2/gcc-11.2.0/cmake-3.14.7-nnahgnkkl2d2ty2us46we75pnjepci35 +Working around recurring download failures +------------------------------------------ + +Depending on context, recurring issues downloading a particular *dependent* package may arise. +When this happens, SSL certificate handling may be the cause. +A quick work-around is to disable this `security checking feature <https://spack.readthedocs.io/en/latest/config_yaml.html?highlight=ssl%20certificates#verify-ssl>`_ in Spack by adding the ``--insecure`` command-line option as the second option *just* after ``spack``. +Alternatively, you may be able to manually download the needed files and place them in a directory for Spack to use as a `mirror <https://spack.readthedocs.io/en/latest/mirrors.html?highlight=mirror#mirrors-mirrors-yaml>`_. +For example, starting from the point of having successfully downloaded the ``Python-3.7.13.tgz`` file somewhere, here are the Spack steps... :: + + spack mirror add my_local_mirror file://`pwd`/my_local_mirror + mkdir -p my_local_mirror/python + cp Python-3.7.13.tgz my_local_mirror/python/python-3.7.13.tgz + +Note that change in case of the file name. +Doing this will cause Spack to go get the file you manually downloaded. +The first step to add the mirror is only needed once. +To add additional files for which recurring download failures are occurring, just copy them into the mirror following the Spack naming conventions for packages. + The spack environment files ---------------------------
listen on all the available interfaces (not just on 127.0.0.1)
dyn_o_mite: - dyn_listen: 127.0.0.1:8101 + dyn_listen: 0.0.0.0:8101 data_store: 0 - listen: 127.0.0.1:8102 + listen: 0.0.0.0:8102 dyn_seed_provider: simple_provider servers: - 127.0.0.1:22122:1
in_systemd: workaround for systemd tail/skip issue 9934
@@ -183,9 +183,16 @@ struct flb_systemd_config *flb_systemd_config_create(struct flb_input_instance * } if (ctx->read_from_tail == FLB_TRUE) { - /* Jump to the end and skip last entry */ sd_journal_seek_tail(ctx->j); - sd_journal_next_skip(ctx->j, 1); + /* + * Skip up to 350 records until the end of journal is found. + * Workaround for bug https://github.com/systemd/systemd/issues/9934 + * Due to the bug, sd_journal_next() returns 2 last records of each journal file. + * 4 GB is the default journal limit, so with 25 MB/file we may get + * up to 4096/25*2 ~= 350 old log messages. See also fluent-bit PR #1565. + */ + ret = sd_journal_next_skip(ctx->j, 350); + flb_debug("[in_systemd] jump to the end of journal and skip %d last entries", ret); } else { sd_journal_seek_head(ctx->j);
[numerics] fix correct rule for relative error evaluation in VI
@@ -80,7 +80,10 @@ int variationalInequality_computeError( *error = cblas_dnrm2(n , wtmp , incx); /* Computes error */ - *error = *error / (norm_q + 1.0); + if (fabs(norm_q) > DBL_EPSILON) + *error /= norm_q; + + DEBUG_PRINTF("error = %e\n",*error); if (*error > tolerance) { if (verbose > 1)
actions: path not set in step
@@ -60,10 +60,6 @@ jobs: echo "JAVA_HOME=/Library/Java/JavaVirtualMachines/openjdk.jdk/Contents/Home" >> $GITHUB_ENV echo "/usr/local/opt/openjdk/bin" >> $GITHUB_PATH sudo ln -sfn /usr/local/opt/openjdk/libexec/openjdk.jdk /Library/Java/JavaVirtualMachines/openjdk.jdk - which java - which javac - java --version - javac --version gem install test-unit --no-document pip2 install cheetah # Required by kdb-gen brew tap homebrew/services
Fix multiview limit; When multiview is not supported (although technically lovr requires it), the renderSize limit for array layers was zero, which meant no render passes would work. Instead, make sure it's at least 1, which is more correct.
@@ -1885,7 +1885,7 @@ bool gpu_init(gpu_config* config) { config->limits->textureLayers = limits->maxImageArrayLayers; config->limits->renderSize[0] = limits->maxFramebufferWidth; config->limits->renderSize[1] = limits->maxFramebufferHeight; - config->limits->renderSize[2] = multiviewProperties.maxMultiviewViewCount; + config->limits->renderSize[2] = MAX(multiviewProperties.maxMultiviewViewCount, 1); config->limits->uniformBuffersPerStage = limits->maxPerStageDescriptorUniformBuffers; config->limits->storageBuffersPerStage = limits->maxPerStageDescriptorStorageBuffers; config->limits->sampledTexturesPerStage = limits->maxPerStageDescriptorSampledImages;
Added a heapmem reallocation test.
@@ -186,6 +186,50 @@ UNIT_TEST(max_alloc) UNIT_TEST_END(); } /*****************************************************************************/ +UNIT_TEST_REGISTER(reallocations, "Heapmem reallocations"); +UNIT_TEST(reallocations) +{ +#define INITIAL_SIZE 100 + + UNIT_TEST_BEGIN(); + + uint8_t *ptr1 = heapmem_realloc(NULL, INITIAL_SIZE); + UNIT_TEST_ASSERT(ptr1 != NULL); + + for(size_t i = 0; i < INITIAL_SIZE; i++) { + ptr1[i] = i + 128; + } + + /* Extend the initial array. */ + uint8_t *ptr2 = heapmem_realloc(ptr1, INITIAL_SIZE * 2); + UNIT_TEST_ASSERT(ptr2 != NULL); + + for(size_t i = 0; i < INITIAL_SIZE; i++) { + /* Check that the bytes of the lower half have been preserved. */ + UNIT_TEST_ASSERT(ptr2[i] == (uint8_t)(i + 128)); + /* Initialize the upper half of the reallocated area. */ + ptr2[i + INITIAL_SIZE] = i * 2 + 128; + } + + /* Reduce the extended array. */ + uint8_t *ptr3 = heapmem_realloc(ptr2, (2 * INITIAL_SIZE) / 3); + UNIT_TEST_ASSERT(ptr3 != NULL); + + /* Check that the array is correctly preserved after + the final reallocation. */ + for(size_t i = 0; i < (2 * INITIAL_SIZE) / 3; i++) { + if(i < INITIAL_SIZE) { + UNIT_TEST_ASSERT(ptr2[i] == (uint8_t)(i + 128)); + } else { + UNIT_TEST_ASSERT(ptr2[i] == (uint8_t)(i * 2 + 128)); + } + } + + UNIT_TEST_ASSERT(heapmem_realloc(ptr3, 0) == NULL); + + UNIT_TEST_END(); +} +/*****************************************************************************/ UNIT_TEST_REGISTER(stats_check, "Heapmem statistics validation"); UNIT_TEST(stats_check) { @@ -222,6 +266,7 @@ PROCESS_THREAD(test_heapmem_process, ev, data) UNIT_TEST_RUN(do_many_allocations); UNIT_TEST_RUN(max_alloc); UNIT_TEST_RUN(invalid_freeing); + UNIT_TEST_RUN(reallocations); UNIT_TEST_RUN(stats_check); if(!UNIT_TEST_PASSED(do_many_allocations) ||
stm32/mboot: Fix mp_hal_delay_us() and add mp_hal_ticks_ms().
// These bits are used to detect valid application firmware at APPLICATION_ADDR #define APP_VALIDITY_BITS (0x00000003) +// For 1ms system ticker. +static volatile uint32_t systick_ms; + // Global dfu state dfu_context_t dfu_context SECTION_NOZERO_BSS; @@ -104,6 +107,10 @@ uint32_t get_le32(const uint8_t *b) { return b[0] | b[1] << 8 | b[2] << 16 | b[3] << 24; } +mp_uint_t mp_hal_ticks_ms(void) { + return systick_ms; +} + void mp_hal_delay_us(mp_uint_t usec) { // use a busy loop for the delay // sys freq is always a multiple of 2MHz, so division here won't lose precision @@ -113,11 +120,10 @@ void mp_hal_delay_us(mp_uint_t usec) { const uint32_t ucount = SystemCoreClock / 2000000 * usec / 2; #endif for (uint32_t count = 0; ++count <= ucount;) { + __NOP(); } } -static volatile uint32_t systick_ms; - void mp_hal_delay_ms(mp_uint_t ms) { if (__get_PRIMASK() == 0) { // IRQs enabled, use systick
nshlib: Remove a dangling function that is no longer used after last mount/df chagnes
#include "nsh.h" #include "nsh_console.h" -/**************************************************************************** - * Pre-processor Definitions - ****************************************************************************/ - -/**************************************************************************** - * Private Types - ****************************************************************************/ - -/**************************************************************************** - * Private Function Prototypes - ****************************************************************************/ - -/**************************************************************************** - * Private Data - ****************************************************************************/ - -/**************************************************************************** - * Public Data - ****************************************************************************/ - -/**************************************************************************** - * Private Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: get_fstype - ****************************************************************************/ - -#if CONFIG_NFILE_DESCRIPTORS > 0 && !defined(CONFIG_DISABLE_MOUNTPOINT) && \ - defined(CONFIG_FS_READABLE) && !defined(CONFIG_NSH_DISABLE_MOUNT) -static const char* get_fstype(FAR struct statfs *statbuf) -{ - FAR const char *fstype; - - /* Get the file system type */ - - switch (statbuf->f_type) - { -#ifdef CONFIG_FS_FAT - case MSDOS_SUPER_MAGIC: - fstype = "vfat"; - break; -#endif - -#ifdef CONFIG_FS_ROMFS - case ROMFS_MAGIC: - fstype = "romfs"; - break; -#endif - -#ifdef CONFIG_FS_TMPFS - case TMPFS_MAGIC: - fstype = "tmpfs"; - break; -#endif - -#ifdef CONFIG_FS_BINFS - case BINFS_MAGIC: - fstype = "binfs"; - break; -#endif - -#ifdef CONFIG_FS_NXFFS - case NXFFS_MAGIC: - fstype = "nxffs"; - break; -#endif - -#ifdef CONFIG_NFS - case NFS_SUPER_MAGIC: - fstype = "nfs"; - break; -#endif - -#ifdef CONFIG_FS_SMARTFS - case SMARTFS_MAGIC: - fstype = "smartfs"; - break; -#endif - -#ifdef CONFIG_FS_PROCFS - case PROCFS_MAGIC: - fstype = "procfs"; - break; -#endif - -#ifdef CONFIG_FS_UNIONFS - case UNIONFS_MAGIC: - fstype = "unionfs"; - break; -#endif - -#ifdef CONFIG_FS_HOSTFS - case HOSTFS_MAGIC: - fstype = "hostfs"; - break; -#endif - - default: - fstype = "Unrecognized"; - break; - } - - return fstype; -} -#endif - /**************************************************************************** * Public Functions ****************************************************************************/
BugID:19144184:[WhiteScan] [716664] [CONSTANT_EXPRESSION_RESULT] /home/iot/WhiteScan/platform/arch/xtensa/lx6/backtrace.c
@@ -58,7 +58,7 @@ static int windowed_register_backtrace(uint32_t pc, uint32_t sp, while (lvl++ < BACK_TRACE_LIMIT) { psp = backtrace_sp; - if ((backtrace_sp > 0x3fffffff0UL) || ((backtrace_sp & 0xf) != 0)) + if ((backtrace_sp > 0x3ffffff0UL) || ((backtrace_sp & 0xf) != 0)) break; backtrace_sp = *((uint32_t*) (backtrace_sp - 0x10 + 4)); @@ -69,7 +69,7 @@ static int windowed_register_backtrace(uint32_t pc, uint32_t sp, if (backtrace_pc < 0x40000000) break; - if (backtrace_pc == (char *)krhino_task_deathbed) { + if (backtrace_pc == (uint32_t)krhino_task_deathbed) { print_func("backtrace : ^task entry^\n"); break; } @@ -94,8 +94,8 @@ int backtraceContext(char *PC, char *LR, int *SP, print_func(prtbuff); - backtrace_pc = LR; - backtrace_sp = SP; + backtrace_pc = (uint32_t)LR; + backtrace_sp = (uint32_t)SP; lvl = windowed_register_backtrace(backtrace_pc, backtrace_sp, print_func); print_func("======== Call stack End ========\n"); @@ -119,7 +119,7 @@ int backtrace_now(int (*print_func)(const char *fmt, ...)) print_func("======= Call stack Begin =======\n"); - backtrace_pc = PC; + backtrace_pc = (uint32_t)PC; backtrace_sp = (uint32_t)SP + 32; /*call getPCnSP result to sp -32*/ lvl = windowed_register_backtrace(backtrace_pc, backtrace_sp, print_func); @@ -154,7 +154,7 @@ int backtrace_task(char *taskname, int (*print_func)(const char *fmt, ...)) SP = task->task_stack; backtrace_pc = *((uint32_t *) (SP - 0x10)); - backtrace_sp = SP; + backtrace_sp = (uint32_t)SP; lvl = windowed_register_backtrace(backtrace_pc, backtrace_sp, print_func); print_func("======== Call stack End ========\n");
config-tool: find pci hole based on all pci hostbridge Not all pci hostbridge has a device on it. Remove the logic which assumes there is at least one device. Acked-by: Anthony Xu
@@ -301,25 +301,17 @@ def get_pt_devs_io_port_passthrough(board_etree, scenario_etree): return dev_list def get_pci_hole_native(board_etree): - resources = board_etree.xpath(f"//bus[@type = 'pci']/device[@address]/resource[@type = 'memory' and @len != '0x0']") resources_hostbridge = board_etree.xpath("//bus/resource[@type = 'memory' and @len != '0x0' and not(starts-with(@id, 'bar')) and not(@width)]") - low_mem = set() high_mem = set() for resource_hostbridge in resources_hostbridge: start = resource_hostbridge.get('min') end = resource_hostbridge.get('max') if start is not None and end is not None and int(start, 16) >= PCI_HOLE_THRESHOLD: - for resource in resources: - resource_start = int(resource.get('min'), 16) - resource_end = int(resource.get('max'), 16) - if resource_start >= int(start, 16) and resource_end <= int(end, 16): - if resource_end < 4 * SIZE_G: + if int(end,16) < 4 * SIZE_G: low_mem.add(AddrWindow(int(start, 16), int(end, 16))) - break else: high_mem.add(AddrWindow(int(start, 16), int(end, 16))) - break return list(sorted(low_mem)), list(sorted(high_mem)) def get_io_port_range_native(board_etree):
ci regression. reverting
@@ -191,7 +191,7 @@ From RhoElements 2.1 onwards the default value was changed to work out of the bo </PARAMS> </METHOD> - <METHOD name="currentLocation" runInThread="ui"> + <METHOD name="currentLocation"> <DESC>Returns the relative url (location) of the current page(without server and port); the last URL loaded to WebView from Ruby controller action. If you open your page in WebView, and after it makes a few jumps by linking (for example, to outside web adresses for example), currentLocation will still return the initial url opened in WebView. Also, if you use JQMobile, current_location has the initial URL, but does not reflect the actual window.location containing the JQMobile additional address by adding #, etc. See currentUrl.</DESC>
add decrypt test command
int usage(int argc, char **argv, const char *msg) { fprintf(stderr, "Usage: %s %s\n", argv[0], - msg ? msg : "[ sign | verify | jwk2cert | key2jwk | enckey | hash_base64url | timestamp | uuid ] <options>"); + msg ? msg : "[ sign | verify | decrypt | jwk2cert | key2jwk | enckey | hash_base64url | timestamp | uuid ] <options>"); return -1; } @@ -223,6 +223,49 @@ int verify(int argc, char **argv, apr_pool_t *pool) { return 0; } +int decrypt(int argc, char **argv, apr_pool_t *pool) { + + if (argc <= 3) + return usage(argc, argv, "decrypt <serialized-jwt-file> <jwk-file>"); + + char *s_jwt = NULL, *s_jwk = NULL; + + if (file_read(pool, argv[2], &s_jwt) != 0) + return -1; + if (file_read(pool, argv[3], &s_jwk) != 0) + return -1; + + + apr_hash_t *keys = apr_hash_make(pool); + oidc_jose_error_t oidc_err; + + oidc_jwk_t *jwk = oidc_jwk_parse(pool, s_jwk, &oidc_err); + if (jwk == NULL) { + fprintf(stderr, + "could not import JWK: %s [file: %s, function: %s, line: %d]\n", + oidc_err.text, oidc_err.source, oidc_err.function, + oidc_err.line); + return -1; + } + + apr_hash_set(keys, jwk->kid ? jwk->kid : "dummy", APR_HASH_KEY_STRING, jwk); + + char *plaintext = NULL; + if (oidc_jwe_decrypt(pool, s_jwt, keys, &plaintext, &oidc_err, TRUE) == FALSE) { + fprintf(stderr, + "oidc_jwe_decrypt failed: %s [file: %s, function: %s, line: %d]\n", + oidc_err.text, oidc_err.source, oidc_err.function, + oidc_err.line); + return -1; + } + + fprintf(stdout, "%s", plaintext); + oidc_jwk_destroy(jwk); + + return 0; +} + + int mkcert(RSA *rsa, X509 **x509p, EVP_PKEY **pkeyp, int serial, int days) { X509 *x; EVP_PKEY *pk; @@ -571,6 +614,9 @@ int main(int argc, char **argv, char **env) { if (strcmp(argv[1], "verify") == 0) return verify(argc, argv, pool); + if (strcmp(argv[1], "decrypt") == 0) + return decrypt(argc, argv, pool); + if (strcmp(argv[1], "jwk2cert") == 0) return jwk2cert(argc, argv, pool);
abis/linux: Add struct ip_mreq
@@ -39,6 +39,11 @@ struct sockaddr_in6 { uint32_t sin6_scope_id; }; +struct ip_mreq { + struct in_addr imr_multiaddr; + struct in_addr imr_interface; +}; + struct ipv6_mreq { struct in6_addr ipv6mr_multiaddr; unsigned ipv6mr_interface;
driver/retimer/tdp142.c: Format with clang-format BRANCH=none TEST=none
static enum ec_error_list tdp142_write(int offset, int data) { - return i2c_write8(TDP142_I2C_PORT, - TDP142_I2C_ADDR, - offset, data); - + return i2c_write8(TDP142_I2C_PORT, TDP142_I2C_ADDR, offset, data); } static enum ec_error_list tdp142_read(int offset, int *regval) { - return i2c_read8(TDP142_I2C_PORT, - TDP142_I2C_ADDR, - offset, regval); - + return i2c_read8(TDP142_I2C_PORT, TDP142_I2C_ADDR, offset, regval); } enum ec_error_list tdp142_set_ctlsel(enum tdp142_ctlsel selection)
Fix typo in reorderbuffer.c. Author: Zhijie Hou Discussion:
@@ -2119,13 +2119,13 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, * Mapped catalog tuple without data, emitted while * catalog table was in the process of being rewritten. We * can fail to look up the relfilenode, because the - * relmapper has no "historic" view, in contrast to normal - * the normal catalog during decoding. Thus repeated - * rewrites can cause a lookup failure. That's OK because - * we do not decode catalog changes anyway. Normally such - * tuples would be skipped over below, but we can't - * identify whether the table should be logically logged - * without mapping the relfilenode to the oid. + * relmapper has no "historic" view, in contrast to the + * normal catalog during decoding. Thus repeated rewrites + * can cause a lookup failure. That's OK because we do not + * decode catalog changes anyway. Normally such tuples + * would be skipped over below, but we can't identify + * whether the table should be logically logged without + * mapping the relfilenode to the oid. */ if (reloid == InvalidOid && change->data.tp.newtuple == NULL &&
For ppc gcc, implement 64-bit compare_exchange and fetch_add with asm. While xlc defines __64BIT__, gcc does not. Due to this oversight in commit gcc builds continued implementing 64-bit atomics by way of intrinsics. Back-patch to v13, where that commit first appeared. Discussion:
@@ -32,14 +32,14 @@ typedef struct pg_atomic_uint32 } pg_atomic_uint32; /* 64bit atomics are only supported in 64bit mode */ -#ifdef __64BIT__ +#if SIZEOF_VOID_P >= 8 #define PG_HAVE_ATOMIC_U64_SUPPORT typedef struct pg_atomic_uint64 { volatile uint64 value pg_attribute_aligned(8); } pg_atomic_uint64; -#endif /* __64BIT__ */ +#endif /* * This mimics gcc __atomic_compare_exchange_n(..., __ATOMIC_SEQ_CST), but
ds lyb BUGFIX init path
@@ -617,6 +617,8 @@ srlyb_get_path(const char *plg_name, const char *mod_name, sr_datastore_t ds, ch int r = 0, rc; const char *prefix; + *path = NULL; + switch (ds) { case SR_DS_STARTUP: if (SR_STARTUP_PATH[0]) {
bluedroid: use the new socket registering API
#include <sys/fcntl.h> #include "esp_vfs.h" #include "esp_vfs_dev.h" +#include "lwip/opt.h" // just for LWIP_SOCKET_OFFSET +/** + * BTC_SPP_FDS is the number of file descriptors this VFS driver registers + */ +#define BTC_SPP_FDS 32 #if (defined BTC_SPP_INCLUDED && BTC_SPP_INCLUDED == TRUE) @@ -828,7 +833,7 @@ static ssize_t spp_vfs_read(int fd, void * dst, size_t size) void btc_spp_vfs_register() { esp_vfs_t vfs = { - .flags = ESP_VFS_FLAG_DEFAULT | ESP_VFS_FLAG_SHARED_FD_SPACE, + .flags = ESP_VFS_FLAG_DEFAULT, .write = spp_vfs_write, .open = NULL, .fstat = NULL, @@ -836,7 +841,14 @@ void btc_spp_vfs_register() .read = spp_vfs_read, .fcntl = NULL }; - ESP_ERROR_CHECK(esp_vfs_register_socket_space(&vfs, NULL, &spp_local_param.spp_min_fd, &spp_local_param.spp_max_fd)); + // File descriptors from LWIP_SOCKET_OFFSET to MAX_FDS-1 are registered + // for sockets. So here we register from (LWIP_SOCKET_OFFSET - BTC_SPP_FDS) + // to (LWIP_SOCKET_OFFSET-1) leaving unregistered from 0 to + // .(LWIP_SOCKET_OFFSET - BTC_SPP_FDS - 1). + spp_local_param.spp_min_fd = LWIP_SOCKET_OFFSET - BTC_SPP_FDS; + assert(spp_local_param.spp_min_fd >= 0); //TODO return error instead + spp_local_param.spp_max_fd = LWIP_SOCKET_OFFSET; + ESP_ERROR_CHECK(esp_vfs_register_fd_range(&vfs, NULL, spp_local_param.spp_min_fd, spp_local_param.spp_max_fd));//TODO return error if fails spp_local_param.spp_fd = spp_local_param.spp_min_fd; }
ARMv8: setting l0page tables for when doing PSCI boot
@@ -1162,6 +1162,7 @@ static struct sysret handle_debug_syscall(int msg) } #include <psci.h> +#include <barrelfish_kpi/arm_core_data.h> /* XXX - function documentation is inconsistent. */ /** @@ -1237,6 +1238,8 @@ void sys_syscall(uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3, case SYSCALL_DEBUG: if (a1 == DEBUG_PSCI_CPU_ON) { printf("Invoking PSCI on: cpu=%lx, entry=%lx, context=%lx\n", a2, a3, a4); + struct armv8_core_data *cd = (struct armv8_core_data *)local_phys_to_mem(a4); + cd->kernel_l0_pagetable = sysreg_read_ttbr1_el1(); r.error = psci_cpu_on(a2, a3, a4); break; }
preinitialize CLASS parser reduces Valgrind errors from 6574 to 22 :)
@@ -263,6 +263,12 @@ static double ccl_get_class_As(ccl_cosmology *cosmo, struct file_content *fc, in static void ccl_fill_class_parameters(ccl_cosmology * cosmo, struct file_content * fc, int parser_length, int * status) { + // initialize fc fields + //silences Valgrind's "Conditional jump or move depends on uninitialised value" warning + for (int i = 0; i< parser_length; i++){ + strcpy(fc->name[i]," "); + strcpy(fc->value[i]," "); + } strcpy(fc->name[0],"output"); strcpy(fc->value[0],"mPk");
ngtcp2_unreachable: Handle EINTR
#include "ngtcp2_unreachable.h" #include <stdio.h> +#include <errno.h> #ifdef HAVE_UNISTD_H # include <unistd.h> #endif /* HAVE_UNISTD_H */ @@ -58,7 +59,8 @@ void ngtcp2_unreachable_fail(const char *file, int line, const char *func) { } #ifndef WIN32 - write(STDERR_FILENO, buf, (size_t)rv); + while (write(STDERR_FILENO, buf, (size_t)rv) == -1 && errno == EINTR) + ; #else /* WIN32 */ _write(_fileno(stderr), buf, (unsigned int)rv); #endif /* WIN32 */
Tests: added more tests for "query" routing pattern.
@@ -1342,19 +1342,27 @@ class TestRouting(TestApplicationProto): assert self.get(url='/?a=b&c=d e')['status'] == 200 def test_routes_match_query_array(self): - self.route_match({ - "query": ["foo", "bar"] - }) + self.route_match({"query": ["foo", "bar"]}) - assert self.get()['status'] == 404, 'arr' - assert self.get(url='/?foo')['status'] == 200, 'arr 1' - assert self.get(url='/?bar')['status'] == 200, 'arr 2' + assert self.get()['status'] == 404, 'no args' + assert self.get(url='/?foo')['status'] == 200, 'arg first' + assert self.get(url='/?bar')['status'] == 200, 'arg second' assert 'success' in self.conf_delete( 'routes/0/match/query/1' - ), 'match query array configure 2' + ), 'query array remove second' + + assert self.get(url='/?foo')['status'] == 200, 'still arg first' + assert self.get(url='/?bar')['status'] == 404, 'no arg second' + + self.route_match({"query": ["!f", "foo"]}) + + assert self.get(url='/?f')['status'] == 404, 'negative arg' + assert self.get(url='/?fo')['status'] == 404, 'negative arg 2' + assert self.get(url='/?foo')['status'] == 200, 'negative arg 3' - assert self.get(url='/?bar')['status'] == 404, 'arr 2' + self.route_match({"query": []}) + assert self.get()['status'] == 200, 'empty array' def test_routes_match_query_invalid(self): self.route_match_invalid({"query": [1]})
[lwIP] Remove the unnecessary log in SConscript.
@@ -69,7 +69,6 @@ path = [GetCurrentDir() + '/src', if not GetDepend('RT_USING_POSIX') or not GetDepend('RT_USING_DFS_NET'): path += [GetCurrentDir() + '/src/include/posix'] - print('include /src/include/posix') if GetDepend(['RT_LWIP_SNMP']): src += snmp_src
README: Fixed typos. These closes closes closes issues on GitHub.
@@ -179,7 +179,7 @@ current working directory. ##### Mercurial Repository - 1. If don't already have the Mercurial software, download and install it. + 1. If you don't already have the Mercurial software, download and install it. For example, on Ubuntu systems, run this command: ``` @@ -194,7 +194,7 @@ current working directory. ##### GitHub Repository - 1. If don't already have the Git software, download it. See the + 1. If you don't already have the Git software, download it. See the [GitHub documentation](https://help.github.com/). 2. Download the Unit sources: @@ -465,7 +465,7 @@ several characteristics of the application, including the language it's written in, the number of application worker processes to run, the directory with the file or files for the application, and parameters that vary by language. -This example runs three workers of the PHP application named **blogs** using the +This example runs 20 workers of the PHP application named **blogs** using the files found in the **/www/blogs/scripts** directory. The default launch file when the URL doesn't specify the PHP file is **index.php**.
CI: simplify script tests
TESTS=$(wildcard ??-*.sh) TESTLOGS=$(patsubst %.sh,%.testlog,$(TESTS)) -FAILLOGS=$(patsubst %.sh,%.faillog,$(TESTS)) CONTIKI=../.. tests: $(TESTLOGS) -report: clean tests - @echo | grep -s -e '' - $(TESTLOGS) $(FAILLOGS) > $@ || true - -summary: report +summary: clean tests ifeq ($(TESTS),) @echo No tests > $@ else - @egrep -e ' OK| FAIL' $< > $@ - @ls -1 ??-*.faillog > /dev/null 2>&1; [ $$? = 0 ] && tail -v ??-*.log ??-*.faillog >> $@ || true + @cat $(TESTLOGS) > $@ endif all: cooja clean tests @@ -23,7 +18,7 @@ all: cooja clean tests @bash "$(basename $@).sh" "$(CONTIKI)" clean: - @rm -f $(TESTLOGS) $(FAILLOGS) report summary + @rm -f *.*log report summary cooja: $(CONTIKI)/tools/cooja/dist/cooja.jar $(CONTIKI)/tools/cooja/dist/cooja.jar:
doc: fixup missing .
@@ -292,7 +292,7 @@ Thanks to Michael Zronek and Vanessa Kos. test is not tagged as such. *(Lukas Winkler)* - Remove `elektra-gcc-configure-debian-withspace` test. We now test for compatibility of spaced build paths during normal tests. *(Lukas Winkler)* -- Check for source formatting during early test stages *(Lukas Winkler)* +- Check for source formatting during early test stages. *(Lukas Winkler)* ### Travis
Adding correction to documentation As picked up in review, this commit modifies the documentation by removing some wording that is now superfluous given the removal of the mode parameter.
@@ -714,11 +714,6 @@ int mbedtls_rsa_rsaes_oaep_encrypt( mbedtls_rsa_context *ctx, * hold the decryption of the particular ciphertext provided, * the function returns \c MBEDTLS_ERR_RSA_OUTPUT_TOO_LARGE. * - * - * \note Alternative implementations of RSA need not support - * mode being set to #MBEDTLS_RSA_PUBLIC and might instead - * return #MBEDTLS_ERR_PLATFORM_FEATURE_UNSUPPORTED. - * * \param ctx The initialized RSA context to use. * \param f_rng The RNG function. This is used for blinding and should * be provided; see mbedtls_rsa_private() for more. @@ -755,10 +750,6 @@ int mbedtls_rsa_pkcs1_decrypt( mbedtls_rsa_context *ctx, * hold the decryption of the particular ciphertext provided, * the function returns #MBEDTLS_ERR_RSA_OUTPUT_TOO_LARGE. * - * \note Alternative implementations of RSA need not support - * mode being set to #MBEDTLS_RSA_PUBLIC and might instead - * return #MBEDTLS_ERR_PLATFORM_FEATURE_UNSUPPORTED. - * * \param ctx The initialized RSA context to use. * \param f_rng The RNG function. This is used for blinding and should * be provided; see mbedtls_rsa_private() for more. @@ -797,10 +788,6 @@ int mbedtls_rsa_rsaes_pkcs1_v15_decrypt( mbedtls_rsa_context *ctx, * ciphertext provided, the function returns * #MBEDTLS_ERR_RSA_OUTPUT_TOO_LARGE. * - * \note Alternative implementations of RSA need not support - * mode being set to #MBEDTLS_RSA_PUBLIC and might instead - * return #MBEDTLS_ERR_PLATFORM_FEATURE_UNSUPPORTED. - * * \param ctx The initialized RSA context to use. * \param f_rng The RNG function. This is used for blinding and should * be provided; see mbedtls_rsa_private() for more.
Updated MSVC project.
<ClCompile Include="..\..\libxml\ruby_xml_xpath_context.c" /> <ClCompile Include="..\..\libxml\ruby_xml_xpath_expression.c" /> <ClCompile Include="..\..\libxml\ruby_xml_xpath_object.c" /> - <ClCompile Include="..\..\libxml\ruby_xml_xpointer.c" /> </ItemGroup> <ItemGroup> <ClInclude Include="..\..\libxml\ruby_libxml.h" /> <ClInclude Include="..\..\libxml\ruby_xml_xpath_context.h" /> <ClInclude Include="..\..\libxml\ruby_xml_xpath_expression.h" /> <ClInclude Include="..\..\libxml\ruby_xml_xpath_object.h" /> - <ClInclude Include="..\..\libxml\ruby_xml_xpointer.h" /> </ItemGroup> <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> <ImportGroup Label="ExtensionTargets">
Add abbreviated terms to system requirements files Add abbreviated terms table to BoAT_System_Requirements_en.md.
### Overview This technical paper describes the system requirements of the BoAT Framework (C language version) for cellular modules. BoAT is an SDK that runs on the module's application processor. For the OpenCPU cellular module, BoAT is linked and called by the application as a library. For non-OpenCPU cellular modules, BoAT's API needs to be extended to AT commands for applications on the upper MCU to call. +### Abbreviated Terms +|Term |Explanation | +|:----- |:--------------------------- | +|BoAT|Blockchain of AI Things| +|SDK|Software Development Kit| +|API|Application Programming Interface| +|MCU|Microcontroller Unit| +|RTOS|Real Time Operating System| +|TRNG|True Random Number Generator| +|CSPRNG|Cryptographically Secure Pseudo-Random Number Generator| +|RTC|Real Time Clock| +|NTP|Network Time Protocol| +|HTTP|Hyper Text Transfer Protocol| +|HTTPs|Hyper Text Transfer Protocol Secure| +|CoAP|Constrained Application Protocol| +|MQTT|Message Queuing Telemetry Transport| +|TCP|Transmission Control Protocol| +|TEE|Trusted Execution Environment| +|TA|Trusted Application| +|ECDSA|Elliptic Curve Digital Signature Algorithm| +|SHA2|Secure Hash Algorithm 2| + + ## Part 1 Storage Requirements For Ethereum/PlatONE/FISCO BCOS, the storage requirements of the BoAT Framework (C language version) itself are:
oc_cloud:persist CloudConf if provisioned via API
@@ -167,6 +167,8 @@ oc_cloud_provision_conf_resource(oc_cloud_context_t *ctx, const char *server, ctx->store.status = OC_CLOUD_INITIALIZED; ctx->cps = OC_CPS_READYTOREGISTER; + cloud_store_dump_async(&ctx->store); + if (ctx->cloud_manager) { cloud_reconnect(ctx); }
Restore original nsAttsSize if reallocation fails
@@ -3264,6 +3264,7 @@ storeAtts(XML_Parser parser, const ENCODING *enc, int j; /* hash table index */ unsigned long version = nsAttsVersion; int nsAttsSize = (int)1 << nsAttsPower; + unsigned char oldNsAttsPower = nsAttsPower; /* size of hash table must be at least 2 * (# of prefixed attributes) */ if ((nPrefixes << 1) >> nsAttsPower) { /* true for nsAttsPower = 0 */ NS_ATT *temp; @@ -3273,8 +3274,11 @@ storeAtts(XML_Parser parser, const ENCODING *enc, nsAttsPower = 3; nsAttsSize = (int)1 << nsAttsPower; temp = (NS_ATT *)REALLOC(nsAtts, nsAttsSize * sizeof(NS_ATT)); - if (!temp) + if (!temp) { + /* Restore actual size of memory in nsAtts */ + nsAttsPower = oldNsAttsPower; return XML_ERROR_NO_MEMORY; + } nsAtts = temp; version = 0; /* force re-initialization of nsAtts hash table */ }
Weather -> clock in clock tile's readme
-# Weather tile for Urbit +# Clock tile for Urbit To install this on your Urbit planet: 1. In your Urbit's Dojo, run |mount % 2. Write in the filepath to your Urbit's pier in the urbitrc-sample file in this repository, then copy it to .urbitrc in this directory. 3. Run `npm install` in terminal in this directory. 4. Run `gulp default` in terminal in this directory. -5. Run |start %weather in your Urbit's Dojo. +5. Run |start %clock in your Urbit's Dojo. To see it, navigate to your Urbit's url and add /~home to the URL path.
Change directory change directory 2
@@ -534,6 +534,8 @@ ALLOW yt/jaeger/plugin -> vendor/github.com/gogo ALLOW yabs/telephony/platform/internal/rtp -> vendor/github.com/wernerd/GoRTP # CONTRIB-1518 client for monkey-patched Apache Pulsar by TuyaInc. responsible: jock@ +ALLOW alice/iot/adapters/tuya_adapter -> vendor/github.com/TuyaInc/tuya_pulsar_sdk_go +ALLOW alice/iot/adapters/tuya_adapter/server -> vendor/github.com/sirupsen/logrus ALLOW quasar/iot/adapters/tuya_adapter -> vendor/github.com/TuyaInc/tuya_pulsar_sdk_go ALLOW quasar/iot/adapters/tuya_adapter/server -> vendor/github.com/sirupsen/logrus
Register reset in the PIM to help timing closure.
@@ -167,6 +167,13 @@ module platform_shim_ccip_std_afu // Instantiate the AFU // ==================================================================== + // Add a timing stage to reset + logic afu_cp2af_softReset_q = 1'b1; + always @(posedge `PLATFORM_PARAM_CCI_P_CLOCK) + begin + afu_cp2af_softReset_q <= afu_cp2af_softReset; + end + `AFU_TOP_MODULE_NAME #( `PLATFORM_ARG_LIST_BEGIN @@ -191,7 +198,7 @@ module platform_shim_ccip_std_afu .pClkDiv4(pClkDiv4), .uClk_usr(uClk_usr), .uClk_usrDiv2(uClk_usrDiv2), - .pck_cp2af_softReset(afu_cp2af_softReset), + .pck_cp2af_softReset(afu_cp2af_softReset_q), `ifdef AFU_TOP_REQUIRES_POWER_2BIT .pck_cp2af_pwrState(afu_cp2af_pwrState), `endif
replace libretro-common 45
@@ -709,7 +709,7 @@ SOURCES_C += $(NP2_PATH)/sdl2/libretro/libretro-common/compat/compat_strcasestr ifeq ($(platform), switch) SOURCES_C += $(NP2_PATH)/sdl2/libretro/libretro-common/rthreads/switch_pthread.c -else +else ifeq (,$(filter $(platform),ngc wii wiiu)) SOURCES_C += $(NP2_PATH)/sdl2/libretro/libretro-common/rthreads/rthreads.c endif
[CI] travis: add cxx11 flag for opensuse
@@ -36,8 +36,8 @@ env: - TASK=default:distrib=ubuntu,18.04 - TASK=default:distrib=debian,latest - TASK=default:distrib=fedora,latest:pkgs-=atlas-lapack,:pkgs+=openblas-lapacke, - - TASK=default:distrib=opensuse,42.3:pkgs-=atlas-lapack,:pkgs+=openblas-lapacke, - - TASK=default:distrib=opensuse,42.3:ci_config=with_mumps-seq:pkgs-=atlas-lapack,:pkgs+=openblas-lapacke,mumps-seq + - TASK=default:distrib=opensuse,42.3:ci_config=with_cxx11:pkgs-=atlas-lapack,:pkgs+=openblas-lapacke, + - TASK=default:distrib=opensuse,42.3:ci_config=with_cxx11,with_mumps-seq:pkgs-=atlas-lapack,:pkgs+=openblas-lapacke,mumps-seq - TASK=default:distrib=ubuntu,17.10:ci_config=with_cxx11 - TASK=default:pkgs+=lpsolve, - TASK=default:pkgs-=atlas-lapack:pkgs+=openblas-lapacke,
docs: Corrects OpenOCD command usage in logtrace readme
@@ -107,21 +107,21 @@ To run the example and retrieve the log from the host, do the following: 6. In telnet execute the following command: ``` - esp32 apptrace start file://adc0.log file://adc1.log 0 9000 5 0 0 + esp32 apptrace start file://adc.log 0 9000 5 0 0 ``` - This command should collect 9000 bytes of log data and save them to `adc0.log` file in `~/esp/openocd-esp32` folder. The `adc1.log` file will be empty / is not used. + This command should collect 9000 bytes of log data and save them to `adc.log` file in `~/esp/openocd-esp32` folder. 7. Decode and print out retrieved log file by executing: ``` - $IDF_PATH/tools/esp_app_trace/logtrace_proc.py ~/esp/openocd-esp32/adc0.log ~/esp/app_trace_to_host/build/app_trace_to_host_test.elf + $IDF_PATH/tools/esp_app_trace/logtrace_proc.py ~/esp/openocd-esp32/adc.log ~/esp/app_trace_to_host/build/app_trace_to_host_test.elf ``` This should provide a similar output: ``` - Parse trace file '/user-home/esp/openocd-esp32/adc0.log'... + Parse trace file '/user-home/esp/openocd-esp32/adc.log'... Unprocessed 7 bytes of log record args! Parsing completed. ====================================================================
in_tail: fix process_content when FLB_REGEX is false file->buf_data should have been data, which was missed in
@@ -265,7 +265,7 @@ static int process_content(struct flb_tail_file *file, off_t *bytes) #else flb_time_get(&out_time); flb_tail_file_pack_line(out_sbuf, out_pck, &out_time, - file->buf_data, len, file); + data, len, file); #endif go_next:
kernel/binary_manager_load : Add Debug message for checking g_sem_list If g_sem_list is empty, it means there is no kernel semaphore which used by threads. For checking this, added the debug message.
@@ -547,7 +547,10 @@ void binary_manager_release_binary_sem(int bin_idx) flags = irqsave(); sem = (sem_t *)sq_peek(&g_sem_list); - while (sem) { + if (sem == NULL) { + bmdbg("g_sem_list is empty.\n"); + } else { + do { #if CONFIG_SEM_PREALLOCHOLDERS > 0 for (holder = sem->hhead; holder; holder = holder->flink) #else @@ -563,6 +566,7 @@ void binary_manager_release_binary_sem(int bin_idx) } } sem = sq_next(sem); + } while (sem); } irqrestore(flags); }
herm: accept old /view/* wires
?+ wire !! :: pass on dill blits for the session :: - [%dill @ ~] + [?(%view %dill) @ ~] ::NOTE /view for backwards compat =* ses i.t.wire ?. ?=([%dill %blit *] sign-arvo) ~| [%unexpected-sign [- +<]:sign-arvo]
Update inaccurate comments.
@@ -130,7 +130,7 @@ protocolLocalParam(ProtocolStorageType protocolStorageType, unsigned int hostId, // Add the host id kvPut(optionReplace, VARSTR(CFGOPT_HOST_ID_STR), VARUINT(hostId)); - // Add the storage type + // Add the remote type kvPut(optionReplace, VARSTR(CFGOPT_REMOTE_TYPE_STR), VARSTR(protocolStorageTypeStr(protocolStorageType))); // Only enable file logging on the local when requested @@ -305,7 +305,7 @@ protocolRemoteParam(ProtocolStorageType protocolStorageType, unsigned int protoc // Always output errors on stderr for debugging purposes kvPut(optionReplace, VARSTR(CFGOPT_LOG_LEVEL_STDERR_STR), VARSTRDEF("error")); - // Add the type + // Add the remote type kvPut(optionReplace, VARSTR(CFGOPT_REMOTE_TYPE_STR), VARSTR(protocolStorageTypeStr(protocolStorageType))); StringList *commandExec = cfgExecParam(cfgCmdRemote, optionReplace, false, true);
[Makefile] Compile unit tests with the `minpool` flavor
@@ -103,15 +103,13 @@ riscv-isa-sim: update_opcodes ../configure --prefix=$(ISA_SIM_INSTALL_DIR) && make && make install # Unit tests for verification -MINPOOL_CONFIG = num_cores=16 num_cores_per_tile=4 - .PHONY: test build_test clean_test test: build_test export PATH=$(ISA_SIM_INSTALL_DIR)/bin:$$PATH; \ make -C $(RISCV_TESTS_DIR)/isa run && \ - COMPILER=gcc $(MINPOOL_CONFIG) make -C $(SOFTWARE_DIR) test && \ - $(MINPOOL_CONFIG) make -C hardware verilate_test + config=minpool COMPILER=gcc make -C $(SOFTWARE_DIR) test && \ + config=minpool make -C hardware verilate_test build_test: update_opcodes cd $(RISCV_TESTS_DIR); \
Update sgemm_kernel_16x4_skylakex_3.c
-#if defined(__apple_build_version__) && __clang_major__ == 11 && __clang_minor__ == 0 && __clang_patchlevel__ == 3 -#pragma clang optimize "O2" -#endif - - /* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store */ /* r10 to assist prefetch, r12 = k << 4(const), r13 = k(const), r14 = b_head_pos(const), r15 = %1 + 3r12 */
Fix incorrect error code in StartupReplicationOrigin(). ERRCODE_CONFIGURATION_LIMIT_EXCEEDED was used for checksum failure, use ERRCODE_DATA_CORRUPTED instead. Reported-by: Tatsuhito Kasahara Author: Tatsuhito Kasahara Backpatch-through: 9.6, where it was introduced Discussion:
@@ -791,7 +791,7 @@ StartupReplicationOrigin(void) FIN_CRC32C(crc); if (file_crc != crc) ereport(PANIC, - (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), + (errcode(ERRCODE_DATA_CORRUPTED), errmsg("replication slot checkpoint has wrong checksum %u, expected %u", crc, file_crc)));
Bump stackage version.
-resolver: lts-14.4 +resolver: lts-14.21 packages: - king @@ -11,9 +11,8 @@ extra-deps: - flat-0.3.4@sha256:002a0e0ae656ea8cc02a772d0bcb6ea7dbd7f2e79070959cc748ad1e7138eb38 - base58-bytestring-0.1.0@sha256:a1da72ee89d5450bac1c792d9fcbe95ed7154ab7246f2172b57bd4fd9b5eab79 - lock-file-0.7.0.0@sha256:3ad84b5e454145e1d928063b56abb96db24a99a21b493989520e58fa0ab37b00 - - urbit-hob-0.3.0@sha256:4871bd8ad01171ae5d4e50a344f4b8757e9eee80f62ab40a80f5311cd443b115 + - urbit-hob-0.3.1@sha256:afbdc7ad071eefc6ca85f5b598b6c62ed49079d15d1840dac27438a3b3150303 - para-1.1@sha256:a90eebb063ad70271e6e2a7f00a93e8e8f8b77273f100f39852fbf8301926f81 - - websockets-0.12.6.1@sha256:3816e841d8102877817d24ef5c96288e79f1323434268b866aa40732cc86763f nix: packages:
Fix case where value is not a number in eventVariableSetToValue
@@ -39,7 +39,7 @@ const compile = (input, helpers) => { if (value === 1) { const { variableSetToTrue } = helpers; variableSetToTrue(input.variable); - } else if (value === 0) { + } else if (value === 0 || isNaN(value)) { const { variableSetToFalse } = helpers; variableSetToFalse(input.variable); } else {
Add test script for RSA signature
@@ -8835,6 +8835,36 @@ run_test "TLS1.3: minimal feature sets - openssl" \ -c "<= parse finished message" \ -c "HTTP/1.0 200 ok" +requires_openssl_tls1_3 +requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_3_EXPERIMENTAL +requires_config_disabled MBEDTLS_USE_PSA_CRYPTO +run_test "TLS1.3: Test client hello msg work - openssl" \ + "$O_NEXT_SRV -tls1_3 -msg -no_middlebox" \ + "$P_CLI debug_level=3 min_version=tls1_3 max_version=tls1_3 force_ciphersuite=TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256 curves=secp256r1" \ + 1 \ + -c "SSL - The requested feature is not available" \ + -s "ServerHello" \ + -c "tls1_3 client state: 0" \ + -c "tls1_3 client state: 2" \ + -c "tls1_3 client state: 19" \ + -c "tls1_3 client state: 5" \ + -c "tls1_3 client state: 3" \ + -c "tls1_3 client state: 9" \ + -c "tls1_3 client state: 13" \ + -c "tls1_3 client state: 7" \ + -c "tls1_3 client state: 20" \ + -c "tls1_3 client state: 11" \ + -c "tls1_3 client state: 14" \ + -c "tls1_3 client state: 15" \ + -c "<= ssl_tls1_3_process_server_hello" \ + -c "server hello, chosen ciphersuite: ( 1301 ) - TLS1-3-AES-128-GCM-SHA256" \ + -c "=> ssl_tls1_3_process_server_hello" \ + -c "<= parse encrypted extensions" \ + -c "Certificate verification flags clear" \ + -c "=> parse certificate verify" \ + -c "<= parse certificate verify" \ + -c "mbedtls_ssl_tls13_process_certificate_verify() returned 0" + requires_gnutls_tls1_3 requires_gnutls_next_no_ticket requires_gnutls_next_disable_tls13_compat
hslua-aeson: Update repo URL, homepage
@@ -4,7 +4,7 @@ version: 2.0.1 synopsis: Allow aeson data types to be used with lua. description: This package provides instances to push and receive any datatype encodable as JSON to and from the Lua stack. -homepage: https://github.com/tarleb/hslua-aeson#readme +homepage: https://hslua.org/ license: MIT license-file: LICENSE author: Albert Krewinkel @@ -22,7 +22,8 @@ tested-with: GHC == 8.0.2 source-repository head type: git - location: https://github.com/hslua/hslua-aeson + location: https://github.com/hslua/hslua + subdir: hslua-aeson common common-options default-language: Haskell2010
Tests: test/test_flowprobe.py Fix TypeError. TypeError: not all arguments converted during string formatting
@@ -72,7 +72,7 @@ class VppCFLOW(VppObject): (self._intf, self._datapath)) def object_id(self): - return "ipfix-collector-%s" % (self._src, self.dst) + return "ipfix-collector-%s-%s" % (self._src, self.dst) def query_vpp_config(self): return self._configured
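The fix above is a single missing placeholder, and the underlying Python behaviour is easy to reproduce outside the VPP test suite. Below is a minimal, self-contained sketch; the variable names are illustrative and not taken from the real test class:

```python
# Old-style "%" formatting raises TypeError when the format string has fewer
# placeholders than there are values in the argument tuple.
src, dst = "10.0.0.1", "10.0.0.2"

try:
    object_id = "ipfix-collector-%s" % (src, dst)   # one %s, two values
except TypeError as exc:
    print(exc)  # not all arguments converted during string formatting

# Matching the placeholder count to the value count resolves the error.
object_id = "ipfix-collector-%s-%s" % (src, dst)
print(object_id)  # ipfix-collector-10.0.0.1-10.0.0.2
```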
dpdk: fix coverity Coverity does not seem happy about exotic control flows in switch/case statements Type: fix Fixes:
@@ -495,6 +495,8 @@ dpdk_lib_init (dpdk_main_t * dm) xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD | DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM; } + xd->port_type = port_type_from_speed_capa (&dev_info); + break; case VNET_DPDK_PMD_CXGBE: case VNET_DPDK_PMD_MLX4: case VNET_DPDK_PMD_QEDE:
Fixed example 5
@@ -51,11 +51,11 @@ extract_kv_to_files(struct hse_kvs *kvs, int file_cnt, char **files) snprintf(outfile, sizeof(outfile), "%s.%s", files[i], "out"); snprintf(pfx, sizeof(pfx), "%s|", files[i]); - printf("filename is:%s", outfile); + printf("filename is:%s\n", outfile); fd = open(outfile, O_RDWR | O_CREAT); if (fd < 0) { - printf("Error opening file"); + printf("Error opening file\n"); return errno; } @@ -84,7 +84,7 @@ put_files_as_kv(struct hse_kvdb *kvdb, struct hse_kvs *kvs, int kv_cnt, char **k hse_err_t rc; for (i = 0; i < kv_cnt; i++) { - printf("Inserting chunks for %s", (char *)keys[i]); + printf("Inserting chunks for %s\n", (char *)keys[i]); char val[HSE_KVS_VLEN_MAX]; char key_chunk[HSE_KVS_KLEN_MAX]; ssize_t len; @@ -156,7 +156,7 @@ main(int argc, char **argv) rc = hse_kvdb_init(); if (rc) { - printf("Failed to initialize kvdb"); + printf("Failed to initialize kvdb\n"); exit(1); } @@ -171,7 +171,7 @@ main(int argc, char **argv) exit(1); if (extract) - rc = extract_kv_to_files(kvs, argc - optind - 1, &argv[optind]); + rc = extract_kv_to_files(kvs, argc - optind, &argv[optind]); else rc = put_files_as_kv(kvdb, kvs, argc - optind, &argv[optind]);
Zoul platform.c: include stdlib.h
#include <stdint.h> #include <string.h> #include <stdio.h> +#include <stdlib.h> /*---------------------------------------------------------------------------*/ /* Log configuration */ #include "sys/log.h"
Remove LwIP from 201910.00 changelog on master
- Contains bug fixes and enhancements in ESP NIMBLE stack. - Contains bug fixes in ESP Bluedroid stack. -#### Enable lwIP full duplex feature -- The same socket may be used by multiple tasks concurrently. - ## 201908.00 08/26/2019 ### New Features
cheza: Fix triggering PPC interrupt Cheza only has PPC on port-0. Passing only 0 to the interrupt handler. BRANCH=none TEST=Plug a charger to port-0; it charges.
@@ -115,9 +115,8 @@ static void anx74xx_cable_det_interrupt(enum gpio_signal signal) static void ppc_interrupt(enum gpio_signal signal) { - int port = (signal == GPIO_USB_C0_SWCTL_INT_ODL) ? 0 : 1; - - sn5s330_interrupt(port); + /* Only port-0 uses PPC chip */ + sn5s330_interrupt(0); } /* ADC channels */
apps/iperf: fix not displaying bandwidth in UDP. This patch allows iperf to display bandwidth/transfer in UDP RX.
@@ -2430,6 +2430,11 @@ static void iperf_print_results(struct iperf_test *test) } else { iprintf(test, report_bw_format, sp->socket, start_time, end_time, ubuf, nbuf, report_receiver); } + } else { + if (test->json_output) + cJSON_AddItemToObject(json_summary_stream, "receiver", iperf_json_printf("socket: %d start: %f end: %f seconds: %f bytes: %d bits_per_second: %f", (int64_t) sp->socket, (double) start_time, (double) end_time, (double) end_time, (int64_t) bytes_received, bandwidth * 8)); + else + iprintf(test, report_bw_format, sp->socket, start_time, end_time, ubuf, nbuf, report_receiver); } } }
rename tasking_installed() to tasking_is_active(), tasking_install() to tasking_init()
@@ -172,7 +172,7 @@ void list_task(task_t* task) { } void block_task_context(task_t* task, task_state reason, void* context) { - if (!tasking_installed()) return; + if (!tasking_is_active()) return; task->state = reason; task->block_context = context; @@ -188,7 +188,7 @@ void block_task(task_t* task, task_state reason) { } void unblock_task(task_t* task) { - if (!tasking_installed()) return; + if (!tasking_is_active()) return; lock(mutex); task->state = RUNNABLE; @@ -260,7 +260,7 @@ task_t* task_current() { } void add_process(task_t* task) { - if (!tasking_installed()) return; + if (!tasking_is_active()) return; list_task(task); @@ -434,7 +434,7 @@ void promote_task(task_t* task) { switch_queue(task, task->queue - 1); } -bool tasking_installed() { +bool tasking_is_active() { return (queues && queues->size >= 1 && current_task); } @@ -446,12 +446,19 @@ void booster() { } } -void tasking_install(mlfq_option options) { - if (tasking_installed()) return; +void tasking_install() { + Deprecated(); +} - printf_info("Initializing tasking..."); +void tasking_installed() { + Deprecated(); +} - kernel_begin_critical(); +void tasking_init(mlfq_option options) { + if (tasking_is_active()) { + panic("called tasking_init() after it was already active"); + return; + } printf_dbg("moving stack"); move_stack((void*)0xDFFFF000, 0x4000); @@ -526,7 +533,7 @@ void tasking_install(mlfq_option options) { } void update_blocked_tasks() { - if (!tasking_installed()) return; + if (!tasking_is_active()) return; //if there is a pending key, wake first responder /* @@ -615,7 +622,10 @@ task_t* first_responder() { } int fork(char* name) { - if (!tasking_installed()) return 0; //TODO: check this result + if (!tasking_is_active()) { + panic("called fork() before tasking was active"); + return 0; + } kernel_begin_critical(); @@ -732,7 +742,10 @@ array_m* first_queue_containing_runnable(void) { } task_t* mlfq_schedule() { - if (!tasking_installed()) return NULL; + if (!tasking_is_active()) { + panic("called mlfq_schedule() before tasking was active"); + return NULL; + } //find current index in queue array_m* current_queue = array_m_lookup(queues, current_task->queue); @@ -985,7 +998,7 @@ void proc() { } void force_enumerate_blocked() { - if (!tasking_installed()) return; + if (!tasking_is_active()) return; update_blocked_tasks(); }
[fixup] Add CHANGES entry
release branch. Changes between 1.1.0h and 1.1.1 [xx XXX xxxx] + *) Make ec_group_do_inverse_ord() more robust and available to other + EC cryptosystems, so that irrespective of BN_FLG_CONSTTIME, SCA + mitigations are applied to the fallback BN_mod_inverse(). + When using this function rather than BN_mod_inverse() directly, new + EC cryptosystem implementations are then safer-by-default. + [Billy Bob Brumley] + *) Add coordinate blinding for EC_POINT and implement projective coordinate blinding for generic prime curves as a countermeasure to chosen point SCA attacks.
[ya tool] use arcadia version of godoc
"ygdiff": { "description": "Run ygdiff tool", "visible": false }, "crypta": { "description": "Run Crypta client" }, "go": { "description": "Run go tool (1.12.9)" }, - "godoc": { "description": "Run godoc tool (1.12.9)" }, "gofmt": { "description": "Run gofmt tool (1.12.9)" }, "go_1.13": { "description": "Run go tool (1.13)" }, "gofmt_1.13": { "description": "Run gofmt tool (1.13)" }, + "godoc": { "description": "Arcadia version of godoc" }, "yo": { "description": "Tool for managing vendor/ directory" }, "tvmknife": { "description": "Tool for debugging and testing with TVM tickets" }, "sandboxctl": { "description": "Tool to run tasks in Sandbox" }, "tools": { "golang": {"bottle": "golang_1.12.9", "executable": "golang_1.12.9"}, "go": {"bottle": "golang_1.12.9", "executable": "go_1.12.9"}, - "godoc": {"bottle": "golang_1.12.9", "executable": "godoc_1.12.9"}, "gofmt": {"bottle": "golang_1.12.9", "executable": "gofmt_1.12.9"} }, "platforms": [ "version": "1.13" } }, + "godoc": { + "tools": { + "godoc": { "bottle": "godoc", "executable": "godoc" } + }, + "platforms": [ + {"host": {"os": "LINUX"}, "default": true}, + {"host": {"os": "DARWIN"}, "default": true}, + {"host": {"os": "WIN"}, "default": true} + ] + }, "qemu": { "tools": { "qemu": { "bottle": "qemu", "executable": "qemu" }, "arc": ["arc"] } }, + "godoc": { + "formula": { + "sandbox_id": 511821444, + "match": "godoc" + }, + "executable": { + "godoc": ["godoc"] + } + }, "yo": { "formula": { "sandbox_id": [496866925, 496866953], }, "executable": { "go_1.12.9": ["bin", "go"], - "godoc_1.12.9": ["bin", "godoc"], "gofmt_1.12.9": ["bin", "gofmt"] } },
Fix stdbool.h include for VS 2013 and higher
@@ -38,7 +38,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define NULL 0 #endif -#if defined(HAVE_STDBOOL_H) +#if defined(HAVE_STDBOOL_H) || (defined(_MSC_VER) && _MSC_VER >= 1800) #include <stdbool.h> #else #ifndef __cplusplus
o2d related latest fix as per
@@ -3872,7 +3872,7 @@ uerra, eswi-enfo:total_cloud_cover_sfc maximum value 96.4844 is not in [100,100] {NULL, }, }, - {&daily_average, &given_level, &has_bitmap}, + {&daily_average, &predefined_level, &has_bitmap}, }, { "northward_sea_water_velocity_o2d.s2", @@ -3892,7 +3892,7 @@ uerra, eswi-enfo:total_cloud_cover_sfc maximum value 96.4844 is not in [100,100] {NULL, }, }, - {&daily_average, &given_level, &has_bitmap}, + {&daily_average, &predefined_level, &has_bitmap}, }, { "sea-ice_thickness_o2d.s2",
admin/genders: bump to v1.27
%define pname genders Name: %{pname}%{PROJ_DELIM} -Version: 1.22 +Version: 1.27 Release: 1%{?dist} Summary: Static cluster configuration database License: GPL -Source: https://github.com/chaos/genders/releases/download/genders-1-22-1/%{pname}-%{version}.tar.gz +Source: https://github.com/chaos/genders/releases/download/genders-1-27-3/%{pname}-%{version}.tar.gz Requires: perl Group: %{PROJ_NAME}/admin URL: https://github.com/chaos/genders
Skip correct number of points when looking for end of cubic
@@ -182,7 +182,7 @@ void GraphicsPath::closeLine(int inCommand0, int inData0) break; case pcCubicTo: - point+=1; + point+=2; // Fall through... case pcWideLineTo: case pcCurveTo:
Changed fiducial values for tests/radheat_thermint.
@@ -14,11 +14,11 @@ def test_radheat_thermint(): output = GetOutput(path=cwd) # Check - assert np.isclose(output.log.final.earth.TMan, 2256.1804,atol=200) - assert np.isclose(output.log.final.earth.TCore, 5009.8424,atol=200) # !!! - assert np.isclose(output.log.final.earth.RIC, 1221e3,atol=30e3) # !!! - assert np.isclose(output.log.final.earth.RadPowerTotal, 24.2630e12,atol=4e12) - assert np.isclose(output.log.final.earth.MagMom, 0.994634,atol=0.2) + assert np.isclose(output.log.final.earth.TMan, 2256.180402) + assert np.isclose(output.log.final.earth.TCore, 5009.842462) + assert np.isclose(output.log.final.earth.RIC, 1.202998e+06) + assert np.isclose(output.log.final.earth.RadPowerTotal, 2.426301e+13) + assert np.isclose(output.log.final.earth.MagMom, 0.994634) if __name__ == "__main__":
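For context on what dropping the `atol` arguments does: `numpy.isclose` then falls back to its default tolerances (`rtol=1e-05`, `atol=1e-08`), which makes these assertions much stricter. A rough illustration with made-up comparison values, not taken from the actual test output:

```python
import numpy as np

expected = 2256.180402

# With a generous absolute tolerance, a value ~144 away from the fiducial
# value still passes the check.
print(np.isclose(2400.0, expected, atol=200))  # True

# With the defaults, the computed value has to agree with the fiducial value
# almost exactly.
print(np.isclose(2256.180402, expected))       # True
print(np.isclose(2256.5, expected))            # False
```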
clarify dependency
@@ -45,10 +45,10 @@ Ubuntu packages can be found at: https://launchpad.net/~jhli/+archive/ubuntu/lib Alternately, -DSAFECLIB_SRC_DOWNLOAD_AND_STATIC_LINK=ON to download sources and statically link to safeclib -### ndctl +### libndctl -ipmctl requires ndctl as a dependency on Linux-based systems. +ipmctl depends on libndctl (ndctl-libs). It can be found here https://github.com/pmem/ndctl if not available as a package.
Added version to upnp
@@ -140,7 +140,7 @@ endif() set_target_properties (upnp_shared PROPERTIES OUTPUT_NAME upnp EXPORT_NAME UPNP::Shared - VERSION ${UPNP_VERSION} + VERSION ${UPNP_SOMAJOR} SOVERSION ${UPNP_VERSION_MAJOR} PUBLIC_HEADER "${UPNP_HEADERS}" ) @@ -213,6 +213,7 @@ endif() set_target_properties (upnp_static PROPERTIES OUTPUT_NAME upnp${STATIC_POSTFIX} EXPORT_NAME UPNP::Static + VERSION ${UPNP_SOMAJOR} ) target_link_libraries (upnp_static
linop: only free if non-NULL
@@ -462,6 +462,8 @@ struct linop_s* linop_loop(unsigned int D, const long dims[D], struct linop_s* o */ void linop_free(const struct linop_s* op) { + if (NULL == op) + return; operator_free(op->forward); operator_free(op->adjoint); operator_free(op->normal);
Move GC collect after loading unit test function.
@@ -26,8 +26,8 @@ for module in sorted(os.listdir(SCRIPT_DIR)): test_passed = True test_path = "/".join((mod_path, test)) try: - gc.collect() exec(open(test_path).read()) + gc.collect() if unittest(DATA_DIR, TEMP_DIR) == False: raise Exception() except Exception as e:
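One plausible reading of the reordering above is that collecting after `exec()` reclaims the temporary objects created while compiling and loading the test source, so the test itself starts from a cleaner heap. A hypothetical standalone sketch; the `unittest(data_dir, temp_dir)` signature mirrors the harness above, everything else is made up:

```python
import gc

# Stand-in for open(test_path).read() in the real harness.
test_source = (
    "def unittest(data_dir, temp_dir):\n"
    "    return data_dir is not None and temp_dir is not None\n"
)

exec(test_source)  # load the test function into the current namespace
gc.collect()       # reclaim the garbage produced while compiling/loading it

assert unittest("/data", "/tmp") is True
```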
fixed bug in FluxOutGlobal unit
@@ -2230,7 +2230,7 @@ void InitializeOutputPoise(OUTPUT *output,fnWriteOutput fnWrite[]) { */ output[OUT_FLUXOUTGLOBAL].bNeg = 1; output[OUT_FLUXOUTGLOBAL].dNeg = 1; - sprintf(output[OUT_FLUXINGLOBAL].cNeg,"W/m^2"); + sprintf(output[OUT_FLUXOUTGLOBAL].cNeg,"W/m^2"); output[OUT_FLUXOUTGLOBAL].iNum = 1; output[OUT_FLUXOUTGLOBAL].iModuleBit = POISE; fnWrite[OUT_FLUXOUTGLOBAL] = &WriteFluxOutGlobal;
* CMakeLists fixes for the latest CMake version
@@ -8,6 +8,8 @@ unset(CHANGELOG_LAST_VERSION) unset(CHANGELOG_LAST_MESSAGE) include(DebChangelog) +project(${PROJECT_NAME} C) + set(PROJECT_NAME "ejdb2") set(PROJECT_VENDOR "Softmotions (https://softmotions.com)") set(PROJECT_WEBSITE "https://github.com/Softmotions/ejdb") @@ -18,8 +20,6 @@ set(CHANGELOG_MESSAGE ${CHANGELOG_LAST_MESSAGE}) set(PROJECT_PPA "ppa:adamansky/ejdb2") set(PROJECT_PPA_USER "adamansky") -project(${PROJECT_NAME} C) - set(PROJECT_VERSION_MAJOR ${CHANGELOG_LAST_VERSION_MAJOR}) set(PROJECT_VERSION_MINOR ${CHANGELOG_LAST_VERSION_MINOR}) set(PROJECT_VERSION_PATCH ${CHANGELOG_LAST_VERSION_PATCH})
Remove the callable? predicate. Many types are callable now in some circumstances, so the predicate is not that useful.
(defn indexed? "Check if x is an array or tuple." [x] (def t (type x)) (if (= t :array) true (= t :tuple))) -(defn callable? "Check if x is a function or cfunction." [x] - (def t (type x)) - (if (= t :function) true (= t :cfunction))) (defn true? "Check if x is true." [x] (= x true)) (defn false? "Check if x is false." [x] (= x false)) (defn nil? "Check if x is nil." [x] (= x nil))
Disable FreeBSD 13 builds on Cirrus CI. Builds have begun failing with this error: ld-elf.so.1: /usr/local/bin/rsync: Undefined symbol "locale_charset". There does not appear to be a new version, so hopefully this is a transient error. Disable for now to free the build pipeline.
@@ -45,19 +45,19 @@ freebsd_12_task: # FreeBSD 13 # ---------------------------------------------------------------------------------------------------------------------------------- -freebsd_13_task: - freebsd_instance: - image_family: freebsd-13-1 - cpu: 4 - memory: 4G +# freebsd_13_task: +# freebsd_instance: +# image_family: freebsd-13-1 +# cpu: 4 +# memory: 4G - install_script: pkg install -y bash git postgresql-libpqxx pkgconf libxml2 gmake perl5 libyaml p5-YAML-LibYAML rsync meson +# install_script: pkg install -y bash git postgresql-libpqxx pkgconf libxml2 gmake perl5 libyaml p5-YAML-LibYAML rsync meson - script: - - cd .. && perl ${CIRRUS_WORKING_DIR}/test/test.pl --min-gen --make-cmd=gmake --vm=none --vm-max=2 --no-coverage --no-valgrind --module=command --test=backup +# script: +# - cd .. && perl ${CIRRUS_WORKING_DIR}/test/test.pl --min-gen --make-cmd=gmake --vm=none --vm-max=2 --no-coverage --no-valgrind --module=command --test=backup - debug_script: - - ls -lah ${CIRRUS_WORKING_DIR} +# debug_script: +# - ls -lah ${CIRRUS_WORKING_DIR} # MacOS Monterey # ----------------------------------------------------------------------------------------------------------------------------------
ci: Fix cleanup process for integration-tests. We moved the inspektor-gadget integration tests to a new directory, resulting in the test process being named differently, i.e. inspektor-gadget.test rather than integration.test.
@@ -40,7 +40,7 @@ runs: # Forward the SIGINT directly to test process but wait for current # active jobs since we can only wait for current shell child process. echo "IntegrationTestsJob: Notifying the integration tests process about the cancellation" - kill -2 $(pidof integration.test) > /dev/null + kill -2 $(pidof inspektor-gadget.test) > /dev/null echo "IntegrationTestsJob: Waiting for the integration tests process to finish" wait $(jobs -p) echo "IntegrationTestsJob: We are done with the clean-up. Let the job exit"
Fix incorrect enums being used. Fix memory leak due to aead_abort() using incorrect enums to identify the algorithm used. Fix incorrect return value when the tag check fails in aead_verify().
@@ -948,7 +948,7 @@ psa_status_t mbedtls_psa_aead_verify( psa_aead_operation_t *operation, { if( do_tag_check && safer_memcmp(tag, check_tag, tag_length) != 0 ) { - status = MBEDTLS_ERR_GCM_AUTH_FAILED; + status = PSA_ERROR_INVALID_SIGNATURE; } } @@ -962,8 +962,8 @@ psa_status_t mbedtls_psa_aead_abort( psa_aead_operation_t *operation ) { switch( operation->alg ) { -#if defined(MBEDTLS_CCM_C) - case MBEDTLS_PSA_BUILTIN_ALG_CCM: +#if defined(MBEDTLS_PSA_BUILTIN_ALG_CCM) + case PSA_ALG_CCM: mbedtls_ccm_free( &operation->ctx.ccm ); break; #endif /* MBEDTLS_PSA_BUILTIN_ALG_CCM */
Document and rename heightmap same_size function. Now returns false when either parameter is NULL.
@@ -50,9 +50,11 @@ static bool in_bounds(const TCOD_heightmap_t *hm, int x, int y) { if (y < 0 || y >= hm->h) return false; return true; } - -static bool same_size(const TCOD_heightmap_t *hm1, const TCOD_heightmap_t *hm2) { - return hm1->w == hm2->w && hm1->h == hm2->h; +/** + Returns true if these heighmaps have the same shape and are non-NULL. + */ +static bool is_same_size(const TCOD_heightmap_t *hm1, const TCOD_heightmap_t *hm2) { + return hm1 && hm2 && hm1->w == hm2->w && hm1->h == hm2->h; } TCOD_heightmap_t *TCOD_heightmap_new(int w,int h) { @@ -300,7 +302,7 @@ void TCOD_heightmap_clamp(TCOD_heightmap_t *hm, float min, float max) { } void TCOD_heightmap_lerp_hm(const TCOD_heightmap_t *hm1, const TCOD_heightmap_t *hm2, TCOD_heightmap_t *hmres, float coef) { - if (!same_size(hm1, hm2) || !same_size(hm1, hmres)) { + if (!is_same_size(hm1, hm2) || !is_same_size(hm1, hmres)) { return; } for (int i=0; i < hm1->w*hm1->h; i++ ) { @@ -309,7 +311,7 @@ void TCOD_heightmap_lerp_hm(const TCOD_heightmap_t *hm1, const TCOD_heightmap_t } void TCOD_heightmap_add_hm(const TCOD_heightmap_t *hm1, const TCOD_heightmap_t *hm2, TCOD_heightmap_t *hmres) { - if (!same_size(hm1, hm2) || !same_size(hm1, hmres)) { + if (!is_same_size(hm1, hm2) || !is_same_size(hm1, hmres)) { return; } for (int i=0; i < hm1->w*hm1->h; i++ ) { @@ -318,7 +320,7 @@ void TCOD_heightmap_add_hm(const TCOD_heightmap_t *hm1, const TCOD_heightmap_t * } void TCOD_heightmap_multiply_hm(const TCOD_heightmap_t *hm1, const TCOD_heightmap_t *hm2, TCOD_heightmap_t *hmres) { - if (!same_size(hm1, hm2) || !same_size(hm1, hmres)) { + if (!is_same_size(hm1, hm2) || !is_same_size(hm1, hmres)) { return; } for (int i=0; i < hm1->w*hm1->h; i++ ) {
Use progress_cb in genrsa
static int verbose = 0; -static int genrsa_cb(EVP_PKEY_CTX *ctx); - typedef enum OPTION_choice { OPT_COMMON, #ifndef OPENSSL_NO_DEPRECATED_3_0 @@ -180,7 +178,7 @@ opthelp: if (!init_gen_str(&ctx, "RSA", eng, 0, NULL, NULL)) goto end; - EVP_PKEY_CTX_set_cb(ctx, genrsa_cb); + EVP_PKEY_CTX_set_cb(ctx, progress_cb); EVP_PKEY_CTX_set_app_data(ctx, bio_err); if (EVP_PKEY_CTX_set_rsa_keygen_bits(ctx, num) <= 0) { @@ -243,24 +241,3 @@ opthelp: return ret; } -static int genrsa_cb(EVP_PKEY_CTX *ctx) -{ - char c = '*'; - BIO *b = EVP_PKEY_CTX_get_app_data(ctx); - int p = EVP_PKEY_CTX_get_keygen_info(ctx, 0); - - if (!verbose) - return 1; - - if (p == 0) - c = '.'; - if (p == 1) - c = '+'; - if (p == 2) - c = '*'; - if (p == 3) - c = '\n'; - BIO_write(b, &c, 1); - (void)BIO_flush(b); - return 1; -}
SOVERSION bump to version 7.12.1
@@ -73,7 +73,7 @@ set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_ # with backward compatible change and micro version is connected with any internal change of the library. set(SYSREPO_MAJOR_SOVERSION 7) set(SYSREPO_MINOR_SOVERSION 12) -set(SYSREPO_MICRO_SOVERSION 0) +set(SYSREPO_MICRO_SOVERSION 1) set(SYSREPO_SOVERSION_FULL ${SYSREPO_MAJOR_SOVERSION}.${SYSREPO_MINOR_SOVERSION}.${SYSREPO_MICRO_SOVERSION}) set(SYSREPO_SOVERSION ${SYSREPO_MAJOR_SOVERSION})
common/i2c_bitbang.c: Format with clang-format BRANCH=none TEST=none
@@ -281,9 +281,8 @@ static int i2c_read_byte(const struct i2c_port_t *i2c_port, uint8_t *byte, } static int i2c_bitbang_xfer(const struct i2c_port_t *i2c_port, - const uint16_t addr_flags, - const uint8_t *out, int out_size, - uint8_t *in, int in_size, int flags) + const uint16_t addr_flags, const uint8_t *out, + int out_size, uint8_t *in, int in_size, int flags) { uint16_t addr_8bit = addr_flags << 1, err = EC_SUCCESS; int i = 0; @@ -320,7 +319,8 @@ static int i2c_bitbang_xfer(const struct i2c_port_t *i2c_port, for (i = 0; i < in_size; i++) { err = i2c_read_byte(i2c_port, &in[i], - (flags & I2C_XFER_STOP) && (i == in_size - 1)); + (flags & I2C_XFER_STOP) && + (i == in_size - 1)); if (err) goto exit; } @@ -353,9 +353,7 @@ __overridable void board_pre_task_i2c_peripheral_init(void) { } -const struct i2c_drv bitbang_drv = { - .xfer = &i2c_bitbang_xfer -}; +const struct i2c_drv bitbang_drv = { .xfer = &i2c_bitbang_xfer }; #ifdef TEST_BUILD int bitbang_start_cond(const struct i2c_port_t *i2c_port)
decisions: clarify sentence by removing rationale
@@ -27,7 +27,7 @@ The main purpose of the decision process is to get a common understanding of the - For reviewers: - Prefer to directly give suggestions how to change sentences. - General questions should be asked in the root of "Conversation" and not at vaguely related sentences in the review. -- Decision PRs do not significantly change anything but one decision to make discussions clearly structured. +- Decision PRs do not significantly change anything but one decision. - Changes not changing the decision step or the direction of the decision are not decision PRs. - The person merging the decision PR must be someone else as the person that created the decision.