Dataset columns: message (string, length 6 to 474 characters), diff (string, length 8 to 5.22k characters).
ssl: add 9.5 merge FIXME for SAN TAP tests We don't have Subject Alternative Name support, and won't until 9.5. Make sure we uncomment these tests when we get there.
@@ -158,6 +158,7 @@ test_connect_ok("sslmode=verify-ca host=wronghost.test"); test_connect_fails("sslmode=verify-full host=wronghost.test"); # Test Subject Alternative Names. +# GPDB_95_MERGE_FIXME: uncomment these tests when commit acd08d764 is merged. #switch_server_cert($node, 'server-multiple-alt-names'); # #note "test hostname matching with X.509 Subject Alternative Names";
drivebase: add actuation
static pbio_drivebase_t __db; -// Get the physical state of a single motor +// Get the physical state of a drivebase static pbio_error_t drivebase_get_state(pbio_drivebase_t *db, int32_t *time_now, int32_t *distance_count, @@ -64,6 +64,24 @@ static pbio_error_t drivebase_get_state(pbio_drivebase_t *db, return PBIO_SUCCESS; } +// Get the physical state of a drivebase +static pbio_error_t drivebase_actuate(pbio_drivebase_t *db, int32_t distance_control, int32_t heading_control) { + pbio_error_t err; + + int32_t dif = pbio_math_mul_i32_fix16(heading_control, db->turn_counts_per_diff); + int32_t sum = pbio_math_mul_i32_fix16(distance_control, db->drive_counts_per_sum); + + err = pbio_hbridge_set_duty_cycle_sys(db->left->hbridge, sum + dif); + if (err != PBIO_SUCCESS) { + return err; + } + err = pbio_hbridge_set_duty_cycle_sys(db->right->hbridge, sum - dif); + if (err != PBIO_SUCCESS) { + return err; + } + return PBIO_SUCCESS; +} + // Log motor data for a motor that is being actively controlled static pbio_error_t drivebase_log_update(pbio_drivebase_t *db, int32_t time_now, @@ -198,13 +216,16 @@ pbio_error_t pbio_drivebase_start(pbio_drivebase_t *db, int32_t speed, int32_t r static pbio_error_t pbio_drivebase_update(pbio_drivebase_t *db) { + // Get the physical state int32_t time_now, distance_count, distance_rate_count, heading_count, heading_rate_count; - pbio_error_t err = drivebase_get_state(db, &time_now, &distance_count, &distance_rate_count, &heading_count, &heading_rate_count); if (err != PBIO_SUCCESS) { return err; } + // Actuate + err = drivebase_actuate(db, 0, 0); + // No control for now, just logging return drivebase_log_update(db, time_now, distance_count, distance_rate_count, heading_count, heading_rate_count); }
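The new drivebase_actuate() applies standard differential-drive mixing: the distance (forward) command contributes to both motors with the same sign, while the heading (turn) command contributes with opposite signs. A minimal illustrative sketch of that mixing, with the fixed-point unit conversions from the patch left out and all names hypothetical:

#include <stdint.h>

/* Illustrative only: combine a forward command and a turn command into
 * per-motor outputs, as drivebase_actuate does after scaling. */
static void mix_drive(int32_t distance_control, int32_t heading_control,
                      int32_t *left_duty, int32_t *right_duty)
{
    int32_t sum = distance_control;  /* common-mode part: both wheels forward   */
    int32_t dif = heading_control;   /* differential part: wheels in opposition */
    *left_duty  = sum + dif;
    *right_duty = sum - dif;
}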
vnet: remove duplicate interface function macro
@@ -89,21 +89,6 @@ typedef struct _vnet_interface_function_list_elt clib_error_t *(*fp) (struct vnet_main_t * vnm, u32 if_index, u32 flags); } _vnet_interface_function_list_elt_t; -#define _VNET_INTERFACE_FUNCTION_DECL(f,tag) \ - \ -static void __vnet_interface_function_init_##tag##_##f (void) \ - __attribute__((__constructor__)) ; \ - \ -static void __vnet_interface_function_init_##tag##_##f (void) \ -{ \ - vnet_main_t * vnm = vnet_get_main(); \ - static _vnet_interface_function_list_elt_t init_function; \ - init_function.next_interface_function = \ - vnm->tag##_functions[VNET_ITF_FUNC_PRIORITY_LOW]; \ - vnm->tag##_functions[VNET_ITF_FUNC_PRIORITY_LOW] = &init_function; \ - init_function.fp = (void *) &f; \ -} - #define _VNET_INTERFACE_FUNCTION_DECL_PRIO(f,tag,p) \ \ static void __vnet_interface_function_init_##tag##_##f (void) \ @@ -118,6 +103,9 @@ static void __vnet_interface_function_init_##tag##_##f (void) \ init_function.fp = (void *) &f; \ } +#define _VNET_INTERFACE_FUNCTION_DECL(f,tag) \ + _VNET_INTERFACE_FUNCTION_DECL_PRIO(f,tag,VNET_ITF_FUNC_PRIORITY_LOW) + #define VNET_HW_INTERFACE_ADD_DEL_FUNCTION(f) \ _VNET_INTERFACE_FUNCTION_DECL(f,hw_interface_add_del) #define VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(f) \
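The deduplication pattern used here, in miniature: keep one parameterized macro and let the old name forward to it with the default priority. The macros below are purely illustrative, not VPP code:

/* One parameterized macro plus a thin wrapper that supplies the default
 * argument, instead of two near-identical macro bodies. */
#define REGISTER_FN_PRIO(f, tag, prio)  register_function((f), #tag, (prio))
#define REGISTER_FN(f, tag)             REGISTER_FN_PRIO(f, tag, PRIORITY_LOW)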
Install dependencies for tests in windows ci.
@@ -94,6 +94,17 @@ function sub-python { echo "include(FindPackageHandleStandardArgs)" >> $FindPython echo "FIND_PACKAGE_HANDLE_STANDARD_ARGS(Python REQUIRED_VARS Python_EXECUTABLE Python_LIBRARIES Python_INCLUDE_DIRS VERSION_VAR Python_VERSION)" >> $FindPython echo "mark_as_advanced(Python_EXECUTABLE Python_LIBRARIES Python_INCLUDE_DIRS)" >> $FindPython + + # Install dependencies for tests + pip3 install requests + pip3 install setuptools + pip3 install wheel + pip3 install rsa + pip3 install fn + pip3 install scipy + pip3 install numpy + pip3 install scikit-learn + pip3 install joblib } # Ruby
Document -U flag to ldns-signzone Thank you Andreas for noticing!
@@ -91,6 +91,12 @@ small, and only the SEP keys that are passed are used. If there are no SEP keys, the DNSKEY RRset is signed with the non\-SEP keys. This option turns off the default and all keys are used to sign the DNSKEY RRset. +.TP +\fB-U\fR +Sign with every unique algorithm in the provided keys. The DNSKEY set +is signed with all the SEP keys, plus all the non\-SEP keys that have an +algorithm that was not presen in the SEP key set. + .TP \fB-E\fR \fIname\fR Use the EVP cryptographic engine with the given name for signing. This
SW: Improve error messages
@@ -398,7 +398,16 @@ __hw_wait_irq_retry: } rc = 0; break; - case CXL_EVENT_DATA_STORAGE: + + case CXL_EVENT_DATA_STORAGE: { + struct cxl_event_data_storage *ds = &card->event.fault; + + snap_trace(" %s: CXL_EVENT_DATA_STORAGE\n", __func__); + snap_trace(" flags=%04x addr=%08llx dsisr=%08llx\n", + ds->flags, (long long)ds->addr, (long long)ds->dsisr); + break; + } + case CXL_EVENT_AFU_ERROR: //case CXL_EVENT_READ_FAIL: default:
[components][driver] fix qspi bug
@@ -388,9 +388,10 @@ rt_spi_flash_device_t rt_sfud_flash_probe(const char *spi_flash_dev_name, const rt_qspi_configure(qspi_dev, &qspi_cfg); if(qspi_dev->enter_qspi_mode != RT_NULL) qspi_dev->enter_qspi_mode(qspi_dev); - } + /* set data lines width */ sfud_qspi_fast_read_enable(sfud_dev, qspi_dev->config.qspi_dl_width); + } #endif /* SFUD_USING_QSPI */ }
Add PH_TICKS_PARTIAL_NS
#define PH_TICKS_PER_HOUR (PH_TICKS_PER_MIN * 60) #define PH_TICKS_PER_DAY (PH_TICKS_PER_HOUR * 24) +#define PH_TICKS_PARTIAL_NS(Ticks) (((ULONG64)(Ticks) / PH_TICKS_PER_NS) % 1000000) #define PH_TICKS_PARTIAL_MS(Ticks) (((ULONG64)(Ticks) / PH_TICKS_PER_MS) % 1000) #define PH_TICKS_PARTIAL_SEC(Ticks) (((ULONG64)(Ticks) / PH_TICKS_PER_SEC) % 60) #define PH_TICKS_PARTIAL_MIN(Ticks) (((ULONG64)(Ticks) / PH_TICKS_PER_MIN) % 60)
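A small usage sketch of the PH_TICKS_PARTIAL_* family, including the new member. It assumes the PH_TICKS_PER_* definitions from the same header and the Windows ULONG64 type; the variable name and format string are made up:

/* Illustrative only: split a tick-based duration into displayable parts. */
ULONG64 ticks = interval_in_ticks;   /* hypothetical duration in tick units */
printf("%llu min, %llu s, %llu ms, %llu partial-ns units\n",
       (unsigned long long)PH_TICKS_PARTIAL_MIN(ticks),
       (unsigned long long)PH_TICKS_PARTIAL_SEC(ticks),
       (unsigned long long)PH_TICKS_PARTIAL_MS(ticks),
       (unsigned long long)PH_TICKS_PARTIAL_NS(ticks));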
Add new fields to FileShare schema
<!-- SharePoint debug fields --> <field name="sp_path" type="string" indexed="true" stored="true" multiValued="false"/> + <field name="sp_site" type="string" indexed="true" stored="true" multiValued="false"/> + <field name="sp_item" type="string" indexed="true" stored="true" multiValued="false"/> <!-- Common metadata fields, named specifically to match up with SolrCell metadata when parsing rich documents such as Word, PDF.
Fix Exception text in print
@@ -696,6 +696,10 @@ int mrbc_print_sub(const mrbc_value *v) console_printf( "#<Handle:%08x>", v->handle ); break; + case MRBC_TT_EXCEPTION: + console_printf( "#<%s>", symid_to_str(v->cls->sym_id)); + break; + default: console_printf("Not support MRBC_TT_XX(%d)", mrbc_type(*v)); break;
Fix ref-1.7 encoder test support
@@ -252,7 +252,7 @@ def get_encoder_params(encoderName, imageSet): name = "reference-1.7" outDir = "Test/Images/%s" % imageSet refName = None - if encoderName == "ref-2.0": + elif encoderName == "ref-2.0": # Note this option rebuilds a new reference test set using the # user's locally build encoder. encoder = te.Encoder2x("avx2")
codegen: disable check_gen test in ASAN builds
@@ -139,7 +139,14 @@ file (GLOB SCRIPT_TESTS *.sh) foreach (file ${SCRIPT_TESTS}) get_filename_component (name ${file} NAME) + if (NOT + ENABLE_ASAN + OR NOT + name + MATCHES + "check_gen.sh") add_scripttest (${name}) + endif () endforeach (file ${SCRIPT_TESTS}) add_subdirectory (gen)
stm32_eth: Fix in assertion parameters.
@@ -1134,7 +1134,7 @@ static int stm32_transmit(struct stm32_ethmac_s *priv) /* Set frame size */ - DEBUGASSERT(priv->dev.d_len <= CONFIG_NET_ETH_PKTSIZE); + DEBUGASSERT(priv->dev.d_len <= CONFIG_STM32_ETH_BUFSIZE); txdesc->tdes1 = priv->dev.d_len; /* Set the Buffer1 address pointer */
build centos image on RHEL
@@ -28,7 +28,7 @@ echo "-------------------------------------------------------" echo "Runtimes: Singularity" echo "-------------------------------------------------------" -if [ "x$DISTRO_FAMILY" == "xCentOS" ];then +if [ "x$DISTRO_FAMILY" == "xCentOS" -o "x$DISTRO_FAMILY" == "xRHEL" ];then export DISTRO=centos export BOOTSTRAP_DEF=/opt/ohpc/pub/doc/contrib/singularity-ohpc-2.3.1/examples/${DISTRO}/Singularity else
Add reference to Labs to Readme.md
@@ -39,3 +39,10 @@ This repository contains the FreeRTOS Kernel, a number of supplementary librarie ## Previous releases [Releases](https://github.com/FreeRTOS/FreeRTOS/releases) contains older FreeRTOS releases. +## FreeRTOS Lab Projects +FreeRTOS Lab projects are libraries and demos that are fully functional, but may be experimental or undergoing optimizations and refactorization to improve memory usage, modularity, documentation, demo usability, or test coverage. + +Most FreeRTOS Lab libraries can be found in the [FreeRTOS-Labs repository](https://github.com/FreeRTOS/FreeRTOS-Labs). + +A number of FreeRTOS Lab Demos can be found in the [FreeRTOS Github Organization](https://github.com/FreeRTOS) by searching for "Lab" or following [this link](https://github.com/FreeRTOS?q=Lab&type=&language=) to the search results. +
Another Windows build failure...
@@ -183,7 +183,7 @@ handle_connection(void *arg) unsigned long messages = 0; unsigned long long first_length = 0; unsigned long long sum = 0; - unsigned long round_bytes; + unsigned long long round_bytes; struct timeval round_start; time_t round_timeout = 0;
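A likely reason this variable needs unsigned long long on Windows (hedged, inferred from the type change): in the LLP64 model used by Windows, unsigned long is only 32 bits even in 64-bit builds, so a large byte counter either wraps or triggers conversion problems when mixed with the surrounding unsigned long long fields. A standalone illustration of the width difference:

#include <stdio.h>

int main(void)
{
    unsigned long      narrow = 0;   /* 32 bits on LLP64 targets            */
    unsigned long long wide   = 0;   /* at least 64 bits on every platform  */
    for (int i = 0; i < 5; i++) {
        narrow += 1000000000UL;      /* wraps after ~4.29e9 on LLP64        */
        wide   += 1000000000ULL;
    }
    printf("narrow=%lu wide=%llu\n", narrow, wide);
    return 0;
}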
fix enable DAOS link in DAOS-Support README
[DAOS](https://github.com/daos-stack/daos) is supported as a backend storage system in dcp. The build instructions for enabling DAOS support can be found here: -[Enable DAOS](https://github.com/hpc/mpifileutils/doc/rst/build.rst). - +[Enable DAOS](https://mpifileutils.readthedocs.io/en/latest/build.html). The following are ways that DAOS can be used to move data both across DAOS as well as POSIX filesystems:
[software] Exclude systolic apps for non-systolic configurations
@@ -16,9 +16,14 @@ include $(RUNTIME_DIR)/runtime.mk APPS := $(patsubst $(APPS_DIR)/%/main.c,%,$(shell find $(APPS_DIR) -name "main.c")) BINARIES := $(addprefix $(BIN_DIR)/,$(APPS)) +ifeq ($(config), systolic) + ALL := $(APPS) +else + ALL := $(filter-out systolic/%,$(APPS)) +endif # Make all applications -all: $(BINARIES) +all: $(ALL) $(APPS): % : $(BIN_DIR)/% $(APPS_DIR)/Makefile $(shell find $(RUNTIME_DIR)/**.{S,c,h,ld} -type f)
Use `h2o_gettimeofday` rather than `gettimeofday`.
@@ -992,7 +992,7 @@ static int handle_settings_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *fram return H2O_HTTP2_ERROR_FRAME_SIZE; } if (conn->timestamps.settings_acked_at.tv_sec == 0 && conn->timestamps.settings_sent_at.tv_sec != 0) { - gettimeofday(&conn->timestamps.settings_acked_at, NULL); + conn->timestamps.settings_acked_at = h2o_gettimeofday(conn->super.ctx->loop); } } else { uint32_t prev_initial_window_size = conn->peer_settings.initial_window_size; @@ -1173,7 +1173,7 @@ static ssize_t expect_preface(h2o_http2_conn_t *conn, const uint8_t *src, size_t h2o_http2_encode_origin_frame(&conn->_write.buf, *conn->http2_origin_frame); } if (conn->timestamps.settings_sent_at.tv_sec == 0) { - gettimeofday(&conn->timestamps.settings_sent_at, NULL); + conn->timestamps.settings_sent_at = h2o_gettimeofday(conn->super.ctx->loop); } h2o_http2_conn_request_write(conn); }
Avoid rechecking min/max weight bitcount We know it's valid as checked during table creation
@@ -336,7 +336,7 @@ static float compress_symbolic_block_for_partition_1plane( bm.get_weight_quant_mode()); int bitcount = free_bits_for_partition_count[partition_count] - bits_used_by_weights; - if (bitcount <= 0 || bits_used_by_weights < 24 || bits_used_by_weights > 96) + if (bitcount <= 0) { qwt_errors[i] = 1e38f; continue; @@ -699,7 +699,7 @@ static float compress_symbolic_block_for_partition_2planes( 2 * di.weight_count, bm.get_weight_quant_mode()); int bitcount = 113 - 4 - bits_used_by_weights; - if (bitcount <= 0 || bits_used_by_weights < 24 || bits_used_by_weights > 96) + if (bitcount <= 0) { qwt_errors[i] = 1e38f; continue; @@ -713,7 +713,8 @@ static float compress_symbolic_block_for_partition_2planes( weight_high_value1[i], decimated_quantized_weights + BLOCK_MAX_WEIGHTS * (2 * decimation_mode), flt_quantized_decimated_quantized_weights + BLOCK_MAX_WEIGHTS * (2 * i), - u8_quantized_decimated_quantized_weights + BLOCK_MAX_WEIGHTS * (2 * i), bm.get_weight_quant_mode()); + u8_quantized_decimated_quantized_weights + BLOCK_MAX_WEIGHTS * (2 * i), + bm.get_weight_quant_mode()); compute_quantized_weights_for_decimation( di, @@ -721,7 +722,8 @@ static float compress_symbolic_block_for_partition_2planes( weight_high_value2[i], decimated_quantized_weights + BLOCK_MAX_WEIGHTS * (2 * decimation_mode + 1), flt_quantized_decimated_quantized_weights + BLOCK_MAX_WEIGHTS * (2 * i + 1), - u8_quantized_decimated_quantized_weights + BLOCK_MAX_WEIGHTS * (2 * i + 1), bm.get_weight_quant_mode()); + u8_quantized_decimated_quantized_weights + BLOCK_MAX_WEIGHTS * (2 * i + 1), + bm.get_weight_quant_mode()); // Compute weight quantization errors for the block mode qwt_errors[i] = compute_error_of_weight_set_2planes(
[verilator] Debug and fix loading of sections in verilator
@@ -164,12 +164,16 @@ static std::vector<uint8_t> FlattenElfFile(const std::string &filepath) { continue; } - if (phdr.p_memsz == 0) { + if (phdr.p_memsz == 0 || phdr.p_filesz == 0) { + std::cout << "Program header number " << i << " in `" << filepath + << "' has size 0; ignoring." << std::endl; continue; } if (!any || phdr.p_paddr < low) { low = phdr.p_paddr; + std::cout << "Program header number " << i << " in `" << filepath + << "' low is " << std::hex << low << std::endl; } Elf32_Addr seg_top = phdr.p_paddr + (phdr.p_memsz - 1); @@ -183,6 +187,8 @@ static std::vector<uint8_t> FlattenElfFile(const std::string &filepath) { if (!any || seg_top > high) { high = seg_top; + std::cout << "Program header number " << i << " in `" << filepath + << "' high is " << std::hex << high << std::endl; } any = true; @@ -209,6 +215,9 @@ static std::vector<uint8_t> FlattenElfFile(const std::string &filepath) { if (phdr.p_type != PT_LOAD) { continue; } + if (phdr.p_memsz == 0 || phdr.p_filesz == 0) { + continue; + } // Check the segment actually fits in the file if (file_size < phdr.p_offset + phdr.p_filesz) { @@ -237,6 +246,13 @@ static std::vector<uint8_t> FlattenElfFile(const std::string &filepath) { // Write a "segment" of data to the given memory area. static void WriteSegment(const MemArea &m, uint32_t offset, const std::vector<uint8_t> &data) { + std::cout << "Set `" << m.name << " " + << m.location << " " + << m.width_byte << " " + "0x" << std::hex << m.addr_loc.base << " " + "0x" << std::hex << m.addr_loc.size << " " + << "write with offset: 0x" << std::hex << offset << " " + << "write with size: 0x" << std::hex << data.size() << "\n"; assert(m.width_byte <= 32); assert(m.addr_loc.size == 0 || offset + data.size() <= m.addr_loc.size); assert((offset % m.width_byte) == 0); @@ -543,11 +559,11 @@ void DpiMemUtil::LoadElfToMemories(bool verbose, const std::string &filepath) { WriteSegment(mem_area, seg_rng.lo, seg_data); } catch (const SVScoped::Error &err) { std::ostringstream oss; - oss << "No memory found at `" << err.scope_name_ + std::cout << "No memory found at `" << err.scope_name_ << "' (the scope associated with region `" << mem_area.name << "', used by a segment that starts at LMA 0x" << std::hex << mem_area.addr_loc.base + seg_rng.lo << ")."; - throw std::runtime_error(oss.str()); + // throw std::runtime_warn(oss.str()); } } }
Fix vertical text align; Off-by-one error with counting lines.
@@ -1015,7 +1015,7 @@ void lovrGraphicsPrint(const char* str, size_t length, mat4 transform, float wra lovrFontMeasure(font, str, length, wrap, &width, &lineCount, &glyphCount); float scale = 1.f / font->pixelDensity; - float offsetY = (lineCount * font->rasterizer->height * font->lineHeight) * (valign / 2.f); + float offsetY = ((lineCount + 1) * font->rasterizer->height * font->lineHeight) * (valign / 2.f); mat4_scale(transform, scale, scale, scale); mat4_translate(transform, 0.f, offsetY, 0.f);
BugID:17137306: Modify linkkit_ntp_time_request to be a deprecated API.
@@ -365,6 +365,16 @@ int being_deprecated linkkit_fota_init(handle_service_fota_callback_fp_t callbac */ int being_deprecated linkkit_invoke_fota_service(void *data_buf, int data_buf_length); +/** + * @brief this function used to get NTP time from cloud. + * + * @param ntp_reply_cb, user callback which register to ntp request. + * when cloud returns ntp reply, sdk would trigger the callback function + * + * @return 0 when success, -1 when fail. + */ +int being_deprecated linkkit_ntp_time_request(void (*ntp_reply_cb)(const char *ntp_offset_time_ms)); + #ifdef __cplusplus } #endif /* __cplusplus */
Galaxy table gets real pubkeys now.
@@ -13,12 +13,16 @@ import Network.Ethereum.Web3 import Data.Text (splitOn) +import qualified Data.ByteArray as BA import qualified Data.Map.Strict as M {-TODOs: - Dawn takes a NounMap instead of a Map. Need a conversion function. + - The Haskell Dawn structure as it exists right now isn't right? It can't + parse a real %dawn event in the event browser. + -} @@ -28,10 +32,17 @@ provider = HttpProvider azimuthContract = "0x223c067F8CF28ae173EE5CafEa60cA44C335fecB" +bytes32ToAtom :: BytesN 32 -> Atom +bytes32ToAtom bytes = + (reverse (BA.pack $ BA.unpack bytes)) ^. from atomBytes + +-- retrievePoint :: Quantity -> Int -> Web3 () +-- retrievePoint bloq point = +-- withAccount () $ +-- withParam (to .~ azimuthContract) $ +-- withParam (block .~ (BlockWithNumber bloq)) $ +-- (pubKey, _, _, _, _, _, _, _, keyRev, continuity) <- points idx --- Reads the --- --- TODO: I don't know how to change a BytesN 32 to an Atom. retrieveGalaxyTable :: Quantity -> Web3 (Map Ship (Rift, Life, Pass)) retrieveGalaxyTable bloq = withAccount () $ @@ -41,11 +52,9 @@ retrieveGalaxyTable bloq = where getRow idx = do (pubKey, _, _, _, _, _, _, _, keyRev, continuity) <- points idx - -- pubKey is a sort of ByteArray. pure (fromIntegral idx, (fromIntegral continuity, fromIntegral keyRev, - fromIntegral 0)) - -- pubKey ^. from atomBytes)) + bytes32ToAtom pubKey)) -- Reads the Turf domains off the blockchain at block height `bloq`. readAmesDomains :: Quantity -> Web3 ([Turf]) @@ -83,9 +92,9 @@ dawnVent (Seed (Ship ship) life ring oaf) = do -- withParam (to .~ azimuthContract) $ -- points 15 - -- Retrieve the galaxy table [MUST FIX s/5/255/ AND PUBKEY TO ATOM] - -- galaxyTable <- retrieveGalaxyTable dBloq - -- print $ show galaxyTable + -- Retrieve the galaxy table [MUST FIX s/5/255/] + galaxyTable <- retrieveGalaxyTable dBloq + print $ show galaxyTable -- Read Ames domains [DONE] -- dTurf <- readAmesDomains dBloq
Fixed issue with integer too large on 32 bit compilers
/* - * Copyright (c) 2016 Lammert Bies + * Copyright (c) 2016-2018 Lammert Bies * Copyright (c) 2013-2016 the Civetweb developers * Copyright (c) 2004-2013 Sergey Lyubka * @@ -59,7 +59,7 @@ LIBHTTP_API uint64_t httplib_get_random( void ) { */ lfsr = (lfsr >> 1) | ((((lfsr >> 0) ^ (lfsr >> 1) ^ (lfsr >> 3) ^ (lfsr >> 4)) & 1) << 63); - lcg = lcg * 6364136223846793005 + 1442695040888963407; + lcg = lcg * 6364136223846793005ull + 1442695040888963407ull; } /*
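For context on the 32-bit compiler error: an unsuffixed decimal constant must fit one of the integer types the compiler offers for constants, and on older or strictly C90 32-bit toolchains that list stops at 32-bit long, so 6364136223846793005 is rejected as too large. The ull suffix types the constants as unsigned long long explicitly. A self-contained sketch of the fixed expression:

#include <stdint.h>

/* Illustrative only: the LCG step with explicitly suffixed 64-bit constants. */
static uint64_t lcg_step(uint64_t lcg)
{
    return lcg * 6364136223846793005ull + 1442695040888963407ull;
}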
tools: fix fpgainfo security key sysfs path
- max10/bmc sysfs paths are different for n3000 and d5005 cards
- d5005 sysfs path: dfl*/*spi*/spi_master/spi*/spi*/**/security/
- n3000 sysfs path: dfl*/*spi*/spi_master/spi*/**/security/
- resuraeve search path: *dfl*/*spi*/*spi*/*spi*/**/security/
#include "board_common.h" -#define DFL_SYSFS_SEC_GLOB "dfl*/*spi*/spi_master/spi*/spi*/**/security/" +#define DFL_SYSFS_SEC_GLOB "*dfl*/*spi*/*spi*/*spi*/**/security/" #define DFL_SYSFS_SEC_USER_FLASH_COUNT DFL_SYSFS_SEC_GLOB "*flash_count" #define DFL_SYSFS_SEC_BMC_CANCEL DFL_SYSFS_SEC_GLOB "bmc_canceled_csks" #define DFL_SYSFS_SEC_BMC_ROOT DFL_SYSFS_SEC_GLOB "bmc_root_entry_hash"
Add handling for `processMtcWatchType`
@@ -140,6 +140,15 @@ enum_map_t watchTypeMap[] = { {NULL, -1} }; +enum_map_t mtcCategoriesWatchTypeMap[] = { + {"fs", CFG_MTC_FS}, + {"net", CFG_MTC_NET}, + {"http", CFG_MTC_HTTP}, + {"dns", CFG_MTC_DNS}, + {"process", CFG_MTC_PROC}, + {NULL, -1} +}; + enum_map_t boolMap[] = { {"true", TRUE}, {"false", FALSE}, @@ -1233,6 +1242,14 @@ processMtcWatchType(config_t* config, yaml_document_t* doc, yaml_node_t* node) if (!scope_strcmp(value, "statsd")) { cfgMtcStatsdEnableSet(config, TRUE); } + + metric_category_t category; + for(category = CFG_MTC_FS; category <= CFG_MTC_PROC; ++category) { + if (!scope_strcmp(value, mtcCategoriesWatchTypeMap[category].str)) { + cfgMtcCategoryEnableSet(config, TRUE, category); + } + } + if (value) scope_free(value); } @@ -1264,6 +1281,11 @@ processMtcWatch(config_t* config, yaml_document_t* doc, yaml_node_t* node) // clear them all, then set values for whatever we find. cfgMtcStatsdEnableSet(config, FALSE); + metric_category_t category; + for (category=CFG_MTC_FS; category<=CFG_MTC_PROC; ++category) { + cfgMtcCategoryEnableSet(config, FALSE, category); + } + if (node->type != YAML_SEQUENCE_NODE) return; yaml_node_item_t* item; foreach(item, node->data.sequence.items) {
HV: Trace: Remove macro GEN_CASE Macro GEN_CASE in the hypervisor is not used. It's just for the userspace tool acrntrace and we get one copy of it in ./tools/acrntrace/trace_event.h. So, remove it. Acked-by: Eddie Dong
#include <sbuf.h> -#define GEN_CASE(id) case (id): { id##_FMT; break; } - #define TRACE_CUSTOM 0xFC #define TRACE_FUNC_ENTER 0xFD #define TRACE_FUNC_EXIT 0xFE #define TRACE_STR 0xFF -#define TRACE_TIMER_ACTION_ADDED_FMT \ -{PR("TIMER_ACTION ADDED: ID %d, deadline %llx total: %d\n", \ - (p)->a, ((uint64_t)((p)->c)<<32)|(p)->b, (p)->d); } - -#define TRACE_TIMER_ACTION_PCKUP_FMT \ -{PR("TIMER_ACTION PCKUP: ID %d, deadline %llx total: %d\n", \ - (p)->a, ((uint64_t)((p)->c)<<32)|(p)->b, (p)->d); } - -#define TRACE_TIMER_ACTION_UPDAT_FMT \ -{PR("TIMER_ACTION UPDAT: ID %d, deadline %llx total: %d\n", \ - (p)->a, ((unsigned long)((p)->c)<<32)|(p)->b, (p)->d); } - -#define TRACE_TIMER_IRQ_FMT \ -PR("TIMER_IRQ total: %llx\n", (p)->e) - -#define TRACE_CUSTOM_FMT \ -PR("CUSTOM: 0x%llx 0x%llx\n", (p)->e, (p)->f) - -#define TRACE_FUNC_ENTER_FMT \ -PR("ENTER: %s\n", (p)->str) - -#define TRACE_FUNC_EXIT_FMT \ -PR("EXIT : %s\n", (p)->str) - -#define TRACE_STR_FMT \ -PR("STR: %s\n", (p)->str) - -#define ALL_CASES \ - GEN_CASE(TRACE_TIMER_ACTION_ADDED); \ - GEN_CASE(TRACE_TIMER_ACTION_PCKUP); \ - GEN_CASE(TRACE_TIMER_ACTION_UPDAT); \ - GEN_CASE(TRACE_TIMER_IRQ); \ - GEN_CASE(TRACE_CUSTOM); \ - GEN_CASE(TRACE_STR); \ - GEN_CASE(TRACE_FUNC_ENTER); \ - GEN_CASE(TRACE_FUNC_EXIT); - /* sizeof(trace_entry) == 3 x 64bit */ struct trace_entry { uint64_t tsc; /* TSC */ @@ -236,15 +197,6 @@ TRACE_16STR(int evid, const char name[]) #else /* HV_DEBUG */ -#define TRACE_TIMER_ACTION_ADDED_FMT -#define TRACE_TIMER_ACTION_PCKUP_FMT -#define TRACE_TIMER_ACTION_UPDAT_FMT -#define TRACE_TIMER_IRQ_FMT -#define TRACE_CUSTOM_FMT -#define TRACE_FUNC_ENTER_FMT -#define TRACE_FUNC_EXIT_FMT -#define TRACE_STR_FMT - #define TRACE_ENTER #define TRACE_EXIT
Fix diagnostics builds
@@ -1267,14 +1267,14 @@ static float prepare_block_statistics( lowest_correlation = astc::min(lowest_correlation, fabsf(ba_cov)); // Diagnostic trace points - trace_add_data("min_r", blk->data_min.lane<0>()); - trace_add_data("max_r", blk->data_max.lane<0>()); - trace_add_data("min_g", blk->data_min.lane<1>()); - trace_add_data("max_g", blk->data_max.lane<1>()); - trace_add_data("min_b", blk->data_min.lane<2>()); - trace_add_data("max_b", blk->data_max.lane<2>()); - trace_add_data("min_a", blk->data_min.lane<3>()); - trace_add_data("max_a", blk->data_max.lane<3>()); + trace_add_data("min_r", blk.data_min.lane<0>()); + trace_add_data("max_r", blk.data_max.lane<0>()); + trace_add_data("min_g", blk.data_min.lane<1>()); + trace_add_data("max_g", blk.data_max.lane<1>()); + trace_add_data("min_b", blk.data_min.lane<2>()); + trace_add_data("max_b", blk.data_max.lane<2>()); + trace_add_data("min_a", blk.data_min.lane<3>()); + trace_add_data("max_a", blk.data_max.lane<3>()); trace_add_data("cov_rg", fabsf(rg_cov)); trace_add_data("cov_rb", fabsf(rb_cov)); trace_add_data("cov_ra", fabsf(ra_cov)); @@ -1300,9 +1300,9 @@ void compress_block( float lowest_correl; TRACE_NODE(node0, "block"); - trace_add_data("pos_x", blk->xpos); - trace_add_data("pos_y", blk->ypos); - trace_add_data("pos_z", blk->zpos); + trace_add_data("pos_x", blk.xpos); + trace_add_data("pos_y", blk.ypos); + trace_add_data("pos_z", blk.zpos); // Set stricter block targets for luminance data as we have more bits to play with bool block_is_l = blk.is_luminance(); @@ -1325,14 +1325,14 @@ void compress_block( #if defined(ASTCENC_DIAGNOSTICS) // Do this early in diagnostic builds so we can dump uniform metrics // for every block. Do it later in release builds to avoid redundant work! - float error_weight_sum = prepare_error_weight_block(ctx, input_image, *bsd, *blk, *ewb); + float error_weight_sum = prepare_error_weight_block(ctx, input_image, *bsd, blk, ewb); float error_threshold = ctx.config.tune_db_limit * error_weight_sum * block_is_l_scale * block_is_la_scale; - lowest_correl = prepare_block_statistics(bsd->texel_count, *blk, *ewb); - + lowest_correl = prepare_block_statistics(bsd->texel_count, blk, ewb); + trace_add_data("lowest_correl", lowest_correl); trace_add_data("tune_error_threshold", error_threshold); #endif
apps/blemesh: Add callbacks for mesh health fault handling This is required for MESH/SR/HM/RFS/* tests.
/* Company ID*/ #define CID_VENDOR 0xFFFF +#define FAULT_ARR_SIZE 2 + +static bool has_reg_fault = true; + static struct bt_mesh_cfg cfg_srv = { .relay = BT_MESH_RELAY_DISABLED, .beacon = BT_MESH_BEACON_ENABLED, @@ -52,7 +56,83 @@ static struct bt_mesh_cfg cfg_srv = { .relay_retransmit = BT_MESH_TRANSMIT(2, 20), }; +static int +fault_get_cur(struct bt_mesh_model *model, + uint8_t *test_id, + uint16_t *company_id, + uint8_t *faults, + uint8_t *fault_count) +{ + uint8_t reg_faults[FAULT_ARR_SIZE] = { [0 ... FAULT_ARR_SIZE-1] = 0xff }; + + console_printf("fault_get_cur() has_reg_fault %u\n", has_reg_fault); + + *test_id = 0x00; + *company_id = CID_VENDOR; + + *fault_count = min(*fault_count, sizeof(reg_faults)); + memcpy(faults, reg_faults, *fault_count); + + return 0; +} + +static int +fault_get_reg(struct bt_mesh_model *model, + uint16_t company_id, + uint8_t *test_id, + uint8_t *faults, + uint8_t *fault_count) +{ + if (company_id != CID_VENDOR) { + return -BLE_HS_EINVAL; + } + + console_printf("fault_get_reg() has_reg_fault %u\n", has_reg_fault); + + *test_id = 0x00; + + if (has_reg_fault) { + uint8_t reg_faults[FAULT_ARR_SIZE] = { [0 ... FAULT_ARR_SIZE-1] = 0xff }; + + *fault_count = min(*fault_count, sizeof(reg_faults)); + memcpy(faults, reg_faults, *fault_count); + } else { + *fault_count = 0; + } + + return 0; +} + +static int +fault_clear(struct bt_mesh_model *model, uint16_t company_id) +{ + if (company_id != CID_VENDOR) { + return -BLE_HS_EINVAL; + } + + has_reg_fault = false; + + return 0; +} + +static int +fault_test(struct bt_mesh_model *model, uint8_t test_id, uint16_t company_id) +{ + if (company_id != CID_VENDOR) { + return -BLE_HS_EINVAL; + } + + has_reg_fault = true; + bt_mesh_fault_update(model->elem); + + return 0; +} + static struct bt_mesh_health health_srv = { + .fault_get_cur = &fault_get_cur, + .fault_get_reg = &fault_get_reg, + .fault_clear = &fault_clear, + .fault_test = &fault_test, }; static struct bt_mesh_model_pub gen_level_pub;
Fix variable access.
@@ -232,7 +232,7 @@ jobs: rpath=$(dirname $(dirname $(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())")))/lib libpython=$(basename $(find ${rpath} -maxdepth 1 -name *.dylib)) libpython="${libpython%.*}" - find $(python -c "import site; print(site.getsitepackages()[0])") -name '*tinysplinepython*' -exec install_name_tool -add_rpath ${rpath} {} \; -exec install_name_tool -change @rpath/Python @rpath/{libpython} {} \; -exec otool -L {} \; + find $(python -c "import site; print(site.getsitepackages()[0])") -name '*tinysplinepython*' -exec install_name_tool -add_rpath ${rpath} {} \; -exec install_name_tool -change @rpath/Python @rpath/${libpython} {} \; -exec otool -L {} \; elif [ "$RUNNER_OS" == "Windows" ]; then find . -name "*$(python -c "import platform; print(''.join(platform.python_version_tuple()[0:2]))")*win*.whl" -exec python -m pip install {} \; fi
story: update `|story-remove` to use new types
:: :::: :: +/- *story :- %say |= $: [now=@da eny=@uvJ bec=beak] [[syd=desk =aeon:clay ~] ~] =/ our p.bec :: XX should `base` here be syd or q.bec =/ tak .^(tako:clay %cs /(scot %p our)/base/(scot %ud aeon)/tako/~) -=/ tale=(map tako:clay [@t @t]) - .^((map tako:clay [@t @t]) %cx /(scot %p our)/[syd]/(scot %da now)/story) -=. tale (~(del by tale) tak) +=/ tale=story .^(story %cx /(scot %p our)/[syd]/(scot %da now)/story) +=. tale (~(del ju tale) tak) :- %helm-pass [%c [%info syd %& [/story %ins story+!>(tale)]~]] \ No newline at end of file
Solve memory leaks and unconditional jumps found in MetaCall node loader.
@@ -2112,8 +2112,8 @@ void node_loader_impl_thread(void * data) const size_t path_max_length = PATH_MAX; #endif - char exe_path_str[path_max_length]; - size_t exe_path_str_size, exe_path_str_offset = 0; + char exe_path_str[path_max_length] = { 0 }; + size_t exe_path_str_size = 0, exe_path_str_offset = 0; #if defined(WIN32) || defined(_WIN32) unsigned int length = GetModuleFileName(NULL, exe_path_str, path_max_length); @@ -2125,8 +2125,15 @@ void node_loader_impl_thread(void * data) if (length == -1 || length == path_max_length) { + /* TODO: Report error */ + + /* TODO: Make logs thread safe */ /* log_write("metacall", LOG_LEVEL_ERROR, "node loader register invalid working directory path (%s)", exe_path_str); */ + + /* Unlock node implementation mutex */ + uv_mutex_unlock(&node_impl->mutex); + return; } @@ -2147,14 +2154,19 @@ void node_loader_impl_thread(void * data) /* Get the boostrap path */ const char bootstrap_file_str[] = "bootstrap.js"; - char bootstrap_path_str[path_max_length]; - size_t bootstrap_path_str_size; + char bootstrap_path_str[path_max_length] = { 0 }; + size_t bootstrap_path_str_size = 0; const char * load_library_path_env = getenv("LOADER_LIBRARY_PATH"); - size_t load_library_path_length; + size_t load_library_path_length = 0; if (load_library_path_env == NULL) { + /* TODO: Report error */ + + /* Unlock node implementation mutex */ + uv_mutex_unlock(&node_impl->mutex); + return; } @@ -2187,6 +2199,11 @@ void node_loader_impl_thread(void * data) if (node_impl_ptr_length <= 0) { + /* TODO: Report error */ + + /* Unlock node implementation mutex */ + uv_mutex_unlock(&node_impl->mutex); + return; } @@ -2196,6 +2213,11 @@ void node_loader_impl_thread(void * data) if (node_impl_ptr_str == NULL) { + /* TODO: Report error */ + + /* Unlock node implementation mutex */ + uv_mutex_unlock(&node_impl->mutex); + return; } @@ -2209,6 +2231,11 @@ void node_loader_impl_thread(void * data) if (register_ptr_length <= 0) { + /* TODO: Report error */ + + /* Unlock node implementation mutex */ + uv_mutex_unlock(&node_impl->mutex); + return; } @@ -2219,6 +2246,12 @@ void node_loader_impl_thread(void * data) if (register_ptr_str == NULL) { free(node_impl_ptr_str); + + /* TODO: Report error */ + + /* Unlock node implementation mutex */ + uv_mutex_unlock(&node_impl->mutex); + return; } @@ -2232,6 +2265,12 @@ void node_loader_impl_thread(void * data) { free(node_impl_ptr_str); free(register_ptr_str); + + /* TODO: Report error */ + + /* Unlock node implementation mutex */ + uv_mutex_unlock(&node_impl->mutex); + return; }
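Every early-return path added above unlocks the mutex it holds before returning. A common way to keep that invariant in one place in C is a single cleanup label; the sketch below is illustrative only, not a proposed change to the loader:

#include <stdlib.h>
#include <uv.h>

static void worker(uv_mutex_t *mutex)
{
    uv_mutex_lock(mutex);

    if (getenv("LOADER_LIBRARY_PATH") == NULL) {
        goto unlock;                 /* error path still releases the mutex */
    }

    /* ... the actual work would go here ... */

unlock:
    uv_mutex_unlock(mutex);
}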
Disable downsizing on error if V4L2 is enabled The V4L2 device is created with the initial device size; it does not support resizing. PR <https://github.com/Genymobile/scrcpy/pull/2947>
@@ -1545,13 +1545,20 @@ parse_args_with_getopt(struct scrcpy_cli_args *args, int argc, char *argv[], return false; } - if (opts->v4l2_device && opts->lock_video_orientation - == SC_LOCK_VIDEO_ORIENTATION_UNLOCKED) { + if (opts->v4l2_device) { + if (opts->lock_video_orientation == + SC_LOCK_VIDEO_ORIENTATION_UNLOCKED) { LOGI("Video orientation is locked for v4l2 sink. " "See --lock-video-orientation."); opts->lock_video_orientation = SC_LOCK_VIDEO_ORIENTATION_INITIAL; } + // V4L2 could not handle size change. + // Do not log because downsizing on error is the default behavior, + // not an explicit request from the user. + opts->downsize_on_error = false; + } + if (opts->v4l2_buffer && !opts->v4l2_device) { LOGE("V4L2 buffer value without V4L2 sink\n"); return false;
u3: abort all home-road bails
@@ -661,7 +661,10 @@ u3m_dump(void) c3_i u3m_bail(u3_noun how) { - if ( (c3__exit == how) && (u3R == &u3H->rod_u) ) { + if ( &(u3H->rod_u) == u3R ) { + // XX set exit code + // + fprintf(stderr, "home: bailing out\r\n"); abort(); } @@ -689,6 +692,8 @@ u3m_bail(u3_noun how) switch ( how ) { case c3__foul: case c3__oops: { + // XX set exit code + // fprintf(stderr, "bailing out\r\n"); abort(); } @@ -699,6 +704,9 @@ u3m_bail(u3_noun how) // choice but to use the signal process; and we require the flat // form of how. // + // XX JB: these seem unrecoverable, at least wrt memory management, + // so they've been disabled above for now + // c3_assert(_(u3a_is_cat(how))); u3m_signal(how); }
Misra 10.4: fix last 2 violations
@@ -44,7 +44,7 @@ int append_crc(char *in, int in_len) { unsigned int crc = 0; for (int i = 0; i < in_len; i++) { crc <<= 1; - if ((in[i] ^ ((crc >> 15) & 1U)) != 0U) { + if (((unsigned int)(in[i]) ^ ((crc >> 15) & 1U)) != 0U) { crc = crc ^ 0x4599U; } crc &= 0x7fffU; @@ -69,7 +69,7 @@ int append_bits(char *in, int in_len, char *app, int app_len) { int append_int(char *in, int in_len, int val, int val_len) { int in_len_copy = in_len; for (int i = val_len - 1; i >= 0; i--) { - in[in_len_copy] = ((unsigned int)(val) & (1U << (unsigned int)(i))) != 0; + in[in_len_copy] = ((unsigned int)(val) & (1U << (unsigned int)(i))) != 0U; in_len_copy++; } return in_len_copy;
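MISRA C:2012 Rule 10.4 requires both operands of an operator subject to the usual arithmetic conversions to have the same essential type category. Here a char array element was combined with an unsigned value (and, in the second hunk, compared against a signed 0), so the fix casts the char operand and compares against 0U. A compact sketch of the compliant form, with illustrative names:

#include <stdbool.h>

static bool crc_feedback(char bit, unsigned int crc)
{
    /* Non-compliant (mixed essential types):
     *   return (bit ^ ((crc >> 15) & 1U)) != 0;
     * Compliant, as in the patch: */
    return (((unsigned int)bit ^ ((crc >> 15) & 1U)) != 0U);
}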
add note about possible runtime failures when NOWAIT starts working
program t273959 ! testing presence of NOWAIT clause on an omp target update directive +! NOTE: test may fail at runtime once NOWAIT is supported and working integer, parameter :: nsize=100 real p(nsize), v1(nsize), v2(nsize) call vec_mult(p, v1, v2, nsize)
Comment change only: clarify how to write an empty parameter (using uAtClientWriteString()).
@@ -637,7 +637,10 @@ void uAtClientWriteUint64(uAtClientHandle_t atHandle, * Quotes are added around the string if useQuotes is * true. The AT client tracks whether this is the first * parameter or not and adds delimiters to the outgoing - * AT command as appropriate. + * AT command as appropriate. You can skip a parameter + * (e.g. AT+BLAH=thing,,next_thing) by pointing pParam at + * a null terminator (i.e. 0) and setting useQuotes to + * false. * * @param atHandle the handle of the AT client. * @param pParam the null-terminated string to be
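A hedged usage sketch of the pattern the new comment describes, assuming the parameter order implied by the header excerpt (handle, string, use-quotes flag); the surrounding command start/stop calls and the AT command itself are omitted and hypothetical:

// Writes the parameter list of something like AT+BLAH=thing,,next_thing;
// the middle call points pParam at a null terminator, giving an empty field.
uAtClientWriteString(atHandle, "thing", false);
uAtClientWriteString(atHandle, "", false);
uAtClientWriteString(atHandle, "next_thing", false);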
Fixes an issue when the zip command is used instead of jar
@@ -257,12 +257,15 @@ function(add_celix_bundle) WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} ) elseif(ZIP_COMMAND) - add_custom_command(OUTPUT ${BUNDLE_FILE} + add_custom_command(OUTPUT ${BUNDLE_CONTENT_DIR} COMMAND ${CMAKE_COMMAND} -E make_directory ${BUNDLE_CONTENT_DIR} + ) + + add_custom_command(OUTPUT ${BUNDLE_FILE} COMMAND ${CMAKE_COMMAND} -E copy_if_different ${BUNDLE_GEN_DIR}/MANIFEST.MF META-INF/MANIFEST.MF COMMAND ${ZIP_COMMAND} -rq ${BUNDLE_FILE} * COMMENT "Packaging ${BUNDLE_TARGET_NAME}" - DEPENDS ${BUNDLE_TARGET_NAME} "$<TARGET_PROPERTY:${BUNDLE_TARGET_NAME},BUNDLE_DEPEND_TARGETS>" ${BUNDLE_GEN_DIR}/MANIFEST.MF + DEPENDS ${BUNDLE_CONTENT_DIR} ${BUNDLE_TARGET_NAME} "$<TARGET_PROPERTY:${BUNDLE_TARGET_NAME},BUNDLE_DEPEND_TARGETS>" ${BUNDLE_GEN_DIR}/MANIFEST.MF WORKING_DIRECTORY ${BUNDLE_CONTENT_DIR} ) else()
Minor safeguard added
@@ -157,6 +157,8 @@ static void dill_halfchan_term(struct dill_halfchan *ch) { static void dill_halfchan_close(struct hvfs *vfs) { struct dill_halfchan *ch = (struct dill_halfchan*)vfs; dill_assert(ch); + /* This shouldn't happen, but let's handle it decently. */ + if(dill_slow(ch->closed)) return; /* If the other half of the channel is still open do nothing. */ if(!dill_halfchan_other(ch)->closed) { ch->closed = 1;
Remove pkg_mod stuff
@@ -17,13 +17,6 @@ libent = static_library('ent', libent_dep = declare_dependency(include_directories : inc, link_with : libent) -pkg_mod = import('pkgconfig') -pkg_mod.generate(libraries : libent, - version : '0.0', - name : 'libent', - filebase : 'ent', - description : 'A library to get entropy.') - ent_sample = executable('sample', ['sample.c'], dependencies : [libent_dep])
Update: Small printing improvement
@@ -126,8 +126,8 @@ void Display_Help() { puts("\nAll C tests ran successfully, run python3 evaluation.py for more comprehensive evaluation!"); puts(""); - puts("Welcome to OpenNARS for Applications!"); - puts("`````````````````````````````````````"); + puts("Welcome to `OpenNARS for Applications`!"); + puts("```````````````````````````````````````"); puts(" __ "); puts("/_\\`-+-.__ "); puts(" | /o\\");
sp: add missing timestamp on aggregation function
@@ -554,6 +554,7 @@ static int sp_process_data_aggr(char *buf_data, size_t buf_size, msgpack_object key; msgpack_object val; struct mk_list *head; + struct flb_time tm; struct flb_sp_cmd *cmd = task->cmd; struct flb_sp_cmd_key *ckey; @@ -573,7 +574,11 @@ static int sp_process_data_aggr(char *buf_data, size_t buf_size, msgpack_sbuffer_init(&mp_sbuf); msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write); - /* set outgoing map and it fixed size */ + /* set outgoing array + map and it fixed size */ + msgpack_pack_array(&mp_pck, 2); + + flb_time_get(&tm); + flb_time_append_to_msgpack(&tm, &mp_pck, 0); msgpack_pack_map(&mp_pck, map_entries); /* Iterate incoming records */
use proper logging level and fix incorrect sync condition
@@ -199,7 +199,7 @@ namespace NCudaLib { const ui64 defragmentedMemory = (temp - (startPtr + writeOffset)); GetDefaultStream().Synchronize(); - MATRIXNET_INFO_LOG << "Defragment " << defragmentedMemory * 1.0 / 1024 / 1024 << " memory" + MATRIXNET_DEBUG_LOG << "Defragment " << defragmentedMemory * 1.0 / 1024 / 1024 << " memory" << " in " << (Now() - startTime).SecondsFloat() << " seconds " << Endl; LastBlock->Size += defragmentedMemory; LastBlock->Ptr = startPtr + writeOffset; @@ -275,6 +275,16 @@ namespace NCudaLib { TCudaMemoryAllocation<PtrType>::FreeMemory(Memory); } + + + template <class T> + bool NeedSyncForAllocation(ui64 size) const { + const ui64 requestedBlockSize = GetBlockSize<T>(size) + MEMORY_REQUEST_ADJUSTMENT; + const bool canUseFirstFreeBlock = FirstFreeBlock != LastBlock && (FirstFreeBlock->Size >= requestedBlockSize); + return (LastBlock->Size < requestedBlockSize || ((LastBlock->Size - requestedBlockSize) <= MINIMUM_FREE_MEMORY_TO_DEFRAGMENTATION)) && !canUseFirstFreeBlock; + } + + template <typename T = char> TMemoryBlock<T>* Create(ui64 size) { ui64 requestedBlockSize = GetBlockSize<T>(size); @@ -310,9 +320,9 @@ namespace NCudaLib { return; } - MATRIXNET_INFO_LOG << "Starting memory defragmentation" << Endl; - MATRIXNET_INFO_LOG << "Fragmented memory " << memoryToDefragment * 1.0 / 1024 / 1024 << Endl; - MATRIXNET_INFO_LOG << "Free memory in last block " << LastBlock->Size * 1.0 / 1024 / 1024 << Endl; + MATRIXNET_DEBUG_LOG << "Starting memory defragmentation" << Endl; + MATRIXNET_DEBUG_LOG << "Fragmented memory " << memoryToDefragment * 1.0 / 1024 / 1024 << Endl; + MATRIXNET_DEBUG_LOG << "Free memory in last block " << LastBlock->Size * 1.0 / 1024 / 1024 << Endl; if ((memoryToDefragment > LastBlock->Size) && (LastBlock->Size < MINIMUM_FREE_MEMORY_TO_DEFRAGMENTATION)) { ythrow TOutOfMemoryError() << "Error: We don't have enough memory to defragmentation"; @@ -330,12 +340,6 @@ namespace NCudaLib { return FreeMemory; } - template <class T> - bool NeedSyncForAllocation(ui64 size) const { - const ui64 requestedBlockSize = GetBlockSize<T>(size) + MEMORY_REQUEST_ADJUSTMENT; - const bool canUseFirstFreeBlock = FirstFreeBlock != LastBlock && (FirstFreeBlock->Size < requestedBlockSize); - return (LastBlock->Size < requestedBlockSize || ((LastBlock->Size - requestedBlockSize) <= MINIMUM_FREE_MEMORY_TO_DEFRAGMENTATION)) && !canUseFirstFreeBlock; - } }; extern template class TStackLikeMemoryPool<EPtrType::CudaDevice>;
linux-raspberrypi: Bump to 5.10.78 To include Pi Zero 2W device-tree changes for brcmfmac firmware.
-LINUX_VERSION ?= "5.10.76" +LINUX_VERSION ?= "5.10.78" LINUX_RPI_BRANCH ?= "rpi-5.10.y" LINUX_RPI_KMETA_BRANCH ?= "yocto-5.10" -SRCREV_machine = "3728690b4a894dd57d3913f048dcab12bb61251e" +SRCREV_machine = "b2c047ab7e17a4ed702d313581620e826c58cc3c" SRCREV_meta = "e1979ceb171bc91ef2cb71cfcde548a101dab687" KMETA = "kernel-meta"
Factorize resource release after CreateProcess() Free the wide-character string in all cases before checking for errors.
@@ -94,10 +94,10 @@ sc_process_execute_p(const char *const argv[], HANDLE *handle, goto error_close_stderr; } - if (!CreateProcessW(NULL, wide, NULL, NULL, TRUE, 0, NULL, NULL, &si, - &pi)) { + BOOL ok = CreateProcessW(NULL, wide, NULL, NULL, TRUE, 0, NULL, NULL, &si, + &pi); free(wide); - + if (!ok) { if (GetLastError() == ERROR_FILE_NOT_FOUND) { ret = SC_PROCESS_ERROR_MISSING_BINARY; } @@ -115,7 +115,6 @@ sc_process_execute_p(const char *const argv[], HANDLE *handle, CloseHandle(stderr_write_handle); } - free(wide); *handle = pi.hProcess; return SC_PROCESS_SUCCESS;
Add restrictions for hls_intersect
@@ -171,6 +171,11 @@ choice config HLS_INTERSECT bool "HLS Intersect" + help + This example often does not meet timing (> 200ps negative slack) in bitstream generation. + Running on FPGA hardware has passed for many times but it is NOT guaranteed. + It is intended as a HLS coding example to show how to make two implementations for a single target. + It also shows how to invoke the hardware action several times from C main() function. select ENABLE_HLS_SUPPORT select FORCE_SDRAM_OR_BRAM select DISABLE_NVME
[libc/time] Add microsecond time support in gettimeofday.
@@ -120,7 +120,7 @@ struct tm* localtime_r(const time_t* t, struct tm* r) time_t local_tz; int utc_plus; - utc_plus = 0; /* GTM: UTC+0 */ + utc_plus = 8; /* GMT: UTC+8 */ local_tz = *t + utc_plus * 3600; return gmtime_r(&local_tz, r); } @@ -183,18 +183,14 @@ char* ctime(const time_t *tim_p) } RTM_EXPORT(ctime); -/** - * Returns the current time. - * - * @param time_t * t the timestamp pointer, if not used, keep NULL. - * - * @return The value ((time_t)-1) is returned if the calendar time is not available. - * If timer is not a NULL pointer, the return value is also stored in timer. - * - */ -RT_WEAK time_t time(time_t *t) +static void get_timeval(struct timeval *tv) { - time_t time_now = ((time_t)-1); /* default is not available */ + if (tv == RT_NULL) + return; + /* default is not available */ + tv->tv_sec = -1; + /* default is 0 */ + tv->tv_usec = 0; #ifdef RT_USING_RTC static rt_device_t device = RT_NULL; @@ -210,26 +206,41 @@ RT_WEAK time_t time(time_t *t) { if (rt_device_open(device, 0) == RT_EOK) { - rt_device_control(device, RT_DEVICE_CTRL_RTC_GET_TIME, &time_now); + rt_device_control(device, RT_DEVICE_CTRL_RTC_GET_TIME, &tv->tv_sec); + rt_device_control(device, RT_DEVICE_CTRL_RTC_GET_TIME_US, &tv->tv_usec); rt_device_close(device); } } #endif /* RT_USING_RTC */ - /* if t is not NULL, write timestamp to *t */ - if (t != RT_NULL) - { - *t = time_now; - } - - if(time_now == (time_t)-1) + if (tv->tv_sec == (time_t) -1) { /* LOG_W will cause a recursive printing if ulog timestamp function is turned on */ rt_kprintf("Cannot find a RTC device to provide time!\r\n"); errno = ENOSYS; } +} - return time_now; +/** + * Returns the current time. + * + * @param time_t * t the timestamp pointer, if not used, keep NULL. + * + * @return The value ((time_t)-1) is returned if the calendar time is not available. + * If timer is not a NULL pointer, the return value is also stored in timer. + * + */ +RT_WEAK time_t time(time_t *t) +{ + struct timeval now; + + get_timeval(&now); + + if (t) + { + *t = now.tv_sec; + } + return now.tv_sec; } RTM_EXPORT(time); @@ -344,12 +355,10 @@ RTM_EXPORT(timegm); /* TODO: timezone */ int gettimeofday(struct timeval *tv, struct timezone *tz) { - time_t t = time(RT_NULL); + get_timeval(tv); - if (tv != RT_NULL && t != (time_t)-1) + if (tv != RT_NULL && tv->tv_sec != (time_t) -1) { - tv->tv_sec = t; - tv->tv_usec = 0; return 0; } else
KDB Tool: Reformat CMake code
@@ -36,18 +36,14 @@ if (BUILD_STATIC) add_dependencies (kdb-static kdberrors_generated) set_target_properties (kdb-static PROPERTIES LINKER_LANGUAGE CXX) - set_target_properties (kdb-static PROPERTIES - COMPILE_DEFINITIONS "HAVE_KDBCONFIG_H;ELEKTRA_STATIC") + set_target_properties (kdb-static PROPERTIES COMPILE_DEFINITIONS "HAVE_KDBCONFIG_H;ELEKTRA_STATIC") - target_link_libraries (kdb-static - elektra-static - elektratools-static) + target_link_libraries (kdb-static elektra-static elektratools-static) # TODO: add helper libraries of plugins, too if (CMAKE_STATIC_FLAGS) - set_target_properties (kdb-static PROPERTIES LINK_FLAGS - ${CMAKE_STATIC_FLAGS}) + set_target_properties (kdb-static PROPERTIES LINK_FLAGS ${CMAKE_STATIC_FLAGS}) endif () install (TARGETS kdb-static DESTINATION bin)
Fix fd leak in pg_verifybackup An error code path newly-introduced by forgot to close a file descriptor when verifying a file's checksum. Per report from Coverity, via Tom Lane.
@@ -730,6 +730,7 @@ verify_file_checksum(verifier_context *context, manifest_file *m, { report_backup_error(context, "could not initialize checksum of file \"%s\"", relpath); + close(fd); return; }
Use tostring' when throwing top of stack as error
@@ -266,7 +266,6 @@ module Foreign.Lua.Core ( ) where import Prelude hiding (EQ, LT, compare, concat, error) -import qualified Prelude import Control.Monad import Data.ByteString (ByteString) @@ -310,37 +309,9 @@ throwTopMessageAsError = throwTopMessageAsError' id throwTopMessageAsError' :: (String -> String) -> Lua a throwTopMessageAsError' msgMod = do - ty <- ltype (-1) - msg <- case ty of - TypeNil -> return "nil" - TypeBoolean -> show <$> toboolean (-1) - TypeLightUserdata -> showPointer - TypeNumber -> Char8.unpack <$> tostring (-1) - TypeString -> Char8.unpack <$> tostring (-1) - TypeTable -> tryTostringMetaMethod - TypeFunction -> showPointer - TypeThread -> showPointer - TypeUserdata -> showPointer - TypeNone -> Prelude.error "Error while receiving the error message!" - pop 1 - throwLuaError (msgMod msg) - where - showPointer = show <$> topointer (-1) - tryTostringMetaMethod = do - hasMT <- getmetatable (-1) - if not hasMT - then showPointer - else do - -- push getmetatable(t).__tostring - pushstring (Char8.pack "__tostring") *> rawget (-2) - remove (-2) -- remove metatable from stack - isFn <- isfunction (-1) - if isFn - then do - insert (-2) - call 1 1 - Char8.unpack <$> tostring (-1) - else pop 1 *> showPointer + msg <- tostring' stackTop + pop 2 -- remove error and error string pushed by tostring' + throwLuaError (msgMod (Char8.unpack msg)) -- | Convert a Haskell function userdata object into a CFuntion. The userdata -- object must be at the top of the stack. Errors signaled via @'error'@ are
hv: vtd: check vtd enabling status with spinlock Check vtd translation enabling status when enable/disable translation inside dmar_enable/disable_translation with spinlock. Acked-by: Anthony Xu
@@ -357,16 +357,16 @@ static bool dmar_unit_support_aw(const struct dmar_drhd_rt *dmar_unit, uint32_t static void dmar_enable_translation(struct dmar_drhd_rt *dmar_unit) { - uint32_t status; + uint32_t status = 0; spinlock_obtain(&(dmar_unit->lock)); + if ((dmar_unit->gcmd & DMA_GCMD_TE) == 0U) { dmar_unit->gcmd |= DMA_GCMD_TE; iommu_write32(dmar_unit, DMAR_GCMD_REG, dmar_unit->gcmd); - /* 32-bit register */ dmar_wait_completion(dmar_unit, DMAR_GSTS_REG, DMA_GSTS_TES, false, &status); - status = iommu_read32(dmar_unit, DMAR_GSTS_REG); + } spinlock_release(&(dmar_unit->lock)); @@ -378,11 +378,12 @@ static void dmar_disable_translation(struct dmar_drhd_rt *dmar_unit) uint32_t status; spinlock_obtain(&(dmar_unit->lock)); + if ((dmar_unit->gcmd & DMA_GCMD_TE) != 0U) { dmar_unit->gcmd &= ~DMA_GCMD_TE; iommu_write32(dmar_unit, DMAR_GCMD_REG, dmar_unit->gcmd); - /* 32-bit register */ dmar_wait_completion(dmar_unit, DMAR_GSTS_REG, DMA_GSTS_TES, true, &status); + } spinlock_release(&(dmar_unit->lock)); } @@ -433,9 +434,7 @@ static int dmar_register_hrhd(struct dmar_drhd_rt *dmar_unit) dev_dbg(ACRN_DBG_IOMMU, "dmar uint doesn't support snoop control!"); } - if ((dmar_unit->gcmd & DMA_GCMD_TE) != 0U) { dmar_disable_translation(dmar_unit); - } return 0; } @@ -789,10 +788,7 @@ static void dmar_enable(struct dmar_drhd_rt *dmar_unit) static void dmar_disable(struct dmar_drhd_rt *dmar_unit) { - if ((dmar_unit->gcmd & DMA_GCMD_TE) != 0U) { dmar_disable_translation(dmar_unit); - } - dmar_fault_event_mask(dmar_unit); }
Remove unnecessary entry in hslua.h
@@ -24,10 +24,3 @@ int hslua_next(lua_State *L, int index, int *status); /* auxiliary library */ const char *hsluaL_tolstring(lua_State *L, int index, size_t *len); - -/* -** function calling -*/ - -/* Wraps a Haskell function with an userdata object. */ -void hslua_newhsfunwrapper(lua_State *L, HsStablePtr fn);
mat4: fix rmc multiplication
@@ -695,8 +695,8 @@ CGLM_INLINE float glm_mat4_rmc(vec4 r, mat4 m, vec4 c) { vec4 tmp; - glm_mat4_mulv(m, r, tmp); - return glm_vec4_dot(c, tmp); + glm_mat4_mulv(m, c, tmp); + return glm_vec4_dot(r, tmp); } #endif /* cglm_mat_h */
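For reference, what the corrected glm_mat4_rmc computes ("row vector * matrix * column vector") and why the operand order matters:

/* rmc(r, M, c) = dot(r, M * c) = r^T M c
 * The previous code computed dot(c, M * r) = r^T M^T c, which agrees with
 * the intended result only when M is symmetric. */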
Fix merge error with libcrypto.num
@@ -5312,4 +5312,3 @@ OSSL_ENCODER_CTX_set_cleanup ? 3_0_0 EXIST::FUNCTION: OSSL_DECODER_INSTANCE_get_input_type ? 3_0_0 EXIST::FUNCTION: OSSL_ENCODER_CTX_set_passphrase_cb ? 3_0_0 EXIST::FUNCTION: EVP_PKEY_typenames_do_all ? 3_0_0 EXIST::FUNCTION: -OSSL_DECODER_INSTANCE_get_input_type ? 3_0_0 EXIST::FUNCTION:
lyb parser REFACTOR add lyb_completion_node_inner
@@ -933,6 +933,36 @@ lyb_create_term(struct lyd_lyb_ctx *lybctx, const struct lysc_node *snode, struc return ret; } +/** + * @brief Validate inner node, autodelete default values nad create implicit nodes. + * + * @param[in,out] lybctx LYB context. + * @param[in] snode Schema of the inner node. + * @param[in] node Parsed inner node. + * @return LY_ERR value. + */ +static LY_ERR +lyb_validate_node_inner(struct lyd_lyb_ctx *lybctx, const struct lysc_node *snode, struct lyd_node *node) +{ + LY_ERR ret = LY_SUCCESS; + uint32_t impl_opts; + + if (!(lybctx->parse_opts & LYD_PARSE_ONLY)) { + /* new node validation, autodelete CANNOT occur, all nodes are new */ + ret = lyd_validate_new(lyd_node_child_p(node), snode, NULL, NULL); + LY_CHECK_RET(ret); + + /* add any missing default children */ + impl_opts = (lybctx->val_opts & LYD_VALIDATE_NO_STATE) ? LYD_IMPLICIT_NO_STATE : 0; + ret = lyd_new_implicit_r(node, lyd_node_child_p(node), NULL, + NULL, &lybctx->node_when, &lybctx->node_exts, + &lybctx->node_types, impl_opts, NULL); + LY_CHECK_RET(ret); + } + + return ret; +} + /** * @brief Parse opaq node. * @@ -1132,17 +1162,10 @@ lyb_parse_subtree_r(struct lyd_lyb_ctx *lybctx, struct lyd_node *parent, struct LY_CHECK_GOTO(ret, cleanup); } - if (!(lybctx->parse_opts & LYD_PARSE_ONLY)) { - /* new node validation, autodelete CANNOT occur, all nodes are new */ - ret = lyd_validate_new(lyd_node_child_p(node), snode, NULL, NULL); + /* additional procedure for inner node */ + ret = lyb_validate_node_inner(lybctx, snode, node); LY_CHECK_GOTO(ret, cleanup); - /* add any missing default children */ - ret = lyd_new_implicit_r(node, lyd_node_child_p(node), NULL, NULL, &lybctx->node_when, &lybctx->node_exts, - &lybctx->node_types, (lybctx->val_opts & LYD_VALIDATE_NO_STATE) ? LYD_IMPLICIT_NO_STATE : 0, NULL); - LY_CHECK_GOTO(ret, cleanup); - } - if (snode->nodetype & (LYS_RPC | LYS_ACTION | LYS_NOTIF)) { /* rememeber the RPC/action/notification */ lybctx->op_node = node;
Return -1 properly from do_X509_REQ_verify and do_X509_verify
@@ -2322,23 +2322,35 @@ int do_X509_CRL_sign(X509_CRL *x, EVP_PKEY *pkey, const char *md, return rv; } +/* + * do_X509_verify returns 1 if the signature is valid, + * 0 if the signature check fails, or -1 if error occurs. + */ int do_X509_verify(X509 *x, EVP_PKEY *pkey, STACK_OF(OPENSSL_STRING) *vfyopts) { int rv = 0; if (do_x509_init(x, vfyopts) > 0) - rv = (X509_verify(x, pkey) > 0); + rv = X509_verify(x, pkey); + else + rv = -1; return rv; } +/* + * do_X509_REQ_verify returns 1 if the signature is valid, + * 0 if the signature check fails, or -1 if error occurs. + */ int do_X509_REQ_verify(X509_REQ *x, EVP_PKEY *pkey, STACK_OF(OPENSSL_STRING) *vfyopts) { int rv = 0; if (do_x509_req_init(x, vfyopts) > 0) - rv = (X509_REQ_verify_ex(x, pkey, - app_get0_libctx(), app_get0_propq()) > 0); + rv = X509_REQ_verify_ex(x, pkey, + app_get0_libctx(), app_get0_propq()); + else + rv = -1; return rv; }
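With the change, callers can distinguish three outcomes instead of two. A short caller-side sketch (certificate and key setup omitted; only the result handling is shown, mirroring the contract documented in the new comments):

int rv = do_X509_verify(x, pkey, vfyopts);
if (rv > 0) {
    /* signature verified successfully */
} else if (rv == 0) {
    /* signature check failed */
} else {
    /* rv < 0: an error occurred before or during verification */
}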
VERSION bump to version 2.0.112
@@ -62,7 +62,7 @@ set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) # set version of the project set(LIBYANG_MAJOR_VERSION 2) set(LIBYANG_MINOR_VERSION 0) -set(LIBYANG_MICRO_VERSION 111) +set(LIBYANG_MICRO_VERSION 112) set(LIBYANG_VERSION ${LIBYANG_MAJOR_VERSION}.${LIBYANG_MINOR_VERSION}.${LIBYANG_MICRO_VERSION}) # set version of the library set(LIBYANG_MAJOR_SOVERSION 2)
cmake: hide not so important vars so that all vars fit on my screen
@@ -403,6 +403,7 @@ mark_as_advanced ( XercesC_DIR OPENSSL_INCLUDE_DIR LUA_EXECUTABLE # The following settings are internal (not to be changed by users): + FEDORA CARGO_EXECUTABLE DIFF_COMMAND GLib_CONFIG_INCLUDE_DIR @@ -441,6 +442,7 @@ mark_as_advanced ( ADDED_DIRECTORIES ADDED_PLUGINS REMOVED_PLUGINS + REMOVED_TOOLS ADDED_BINDINGS LIBGCRYPTCONFIG_EXECUTABLE jna
Added test for tcp_wrap_fd
IN THE SOFTWARE. */ +#include <libdill.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> #include <string.h> +#include <pthread.h> +#include <sys/socket.h> +#include <sys/time.h> +#include <sys/types.h> +#include <unistd.h> +#include <netdb.h> + +#include <libdill.h> + #include "assert.h" #include "../libdill.h" @@ -89,6 +102,7 @@ coroutine void client4(int port) { } static void move_lots_of_data(size_t nbytes, size_t buffer_size); +static void test_wrap_fd(); int main(void) { char buf[16]; @@ -206,6 +220,8 @@ int main(void) { move_lots_of_data(5000, 2001); /* This and below will fail */ move_lots_of_data(5000, 3000); + test_wrap_fd(); + return 0; } @@ -311,3 +327,46 @@ static void move_lots_of_data(size_t nbytes, size_t buf_size) { hclose(pp[0]); hclose(pp[1]); } + +static void test_wrap_fd() { + struct addrinfo hints, *res; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_INET; + hints.ai_socktype = SOCK_STREAM; + + if (getaddrinfo("libdill.org", "80", &hints, &res) < 0) { + printf("Fail\n"); + exit(1); + } + + int fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol); + if (fd == -1) { + printf("Fail\n"); + exit(1); + } + + if (connect(fd, res->ai_addr, res->ai_addrlen) == -1) { + printf("Fail\n"); + exit(1); + } + + int h = tcp_wrap_fd(fd); + if (h < 0) { + printf("Fail\n"); + exit(1); + } + + h = http_attach(h); + http_sendrequest(h, "GET", "/", -1); + http_sendfield(h, "Host", "libdill.org", -1); + http_done(h, -1); + char reason[256]; + http_recvstatus(h, reason, sizeof(reason), -1); + while (true) { + char name[256]; + char value[256]; + int rc = http_recvfield(h, name, sizeof(name), value, sizeof(value), -1); + if(rc == -1 && errno == EPIPE) break; + } + h = http_detach(h, -1); +} \ No newline at end of file
Remove dead light.state read code This isn't used anymore; furthermore, this is the job of the Poll Manager.
@@ -431,45 +431,6 @@ int DeRestPluginPrivate::getLightState(const ApiRequest &req, ApiResponse &rsp) return REQ_READY_SEND; } - // handle request to force query light state - if (req.hdr.hasKey("Query-State")) - { - bool enabled = false; - int diff = idleTotalCounter - lightNode->lastRead(READ_ON_OFF); - QString attrs = req.hdr.value("Query-State"); - - // only read if time since last read is not too short - if (diff > 3) - { - if (attrs.contains("on")) - { - lightNode->enableRead(READ_ON_OFF); - lightNode->setLastRead(READ_ON_OFF, idleTotalCounter); - enabled = true; - } - - if (attrs.contains("bri")) - { - lightNode->enableRead(READ_LEVEL); - lightNode->setLastRead(READ_LEVEL, idleTotalCounter); - enabled = true; - } - - if (attrs.contains("color") && lightNode->hasColor()) - { - lightNode->enableRead(READ_COLOR); - lightNode->setLastRead(READ_COLOR, idleTotalCounter); - enabled = true; - } - } - - if (enabled) - { - DBG_Printf(DBG_INFO, "Force read the attributes %s, for node %s\n", qPrintable(attrs), qPrintable(lightNode->address().toStringExt())); - processZclAttributes(lightNode); - } - } - // handle ETag if (req.hdr.hasKey(QLatin1String("If-None-Match"))) {
Docs - update component lists for 6.5
</row> <row> <entry>PL/Container and PL/Container images for Python, R </entry> - <entry>2.1.0</entry> + <entry>2.1.1</entry> + </row> + <row> + <entry>PL/Container and image for R </entry> + <entry>3.0.0 Beta</entry> + </row> + <row> + <entry>GreenplumR</entry> + <entry>1.0.0 Beta</entry> </row> <row class="- topic/row "> <entry colname="col1" class="- topic/entry ">PostGIS Spatial and Geographic Objects <body> <p> <ul id="ul_ckf_sfc_hbb"> - <li>Greenplum Platform Extension Framework (PXF) v5.10.1 - PXF, integrated with + <li>Greenplum Platform Extension Framework (PXF) v5.11.1 - PXF, integrated with Greenplum Database 6, provides access to Hadoop, object store, and SQL external data stores. Refer to <xref scope="peer" href="../admin_guide/external/pxf-overview.xml" >Accessing External Data with PXF</xref> in the <cite>Greenplum Database <title id="pm357649">Hadoop Distributions</title> <body> <p>Greenplum Database provides access to HDFS with the Greenplum Platform Extension Framework - (PXF).<ph otherprops="oss-only"> PXF v5.10.0 is integrated with Greenplum Database 6, and + (PXF).<ph otherprops="oss-only"> PXF v5.11.1 is integrated with Greenplum Database 6, and provides access to Hadoop, object store, and SQL external data stores. Refer to <xref scope="peer" href="../admin_guide/external/pxf-overview.xml">Accessing External Data with PXF</xref> in the <cite>Greenplum Database Administrator Guide</cite> for PXF </thead> <tbody> <row> - <entry colname="col1">5.10.0</entry> + <entry colname="col1">5.11.1, 5.10.1</entry> <entry colname="col2">2.x, 3.1+</entry> <entry colname="col3">1.x, 2.x, 3.1+</entry> <entry colname="col4">1.3.2</entry>
rexec: support sending a command with arguments
@@ -114,8 +114,10 @@ static int do_rexec(FAR struct rexec_arg_s *arg) int main(int argc, FAR char **argv) { + char cmd[CONFIG_NSH_LINELEN]; struct rexec_arg_s arg; int option; + int i; memset(&arg, 0, sizeof(arg)); @@ -159,6 +161,13 @@ int main(int argc, FAR char **argv) usage(argv[0]); } - arg.command = argv[optind]; + cmd[0] = '\0'; + for (i = optind; i < argc; i++) + { + strcat(cmd, argv[i]); + strcat(cmd, " "); + } + + arg.command = cmd; return do_rexec(&arg); }
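A standalone sketch of the argument joining this patch performs: everything after the parsed options is concatenated into one space-separated command string. The fixed buffer size and the use of strncat are illustrative choices here, not the NuttX code itself.

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
  char cmd[256] = "";
  int i;

  /* Join every remaining argument into one space-separated command line */

  for (i = 1; i < argc; i++)
    {
      strncat(cmd, argv[i], sizeof(cmd) - strlen(cmd) - 1);
      strncat(cmd, " ", sizeof(cmd) - strlen(cmd) - 1);
    }

  printf("command: \"%s\"\n", cmd);
  return 0;
}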
Find Botan: Reformat CMake code
+# ~~~ # Try to find the Botan library. # # Defines: # # Redistribution and use is allowed according to the terms of the BSD license. # For details see the accompanying COPYING-CMAKE-SCRIPTS file. -# +# ~~~ if (NOT BOTAN_FOUND) include (FindPkgConfig) @@ -18,14 +19,13 @@ if (NOT BOTAN_FOUND) endif () if (BOTAN_FOUND) + # try to compile and link a minimal sample program against libbotan try_compile (HAS_BOTAN_4SURE "${CMAKE_BINARY_DIR}" "${PROJECT_SOURCE_DIR}/src/plugins/crypto/compile_botan.cpp" - CMAKE_FLAGS - -DINCLUDE_DIRECTORIES:STRING=${BOTAN_INCLUDE_DIRS} - -DLINK_LIBRARIES:PATH=${BOTAN_LIBRARIES} - ) + CMAKE_FLAGS -DINCLUDE_DIRECTORIES:STRING=${BOTAN_INCLUDE_DIRS} + -DLINK_LIBRARIES:PATH=${BOTAN_LIBRARIES}) if (NOT HAS_BOTAN_4SURE) message (STATUS "Botan compile/linker test failed")
servo_v4: Disable some features to save space. There is no room left in flash for the ToT firmware, so remove some features to save space. BRANCH=servo TEST=Built the servo v4 firmware; 560 bytes of flash available.
#undef CONFIG_CMD_MEM #undef CONFIG_CMD_SHMEM #undef CONFIG_CMD_SYSLOCK +#undef CONFIG_CMD_TIMERINFO #undef CONFIG_CMD_WAITMS /* Enable control of I2C over USB */ /* PD features */ #define CONFIG_ADC +#undef CONFIG_ADC_WATCHDOG #define CONFIG_BOARD_PRE_INIT /* * If task profiling is enabled then the rx falling edge detection interrupts
zephyr test: Check EC_CMD_TYPEC_STATUS results. After attaching the emulated PD charger, use EC_CMD_TYPEC_STATUS to verify the USB default charging state. TEST=zmake configure --test zephyr/test/drivers BRANCH=none Cq-Depend: chromium:3368262
@@ -113,6 +113,10 @@ static void test_attach_pd_charger(void) struct ec_response_charge_state charge_response; struct host_cmd_handler_args args = BUILD_HOST_COMMAND( EC_CMD_CHARGE_STATE, 0, charge_response, charge_params); + struct ec_params_typec_status typec_params; + struct ec_response_typec_status typec_response; + struct host_cmd_handler_args typec_args = BUILD_HOST_COMMAND( + EC_CMD_TYPEC_STATUS, 0, typec_response, typec_params); /* * TODO(b/209907297): Implement the steps of the test beyond USB default @@ -167,6 +171,22 @@ static void test_attach_pd_charger(void) "USB default current %dmA", charge_response.get_state.chg_current); + typec_params.port = 0; + zassert_ok(host_command_process(&typec_args), + "Failed to get Type-C state"); + zassert_true(typec_response.pd_enabled, + "Charger attached but PD disabled"); + zassert_true(typec_response.dev_connected, + "Charger attached but device disconnected"); + zassert_true(typec_response.sop_connected, + "Charger attached but not SOP capable"); + zassert_equal(typec_response.source_cap_count, 1, + "Charger has %d source PDOs", + typec_response.source_cap_count); + zassert_equal(typec_response.power_role, PD_ROLE_SINK, + "Charger attached, but TCPM power role is %d", + typec_response.power_role); + /* * 3. Wait for SenderResponseTimeout. Expect TCPM to send Request. * We could verify that the Request references the expected PDO, but
Update README.md: fix the ack.vim link
@@ -167,7 +167,7 @@ You may need to use `sudo` or run as root for the make install. ### Vim -You can use Ag with [ack.vim][] by adding the following line to your `.vimrc`: +You can use Ag with [ack.vim](https://github.com/mileszs/ack.vim) by adding the following line to your `.vimrc`: let g:ackprg = 'ag --nogroup --nocolor --column'
update time calculation
@@ -382,9 +382,6 @@ public: uint32_t offset = hardware_.time() - rt_time; t.deserialize(data); - - // SerialBT2.print(t.data.sec); SerialBT2.print(" "); SerialBT2.println(t.data.nsec); - t.data.sec += offset / 1000; t.data.nsec += (offset % 1000) * 1000000UL; @@ -405,8 +402,8 @@ public: void setNow(Time & new_now) { uint32_t ms = hardware_.time(); - sec_offset = new_now.sec;// - ms / 1000 - 1; - nsec_offset = new_now.nsec;// - (ms % 1000) * 1000000UL + 1000000000UL; + sec_offset = new_now.sec; + nsec_offset = new_now.nsec; normalizeSecNSec(sec_offset, nsec_offset); }
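For context, a minimal sketch of the millisecond-offset arithmetic above: the offset splits into whole seconds plus a nanosecond remainder, and the nanosecond field is then normalized back below one second. Function and variable names are illustrative, not the rosserial API.

#include <stdint.h>
#include <stdio.h>

/* Add a millisecond offset to a (sec, nsec) timestamp and normalize. */
static void add_ms_offset(uint32_t *sec, uint32_t *nsec, uint32_t offset_ms)
{
    *sec  += offset_ms / 1000;
    *nsec += (offset_ms % 1000) * 1000000UL;

    /* carry any overflow of the nanosecond field into seconds */
    while (*nsec >= 1000000000UL) {
        *nsec -= 1000000000UL;
        *sec  += 1;
    }
}

int main(void)
{
    uint32_t sec = 10, nsec = 900000000UL;
    add_ms_offset(&sec, &nsec, 1500);   /* 1.5 s later */
    printf("%u.%09u\n", sec, nsec);     /* prints 12.400000000 */
    return 0;
}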
misc: fix typo in set-ipfix-exporter CLI short_help. Type: fix
@@ -569,7 +569,7 @@ VLIB_CLI_COMMAND (set_ipfix_exporter_command, static) = { "collector <ip4-address> [port <port>] " "src <ip4-address> [fib-id <fib-id>] " "[path-mtu <path-mtu>] " - "[template-interval <template-interval>]", + "[template-interval <template-interval>] " "[udp-checksum]", .function = set_ipfix_exporter_command_fn, };
Set correct "hosts" parameter in config for HB Hue version > 0.13.2
@@ -163,6 +163,15 @@ function checkHomebridge { local HOMEBRIDGE="" local IP_ADDRESS="" local HOMEBRIDGE_PIN="" + local hb_hue_version=$(npm list -g homebridge-hue | grep homebridge-hue | cut -d@ -f2 | xargs) + + #hostline used in config + local hostline="\"hosts\": [\"127.0.0.1\"]," + if [ ${hb_hue_version:2:2} -le 13 ]; then + if [ ${hb_hue_version:5:2} -lt 2 ]; then + hostline="\"host\": \"127.0.0.1\"," + fi + fi ## get database config params=( [0]="homebridge" [1]="ipaddress" [2]="homebridge-pin") @@ -333,7 +342,9 @@ function checkHomebridge { APIKEY="$SQL_RESULT" - # create config file if not exists + # if config file exists check if parameters are still valid + # to prevent this skript from overwrite config file set name parameter to something different then Phoscon Homebridge + # else create config file if not exists if [[ -f /home/$MAINUSER/.homebridge/config.json ]]; then # existing config found [[ $LOG_DEBUG ]] && echo "${LOG_DEBUG}found existing homebridge config.json" @@ -375,6 +386,18 @@ function checkHomebridge { sed -i "/\"pin\":/c\ \"pin\": \"${HB_PIN}\"" /home/$MAINUSER/.homebridge/config.json updated=true fi + # check if hostline is still correct + local hostline2="\"host\": \"127.0.0.1\"" + if [[ $hostline == "\"hosts\": [\"127.0.0.1\"]," ]]; then + hostline2="\"hosts\": \[\"127.0.0.1\"\]," + fi + # hostline2 only needed for grep + if [ -z "$(cat /home/$MAINUSER/.homebridge/config.json | grep "$hostline2")" ]; then + # hostline is wrong format for this hb hue version + [[ $LOG_DEBUG ]] && echo "${LOG_DEBUG}update hostline format in config file for this hb hue version" + sed -i "/\"host/c\ $hostline" /home/$MAINUSER/.homebridge/config.json + updated=true + fi if [[ $updated = true ]]; then putHomebridgeUpdated "homebridge" "updated" else @@ -414,7 +437,7 @@ function checkHomebridge { \"platforms\": [ { \"platform\": \"Hue\", - \"host\": \"127.0.0.1\", + ${hostline} \"users\": { \"${BRIDGEID}\": \"${APIKEY}\" },
misprint: SDL_SCANCODE_RGUI mapped twice, SDL_SCANCODE_LGUI not mapped
[SDL_SCANCODE_LCTRL] = tic_key_ctrl, [SDL_SCANCODE_LSHIFT] = tic_key_shift, [SDL_SCANCODE_LALT] = tic_key_alt, -[SDL_SCANCODE_RGUI] = tic_key_ctrl, +[SDL_SCANCODE_LGUI] = tic_key_ctrl, [SDL_SCANCODE_RCTRL] = tic_key_ctrl, [SDL_SCANCODE_RSHIFT] = tic_key_shift, [SDL_SCANCODE_RALT] = tic_key_alt,
Remove filterfd. Neat party trick, but not generally useful enough to belong in libstd.
@@ -12,7 +12,6 @@ pkg std = const espork : (cmd : byte[:][:] -> result((pid, fd, fd, fd), errno)) const sporkdir : (cmd : byte[:][:], dir : byte[:] -> result((pid, fd, fd), errno)) const esporkdir : (cmd : byte[:][:], dir : byte[:] -> result((pid, fd, fd, fd), errno)) - const filterfd : (fd : fd, cmd : byte[:][:] -> result(pid, errno)) ;; const run = {cmd @@ -117,26 +116,6 @@ const esporkdir = {cmd, dir -> `Err err } -const filterfd = {fd, cmd - var outfds : fd[2] - var err - - err = pipe(&outfds) - if err != Enone - -> `Err err - ;; - - match sporkfd(cmd, "", [fd, -1], outfds, [-1, 2]) - | `Ok pid: - dup2(outfds[0], fd) - close(outfds[0]); - close(outfds[1]); - -> `Ok pid - | `Err e: - -> `Err e - ;; -} - const sporkfd = {cmd, dir, infds, outfds, errfds var pid
nshlib/nsh_netcmds.c: fix nxstyle warning
/**************************************************************************** * apps/nshlib/nsh_netcmds.c * - * Copyright (C) 2007-2012, 2014-2015, 2017 Gregory Nutt. All rights reserved. + * Copyright (C) 2007-2012, 2014-2015, 2017 Gregory Nutt. + * All rights reserved. * Author: Gregory Nutt <[email protected]> * * Redistribution and use in source and binary forms, with or without @@ -204,7 +205,8 @@ static int ifconfig_callback(FAR struct nsh_vtbl_s *vtbl, FAR char *devname) /* Construct the full path to the /proc/net entry for this device */ - snprintf(buffer, IFNAMSIZ + 12, CONFIG_NSH_PROC_MOUNTPOINT "/net/%s", devname); + snprintf(buffer, IFNAMSIZ + 12, + CONFIG_NSH_PROC_MOUNTPOINT "/net/%s", devname); nsh_catfile(vtbl, "ifconfig", buffer); return OK; @@ -243,7 +245,7 @@ int tftpc_parseargs(FAR struct nsh_vtbl_s *vtbl, int argc, char **argv, break; case 'h': - if (!netlib_ipv4addrconv(optarg, (FAR unsigned char*)&args->ipaddr)) + if (!netlib_ipv4addrconv(optarg, (FAR uint8_t *)&args->ipaddr)) { nsh_error(vtbl, g_fmtarginvalid, argv[0]); badarg = true; @@ -419,7 +421,8 @@ static int nsh_foreach_netdev(nsh_netdev_callback_t callback, ****************************************************************************/ #ifdef HAVE_HWADDR -static inline bool nsh_addrconv(FAR const char *hwstr, FAR mac_addr_t *macaddr) +static inline bool nsh_addrconv(FAR const char *hwstr, + FAR mac_addr_t *macaddr) { /* REVISIT: How will we handle Ethernet and SLIP networks together? */ @@ -440,7 +443,8 @@ static inline bool nsh_addrconv(FAR const char *hwstr, FAR mac_addr_t *macaddr) ****************************************************************************/ #ifdef HAVE_HWADDR -static inline void nsh_sethwaddr(FAR const char *ifname, FAR mac_addr_t *macaddr) +static inline void nsh_sethwaddr(FAR const char *ifname, + FAR mac_addr_t *macaddr) { #if defined(CONFIG_NET_ETHERNET) netlib_setmacaddr(ifname, *macaddr); @@ -539,7 +543,8 @@ int cmd_ifdown(FAR struct nsh_vtbl_s *vtbl, int argc, char **argv) ifname = argv[1]; ret = netlib_ifdown(ifname); - nsh_output(vtbl, "ifdown %s...%s\n", ifname, (ret == OK) ? "OK" : "Failed"); + nsh_output(vtbl, "ifdown %s...%s\n", + ifname, (ret == OK) ? "OK" : "Failed"); return ret; } #endif @@ -722,7 +727,6 @@ int cmd_ifconfig(FAR struct nsh_vtbl_s *vtbl, int argc, char **argv) #ifdef HAVE_HWADDR /* Set Hardware Ethernet MAC address */ - /* REVISIT: How will we handle Ethernet and SLIP networks together? */ if (hw != NULL) {
multiplace_move_F: add GPU support
@@ -76,7 +76,7 @@ const void* multiplace_read(struct multiplace_array_s* ptr, const void* ref) if (cuda_ondevice(ref)) { if (NULL == ptr->ptr_gpu) - ptr->ptr_gpu = md_gpu_move(ptr->N, ptr->dims, ptr->ptr_cpu, ptr->size); + ptr->ptr_gpu = md_gpu_move(ptr->N, ptr->dims, ptr->ptr_ref, ptr->size); return ptr->ptr_gpu; } @@ -124,17 +124,18 @@ struct multiplace_array_s* multiplace_move(int D, const long dimensions[D], size struct multiplace_array_s* multiplace_move_F(int D, const long dimensions[D], size_t size, const void* ptr) { + auto result = multiplace_alloc(D, dimensions, size); + result->ptr_ref = (void*)ptr; + #ifdef USE_CUDA if (cuda_ondevice(ptr)) { - struct multiplace_array_s* ret = multiplace_move(D, dimensions, size, ptr); - md_free(ptr); - return ret; - } + result->ptr_gpu = (void*)ptr; + cuda_sync_device(); + } else #endif - - auto result = multiplace_alloc(D, dimensions, size); result->ptr_cpu = (void*)ptr; + return result; }
Keep file extension (dylib).
@@ -231,7 +231,6 @@ jobs: find . -name "*$(python -c "import platform; print(''.join(platform.python_version_tuple()[0:2]))")*macosx*.whl" -exec python -m pip install {} \; rpath=$(dirname $(dirname $(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())")))/lib libpython=$(basename $(find ${rpath} -maxdepth 1 -name *.dylib)) - libpython="${libpython%.*}" find $(python -c "import site; print(site.getsitepackages()[0])") -name '*tinysplinepython*' -exec install_name_tool -add_rpath ${rpath} {} \; -exec install_name_tool -change @rpath/Python @rpath/${libpython} {} \; -exec otool -L {} \; elif [ "$RUNNER_OS" == "Windows" ]; then find . -name "*$(python -c "import platform; print(''.join(platform.python_version_tuple()[0:2]))")*win*.whl" -exec python -m pip install {} \;
add rpm target in Makefile
@@ -4,6 +4,11 @@ ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # Base path used to install. DESTDIR ?= /usr/local +# Variables for building rpm +VERSION ?= 0.2.0 +RELEASE_TARBALL_URL ?= https://github.com/alibaba/inclavare-containers/archive/v$(VERSION).tar.gz +RPMBUILD_DIR ?= /tmp/inclavare-containers/shim/rpmbuild + ifneq "$(strip $(shell command -v go 2>/dev/null))" "" GOOS ?= $(shell go env GOOS) GOARCH ?= $(shell go env GOARCH) @@ -39,7 +44,7 @@ SHIM_CGO_ENABLED ?= 0 BINARIES=$(addprefix bin/,$(COMMANDS)) -.PHONY: clean all build binaries help install uninstall +.PHONY: clean all binaries help install uninstall rpm .DEFAULT: default all: binaries @@ -49,11 +54,19 @@ bin/containerd-shim-rune-v2: @echo "bin/containerd-shim-rune-v2" @CGO_ENABLED=${SHIM_CGO_ENABLED} GOOS=${GOOS} go build ${GO_BUILD_FLAGS} -o bin/containerd-shim-rune-v2 ./cmd/containerd-shim-rune-v2 -binaries: $(BINARIES) ## build binaries +binaries: clean $(BINARIES) ## build binaries clean: ## clean up binaries @echo "$@" @rm -f $(BINARIES) + @rm -fr ${RPMBUILD_DIR} + +rpm: + @mkdir -p $(RPMBUILD_DIR) + @echo "%_topdir $(RPMBUILD_DIR)" >> ~/.rpmmacros + @mkdir -p $(RPMBUILD_DIR)/{BUILD,RPMS,SOURCES,SPECS,SRPMS} + @wget -P $(RPMBUILD_DIR)/SOURCES $(RELEASE_TARBALL_URL) + $(MAKE) -C dist/centos rpm RPMBUILD_DIR=$(RPMBUILD_DIR) install: ## install binaries @echo "$@ $(BINARIES)"
webdojo: use /~_~/slog endpoint As updated in
@@ -18,8 +18,8 @@ export default class Subscription { } setupSlog() { - const slog = new EventSource('/~/slog', { withCredentials: true }); let available = false; + const slog = new EventSource('/~_~/slog', { withCredentials: true }); slog.onopen = e => { console.log('slog: opened stream');
Update docs for buffer/push-word: should be little endian, not big endian.
@@ -387,7 +387,7 @@ static const JanetReg buffer_cfuns[] = { "buffer/push-word", cfun_buffer_word, JDOC("(buffer/push-word buffer x)\n\n" "Append a machine word to a buffer. The 4 bytes of the integer are appended " - "in twos complement, big endian order, unsigned. Returns the modified buffer. Will " + "in twos complement, little endian order, unsigned. Returns the modified buffer. Will " "throw an error if the buffer overflows.") }, {
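A small illustration of what "little endian order" means for buffer/push-word: the least significant byte of the 32-bit word is appended first. This is a generic C sketch, not Janet's implementation.

#include <stdint.h>
#include <stdio.h>

/* Append the 4 bytes of x to out, least significant byte first. */
static void push_word_le(uint8_t out[4], uint32_t x)
{
    out[0] = (uint8_t)(x & 0xff);
    out[1] = (uint8_t)((x >> 8) & 0xff);
    out[2] = (uint8_t)((x >> 16) & 0xff);
    out[3] = (uint8_t)((x >> 24) & 0xff);
}

int main(void)
{
    uint8_t b[4];
    push_word_le(b, 0x11223344u);
    printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]); /* 44 33 22 11 */
    return 0;
}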
Add entry point for Python3 UDFs
@@ -27,6 +27,7 @@ def onregister_yql_python_udf(unit, *args): 'yql/udfs/common/python/main' ] if not py3 else [ 'library/python/runtime_py3', + 'yql/udfs/common/python/main_py3' ]) else: flavor = 'System'
[scripts][do-qemuarm] set script=no when using tun/tap
@@ -105,7 +105,7 @@ elif (( $DO_NET_TAP )); then # sudo tunctl -u $(whoami) -t ${IFNAME} # sudo ifconfig ${IFNAME} up # sudo ip link set ${IFNAME} master ${BRIDGE} - ARGS+=" -netdev tap,id=vmnic,ifname=qemu0,downscript=no" + ARGS+=" -netdev tap,id=vmnic,ifname=qemu0,script=no,downscript=no" ARGS+=" -device virtio-net-device,netdev=vmnic" #SUDO="sudo " else
hw/xive: Use XIVE_VSD_SIZE more. I think Cedric forgot this patch at some point.
@@ -1672,7 +1672,7 @@ static bool xive_prealloc_tables(struct xive *x) } memset(x->eq_ind_base, 0, al); xive_dbg(x, "EQi at %p size 0x%llx\n", x->eq_ind_base, al); - x->eq_ind_count = XIVE_EQ_TABLE_SIZE / 8; + x->eq_ind_count = XIVE_EQ_TABLE_SIZE / XIVE_VSD_SIZE; /* Indirect VP table. Limited to one top page. */ al = ALIGN_UP(XIVE_VP_TABLE_SIZE, 0x10000); @@ -1686,7 +1686,7 @@ static bool xive_prealloc_tables(struct xive *x) return false; } xive_dbg(x, "VPi at %p size 0x%llx\n", x->vp_ind_base, al); - x->vp_ind_count = XIVE_VP_TABLE_SIZE / 8; + x->vp_ind_count = XIVE_VP_TABLE_SIZE / XIVE_VSD_SIZE; memset(x->vp_ind_base, 0, al); /* Populate/initialize VP/EQs indirect backing */
Run perltidy, use strict+warnings on mkrc.pl
# in the file LICENSE in the source distribution or at # https://www.openssl.org/source/license.html +use strict; +use warnings; use lib "."; use configdata; use File::Spec::Functions; my $versionfile = catfile( $config{sourcedir}, "include/openssl/opensslv.h" ); +my ( $ver, $v1, $v2, $v3, $v4, $beta, $version ); + open FD, $versionfile or die "Couldn't open include/openssl/opensslv.h: $!\n"; while (<FD>) { if (/OPENSSL_VERSION_NUMBER\s+(0x[0-9a-f]+)/i) { @@ -30,12 +34,13 @@ while(<FD>) { } close(FD); -$filename = $ARGV[0]; $filename =~ /(.*)\.([^.]+)$/; -$basename = $1; -$extname = $2; +my $filename = $ARGV[0]; +$filename =~ /(.*)\.([^.]+)$/; +my $basename = $1; +my $extname = $2; -if ($extname =~ /dll/i) { $description = "OpenSSL shared library"; } -else { $description = "OpenSSL application"; } +my $description = "OpenSSL application"; +$description = "OpenSSL shared library" if $extname =~ /dll/i; print <<___; #include <winver.h>
rpio: Use pypi.bbclass. This should resolve recent issues with the non-https URL in SRC_URI.
@@ -5,12 +5,10 @@ SECTION = "devel/python" LICENSE = "LGPLv3+" LIC_FILES_CHKSUM = "file://README.rst;beginline=41;endline=53;md5=d5d95d7486a4d98c999675c23196b25a" -SRCNAME = "RPIO" +PYPI_PACKAGE = "RPIO" +inherit pypi -SRC_URI = "http://pypi.python.org/packages/source/R/RPIO/${SRCNAME}-${PV}.tar.gz \ - file://0001-include-sys-types.h-explicitly-for-getting-caddr_t-d.patch \ - " -S = "${WORKDIR}/${SRCNAME}-${PV}" +SRC_URI += "file://0001-include-sys-types.h-explicitly-for-getting-caddr_t-d.patch" inherit setuptools
[ML302v003][#821] Enable the random32 function
#define GENERATE_KEY_REPEAT_TIMES 100 -/* + uint32_t random32(void) { static uint32_t seed = 0; @@ -65,7 +65,7 @@ uint32_t random32(void) return seed; } -*/ + BOAT_RESULT BoatRandom(BUINT8 *output, BUINT32 outputLen, void *rsvd)
Bump to version 2.05.68
@@ -75,7 +75,7 @@ GIT_COMMIT_DATE = $$system("git show -s --format=%ct $$GIT_TAG") # Version Major.Minor.Build # Important: don't change the format of this line since it's parsed by scripts! -DEFINES += GW_SW_VERSION=\\\"2.05.67\\\" +DEFINES += GW_SW_VERSION=\\\"2.05.68\\\" DEFINES += GW_SW_DATE=$$GIT_COMMIT_DATE DEFINES += GW_API_VERSION=\\\"1.16.0\\\" DEFINES += GIT_COMMMIT=\\\"$$GIT_COMMIT\\\"
Fix Vulkan device name
@@ -2359,7 +2359,7 @@ static VkResult overlay_CreateSwapchainKHR( // ss << "." << VK_VERSION_PATCH(prop.driverVersion); // } // ss << ")"; - swapchain_data->sw_stats.deviceName = ss.str(); + swapchain_data->sw_stats.deviceName = prop.deviceName; get_device_name(prop.vendorID, prop.deviceID, swapchain_data->sw_stats); if(driverProps.driverID == VK_DRIVER_ID_NVIDIA_PROPRIETARY){ ss << "NVIDIA";
Fix memory leak in mac_newctx() on error
@@ -74,6 +74,7 @@ static void *mac_newctx(void *provctx, const char *propq, const char *macname) return pmacctx; err: + OPENSSL_free(pmacctx->propq); OPENSSL_free(pmacctx); EVP_MAC_free(mac); return NULL;
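The pattern behind this fix, shown as a standalone sketch: the error path must release every member allocated so far, not just the containing struct. The names below are hypothetical and only mirror the shape of the provider code.

#include <stdlib.h>
#include <string.h>

struct macctx {
    char *propq;
    /* ... other members ... */
};

static struct macctx *macctx_new(const char *propq)
{
    struct macctx *ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL)
        return NULL;

    if (propq != NULL) {
        ctx->propq = strdup(propq);
        if (ctx->propq == NULL)
            goto err;
    }

    /* ... further initialization that can fail jumps to err ... */
    return ctx;

err:
    free(ctx->propq);   /* the leak fixed above: free the duplicated string too */
    free(ctx);
    return NULL;
}

int main(void)
{
    struct macctx *c = macctx_new("provider=default");
    if (c) { free(c->propq); free(c); }
    return 0;
}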
add cuda debugger to debugger.run
-- imports import("core.base.option") import("core.project.config") +import("detect.tools.find_cudagdb") +import("detect.tools.find_cudamemcheck") import("detect.tools.find_gdb") import("detect.tools.find_lldb") import("detect.tools.find_windbg") @@ -49,6 +51,27 @@ function _run_gdb(program, argv) return true end +-- run cuda-gdb +function _run_cudagdb(program, argv) + + -- find cudagdb + local gdb = find_cudagdb({program = config.get("debugger")}) + if not gdb then + return false + end + + -- patch arguments + argv = argv or {} + table.insert(argv, 1, program) + table.insert(argv, 1, "--args") + + -- run it + os.execv(gdb, argv) + + -- ok + return true +end + -- run lldb function _run_lldb(program, argv) @@ -95,6 +118,26 @@ function _run_windbg(program, argv) return true end +-- run cuda-memcheck +function _run_cudamemcheck(program, argv) + + -- find cudamemcheck + local cudamemcheck = find_cudamemcheck({program = config.get("debugger")}) + if not cudamemcheck then + return false + end + + -- patch arguments + argv = argv or {} + table.insert(argv, 1, program) + + -- run it + os.execv(cudamemcheck, argv) + + -- ok + return true +end + -- run x64dbg function _run_x64dbg(program, argv) @@ -176,6 +219,8 @@ function main(program, argv) { {"lldb" , _run_lldb} , {"gdb" , _run_gdb} + , {"cudagdb" , _run_cudagdb} + , {"cudamemcheck", _run_cudamemcheck} } -- for windows target or on windows? @@ -190,6 +235,16 @@ function main(program, argv) local debugger = config.get("debugger") if debugger then debugger = debugger:lower() + + -- try exactmatch first + for _, _debugger in ipairs(debuggers) do + if debugger:startswith(_debugger[1]) then + if _debugger[2](program, argv) then + return + end + end + end + for _, _debugger in ipairs(debuggers) do if debugger:find(_debugger[1]) then if _debugger[2](program, argv) then
Add position and box-sizing setters
@@ -1261,27 +1261,39 @@ void Widget_SetMargin( LCUI_Widget w, float top, float right, void Widget_Move( LCUI_Widget w, float left, float top ) { - SetStyle( w->custom_style, key_top, top, px ); - SetStyle( w->custom_style, key_left, left, px ); + Widget_SetStyle( w, key_top, top, px ); + Widget_SetStyle( w, key_left, left, px ); Widget_UpdateStyle( w, FALSE ); } void Widget_Resize( LCUI_Widget w, float width, float height ) { - SetStyle( w->custom_style, key_width, width, px ); - SetStyle( w->custom_style, key_height, height, px ); + Widget_SetStyle( w, key_width, width, px ); + Widget_SetStyle( w, key_height, height, px ); Widget_UpdateStyle( w, FALSE ); } void Widget_Show( LCUI_Widget w ) { - SetStyle( w->custom_style, key_visible, TRUE, int ); + Widget_SetStyle( w, key_visible, TRUE, int ); Widget_UpdateStyle( w, FALSE ); } void Widget_Hide( LCUI_Widget w ) { - SetStyle( w->custom_style, key_visible, FALSE, int ); + Widget_SetStyle( w, key_visible, FALSE, int ); + Widget_UpdateStyle( w, FALSE ); +} + +void Widget_SetPosition( LCUI_Widget w, LCUI_StyleValue position ) +{ + Widget_SetStyle( w, key_position, position, style ); + Widget_UpdateStyle( w, FALSE ); +} + +void Widget_SetBoxSizing( LCUI_Widget w, LCUI_StyleValue sizing ) +{ + Widget_SetStyle( w, key_box_sizing, sizing, style ); Widget_UpdateStyle( w, FALSE ); }
hpet: init fails with no counter increment; retry the check a few times before giving up
@@ -168,10 +168,11 @@ boolean init_hpet(heap misc, heap virtual_pagesized, heap pages) { timers = create_id_heap(misc, 0, field_from_u64(hpet->capid, HPET_CAPID_NUM_TIM_CAP) + 1, 1); hpet->conf |= U64_FROM_BIT(HPET_CONF_ENABLE_CNF_SHIFT); u64 prev = hpet->mainCounterRegister; - if (prev == hpet->mainCounterRegister) { + for (int i = 0; i < 10; i ++) { + if (prev == hpet->mainCounterRegister) + continue; + return true; + } console("Error: No increment HPET main counter\n"); return false; } - - return true; -}
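The retry logic added here, reduced to a standalone sketch: read the free-running counter a bounded number of times and fail only if it never advances. read_counter() is a hypothetical stand-in for the HPET main counter register.

#include <stdbool.h>
#include <stdint.h>

extern uint64_t read_counter(void);   /* stand-in for hpet->mainCounterRegister */

bool counter_increments(void)
{
    uint64_t prev = read_counter();
    int i;

    for (i = 0; i < 10; i++) {
        if (read_counter() == prev)
            continue;          /* no movement yet, poll again */
        return true;           /* the counter advanced at least once */
    }

    return false;              /* never advanced in 10 reads: report failure */
}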
apps/Makefile: Fix one missing instance of CONFIG_EXAMPLES_NSH* that was not changed to CONFIG_SYSTEM_NSH*.
@@ -76,7 +76,7 @@ BIN = libapps$(LIBEXT) # Symbol table for loadable apps. SYMTABSRC = $(APPDIR)$(DELIM)symtab_apps.c -SYMTABOBJ = $(APPDIR)$(DELIM)symtab_apps.o +SYMTABOBJ = $(APPDIR)$(DELIM)symtab_apps$(OBJEXT) # Build targets @@ -103,7 +103,7 @@ $(foreach SDIR, $(CLEANDIRS), $(eval $(call SDIR_template,$(SDIR),clean))) $(foreach SDIR, $(CLEANDIRS), $(eval $(call SDIR_template,$(SDIR),distclean))) make_symbols: -ifeq ($(CONFIG_EXAMPLES_NSH_SYMTAB),y) +ifeq ($(CONFIG_SYSTEM_NSH_SYMTAB),y) mkdir -p $(BIN_DIR) $(Q) $(APPDIR)$(DELIM)tools$(DELIM)mksymtab.sh $(BIN_DIR) $(SYMTABSRC) $(call COMPILE, $(SYMTABSRC), $(SYMTABOBJ))
Getting Started Guide: add instructions for other OSs. Add instructions on how to build the hypervisor and device model on other operating systems such as Ubuntu/Debian, Fedora/Redhat, and CentOS.
@@ -281,32 +281,69 @@ Build ACRN from Source ********************** If you would like to build ACRN hypervisor and device model from source, -follow these steps, using your NUC as a development system: +follow these steps. -#. On your Clear Linux system, install the os-clr-on-clr bundle to get - the necessary tools. +Install build tools and dependencies +==================================== - .. code-block:: none +ARCN development is supported on popular Linux distributions, +each with their own way to install development tools: + +* On a Clear Linux development system, install the ``os-clr-on-clr`` bundle to get + the necessary tools: + + .. code-block:: console + + $ sudo swupd bundle-add os-clr-on-clr + +* On a Ubuntu/Debian development system: + + .. code-block:: console + + $ sudo apt install git \ + gnu-efi \ + libssl-dev \ + libpciaccess-dev \ + uuid-dev + +* On a Fedora/doc/Redhat development system: - # swupd bundle-add os-clr-on-clr + .. code-block:: console + + $ sudo dnf install gcc \ + libuuid-devel \ + openssl-devel \ + libpciaccess-devel + +* On a CentOS development system: + + .. code-block:: console + + $ sudo yum install gcc \ + libuuid-devel \ + openssl-devel \ + libpciaccess-devel + +Build the hypervisor and device model +===================================== #. Download the ACRN hypervisor and build it. - .. code-block:: none + .. code-block:: console - # git clone https://github.com/projectacrn/acrn-hypervisor - # cd acrn-hypervisor - # make PLATFORM=uefi + $ git clone https://github.com/projectacrn/acrn-hypervisor + $ cd acrn-hypervisor + $ make PLATFORM=uefi The build results are found in the ``build`` directory. #. Download the ACRN device model and build it. - .. code-block:: none + .. code-block:: console - # git clone https://github.com/projectacrn/acrn-devicemodel - # cd acrn-devicemodel - # make + $ git clone https://github.com/projectacrn/acrn-devicemodel + $ cd acrn-devicemodel + $ make The build results are found in the ``build`` directory.
Fix header in blockquote
:: if column has retreated, adjust stack =. ..$ (back col.saw) :: - =^ val sty.saw + =^ col-ok sty.saw ?+ (sub col.saw inr.ind) [| sty.saw] :: columns advanced $0 [& sty.saw] $8 [& %new %poem] == - ?. val ..$(err `[p.loc col.saw]) + ?. col-ok ..$(err `[p.loc col.saw]) :: =. inr.ind col.saw :: :: ++ push :: push context |=(mite +>(hac [cur hac], cur [+< ~])) + :: ++ entr :: enter container |= typ/mite ^+ +> :: ++ head :: parse heading %+ cook - |= a/manx:twig ^- marl:twig - =. a.g.a :_(a.g.a [%id (sanitize-to-id c.a)]) - [a]~ - :: - ;~ plug + |= {haxes/tape kids/tarp} ^- tarp + =/ tag (crip 'h' <(lent haxes)>) :: e.g. ### -> %h3 + =/ id (contents-to-id kids) + [[tag [%id id]~] kids]~ :: - :: # -> 1 -> %h1, ### -> 3 -> %h3, etc - :(cook |=(a/@u /(crip "h{<a>}")) lent (stun [1 6] hax)) - :: - ;~(pfix whit down) - == + ;~(pfix (star ace) ;~((glue whit) (stun [1 6] hax) down)) :: - ++ sanitize-to-id :: # text into elem id + ++ contents-to-id :: # text into elem id |= a/(list tuna:twig) ^- tape =; raw/tape %+ turn raw
Fix compiler warning introduced in
@@ -1364,7 +1364,7 @@ DefineRange(CreateRangeStmt *stmt) AclResult aclresult; ListCell *lc; ObjectAddress address; - ObjectAddress mltrngaddress; + ObjectAddress mltrngaddress PG_USED_FOR_ASSERTS_ONLY; Oid castFuncOid; /* Convert list of names to a name and namespace */
Update Encoding.md: add a missing "-"
@@ -135,7 +135,7 @@ Most other texture compression formats have a static channel assignment in terms of the expected data correlation. For example, ETC2+EAC assumes that RGB are always correlated and that alpha is non-correlated. ASTC can automatically encode data as either fully correlated across all 4 channels, or with any one -channel assigned to a separate non correlated partition to the other three. +channel assigned to a separate non-correlated partition to the other three. The non-correlated channel can be changed on a block-by-block basis, so the compressor can dynamically adjust the coding based on the data present in the
Doc: release_notes_2.2 update
@@ -195,7 +195,7 @@ Build the ACRN Hypervisor on Ubuntu .. code-block:: none - $ make all BOARD_FILE=misc/vm-configs/xmls/board-xmls/whl-ipc-i7.xml SCENARIO_FILE=misc/vm-configs/xmls/config-xmls/whl-ipc-i7/industry.xml RELEASE=0 + $ make all BOARD_FILE=misc/vm_configs/xmls/board-xmls/whl-ipc-i7.xml SCENARIO_FILE=misc/vm_configs/xmls/config-xmls/whl-ipc-i7/industry.xml RELEASE=0 $ sudo make install $ sudo cp build/hypervisor/acrn.bin /boot/acrn/
pg_dump: improve big-O of partition dependency generation. We were doing a linear search through the list of tables to find the correct OID. That's what findTableByOid() is for -- it does a binary search on the sorted list of tables -- so use it instead.
@@ -16064,24 +16064,26 @@ static void setExtPartDependency(TableInfo *tblinfo, int numTables) { int i; - int j; for (i = 0; i < numTables; i++) { TableInfo *tbinfo = &(tblinfo[i]); + TableInfo *parent; Oid parrelid = tbinfo->parrelid; if (parrelid == 0) continue; - for (j = 0; j < numTables; j++) + parent = findTableByOid(parrelid); + if (!parent) { - TableInfo *ti = &(tblinfo[j]); - if (ti->dobj.catId.oid != parrelid) - continue; - addObjectDependency(&ti->dobj, tbinfo->dobj.dumpId); - removeObjectDependency(&tbinfo->dobj, ti->dobj.dumpId); + write_msg(NULL, "parent table (OID %u) of partition \"%s\" (OID %u) not found\n", + parrelid, tbinfo->dobj.name, tbinfo->dobj.catId.oid); + exit_nicely(1); } + + addObjectDependency(&parent->dobj, tbinfo->dobj.dumpId); + removeObjectDependency(&tbinfo->dobj, parent->dobj.dumpId); } }
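For reference, a minimal sketch of the lookup this commit switches to: a binary search over a table list sorted by OID, which is what findTableByOid() provides in pg_dump. The struct and function below are simplified illustrations, not the real pg_dump types.

#include <stddef.h>
#include <stdio.h>

typedef unsigned int Oid;

typedef struct {
    Oid oid;
    const char *name;
} TableInfoLite;

/* Binary search over a table array sorted by ascending OID. */
static const TableInfoLite *find_table_by_oid(const TableInfoLite *tbl,
                                              size_t n, Oid oid)
{
    size_t lo = 0, hi = n;

    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;

        if (tbl[mid].oid == oid)
            return &tbl[mid];
        if (tbl[mid].oid < oid)
            lo = mid + 1;
        else
            hi = mid;
    }

    return NULL;   /* not found */
}

int main(void)
{
    const TableInfoLite tables[] = { {10, "a"}, {42, "b"}, {99, "c"} };
    const TableInfoLite *t = find_table_by_oid(tables, 3, 42);

    printf("%s\n", t ? t->name : "not found");   /* prints "b" */
    return 0;
}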
DH_check[_params]() use libctx of the dh for prime checks
@@ -73,7 +73,7 @@ int DH_check_params(const DH *dh, int *ret) BN_CTX *ctx = NULL; *ret = 0; - ctx = BN_CTX_new(); + ctx = BN_CTX_new_ex(dh->libctx); if (ctx == NULL) goto err; BN_CTX_start(ctx); @@ -155,7 +155,7 @@ int DH_check(const DH *dh, int *ret) if (!DH_check_params(dh, ret)) return 0; - ctx = BN_CTX_new(); + ctx = BN_CTX_new_ex(dh->libctx); if (ctx == NULL) goto err; BN_CTX_start(ctx);
Fix the specs-gen.c path in the Makefile
@@ -30,7 +30,7 @@ ACTOR_GEN_SRC = cee-utils/orka-utils.c \ cee-utils/json-struct.c \ cee-utils/json-printf.c \ cee-utils/log.c \ - cee-utils/specs-gen.c + specs/specs-gen.c ACTOR_GEN_OBJS := $(ACTOR_GEN_SRC:%=$(ACTOR_OBJDIR)/%.o)
Put new fortunastakes at the last rank until we receive their active message
@@ -569,7 +569,7 @@ bool GetFortunastakeRanks(CBlockIndex* pindex) BOOST_FOREACH(CFortunaStake& mn, vecFortunastakeScoresList) { i++; - if (mn.nTimeRegistered > pindex->GetBlockTime()) { + if (mn.nTimeRegistered > pindex->GetBlockTime() || mn.active == 0) { vecFortunastakeScores.push_back(vecFortunastakeScores[i]); vecFortunastakeScores.erase(vecFortunastakeScores.begin() + i); }