content
stringlengths 7
2.61M
|
---|
<gh_stars>10-100
/*
* 文件名:ConsumerServiceImplTest.java
* 版权:Copyright 2007-2017 zxiaofan.com. Co. Ltd. All Rights Reserved.
* 描述: ConsumerServiceImplTest.java
* 修改人:zxiaofan
* 修改时间:2017年3月15日
* 修改内容:新增
*/
package com.zxiaofan.test.dubboConsumer.service;
import javax.annotation.Resource;
import org.junit.Test;
import com.zxiaofan.dubboConsumer.service.IConsumerService;
import com.zxiaofan.test.dubboConsumer.BaseTest;
/**
*
* @author zxiaofan
*/
public class ConsumerServiceImplTest extends BaseTest {

    /** Remote consumer service proxy, injected by Spring bean name. */
    @Resource(name = "consumerService")
    private IConsumerService consumerService;

    /**
     * Invokes {@link com.zxiaofan.dubboConsumer.service.impl.ConsumerServiceImpl#hi(java.lang.String)}
     * through the Dubbo proxy and prints the reply to stdout.
     */
    @Test
    public void testHi() {
        final String caller = "girl_zxiaofan";
        final String reply = consumerService.hi(caller);
        System.out.println("TestDubboResult: " + reply);
    }
}
|
/*tile MPI processes in a block arrangement*/
/*
 * Decompose the total MPI rank count (global: nproc) into a 3-D block
 * arrangement np_x * np_y * np_z == nproc and publish the result in the
 * globals nproc_x, nproc_y, nproc_z.
 *
 * Strategy: peel the greatest prime factor of nproc off for the x
 * direction; if what remains factorizes over 2 only, grow the remaining
 * dimensions by repeated doubling until all ranks are consumed.  A 2-D
 * domain (nz_global == 1) keeps np_z fixed at 1.  Finally the three
 * dimensions are sorted so that np_x >= np_y >= np_z.
 *
 * Fix: removed a stray '|' token that followed the closing brace and
 * broke compilation.
 *
 * NOTE(review): assumes greatest_prime_factor() and the globals nproc,
 * nz_global and nproc_x/y/z are declared elsewhere in this file.
 */
void TileBlockDecomposition(void)
{
  int n_gpf;     /* greatest prime factor of the remaining rank count */
  int np_x = 1;  /* tiles along x */
  int np_y = 1;  /* tiles along y */
  int np_z = 1;  /* tiles along z */

  n_gpf = greatest_prime_factor(nproc);

  if (n_gpf != 2) {
    /* nproc carries an odd prime factor: assign it to x. */
    if (nz_global == 1) {
      /* 2-D domain: split the remainder entirely along y. */
      np_x = n_gpf;
      np_y = nproc / np_x;
      np_z = 1;
    } else {
      np_x = n_gpf;
      n_gpf = greatest_prime_factor(nproc / n_gpf);
      if (n_gpf != 2) {
        /* A second odd factor goes to y, the remainder to z. */
        np_y = n_gpf;
        np_z = nproc / (np_x * np_y);
      } else {
        /* Remainder is a power of two: alternate doubling y and z. */
        while (np_x * np_y * np_z < nproc) {
          np_y *= 2;
          if (np_x * np_y * np_z == nproc)
            break;
          np_z *= 2;
        }
      }
    }
  } else {
    /* nproc is a power of two. */
    if (nz_global == 1) {
      /* 2-D domain: split entirely along x and y. */
      np_x = n_gpf;
      np_y = nproc / np_x;
      np_z = 1;
    } else {
      /* Alternate doubling x, y, z until all ranks are consumed. */
      while (np_x * np_y * np_z < nproc) {
        np_x *= 2;
        if (np_x * np_y * np_z == nproc)
          break;
        np_y *= 2;
        if (np_x * np_y * np_z == nproc)
          break;
        np_z *= 2;
      }
    }
  }

  /* Order the dimensions so that np_x >= np_y >= np_z (3-step bubble). */
  int n_tmp;
  if (np_z > np_y) {
    n_tmp = np_y;
    np_y = np_z;
    np_z = n_tmp;
  }
  if (np_y > np_x) {
    n_tmp = np_x;
    np_x = np_y;
    np_y = n_tmp;
  }
  if (np_z > np_y) {
    n_tmp = np_y;
    np_y = np_z;
    np_z = n_tmp;
  }

  /* Publish the decomposition. */
  nproc_x = np_x;
  nproc_y = np_y;
  nproc_z = np_z;
}
America's Right: Anti-Establishment Conservatism from Goldwater to the Tea Party 68ers in West Germany is a sophisticated overview highlighting the heterogeneity of the New Left's discourse on sexuality, which nuances recent, reductionist analyses that emphasise patriarchal dominance within the New Left's organisational framework and portray the sexual-politics objectives of the sexual revolution movement in Germany as serving primarily to provide a license for (guilt-)free sex for heterosexual males. The book deserves praise for its excellent mix of theory and empirical research. Firstly, it is a first-rate source not just for sexual history but also for students of the history of early-modern and modern Germany. Secondly, the authors thoroughly re-think, refine and occasionally turn around some themes in sexual history that have gradually solidified as dogma following the breakthrough success of The History of Sexuality. The Foucauldian framework is undeniably still at work and these German genealogies pay respect to Foucault's immensely creative intellectual contribution, but we can here glimpse promisingly fresh approaches to sexual history that can lead to innovative paths in future research.
/// <reference path="../node_modules/@types/node/index.d.ts" />
/// <reference path="../node_modules/@types/jest/index.d.ts" />
/// <reference path="../node_modules/typemoq/dist/typemoq.d.ts" />
import {expect} from 'chai';
import {LogEvent, LogEventLevel} from '../src/logEvent';
import {MessageTemplate} from '../src/messageTemplate';
import {FilterStage} from '../src/filterStage';
describe('FilterStage', () => {
  it('filters events according to the filter predicate', () => {
    // Predicate keeps only events whose raw template text starts with 'B'.
    const startsWithB = (event: LogEvent) => event.messageTemplate.raw.indexOf('B') === 0;
    const stage = new FilterStage(startsWithB);
    const input = [
      new LogEvent('', LogEventLevel.information, new MessageTemplate('A Message 1'), {}),
      new LogEvent('', LogEventLevel.information, new MessageTemplate('B Message 1'), {}),
      new LogEvent('', LogEventLevel.information, new MessageTemplate('B Message 2'), {}),
      new LogEvent('', LogEventLevel.information, new MessageTemplate('C Message 1'), {})
    ];
    const output = stage.emit(input);
    expect(output).to.have.length(2);
    expect(output[0]).to.have.nested.property('messageTemplate.raw', 'B Message 1');
    expect(output[1]).to.have.nested.property('messageTemplate.raw', 'B Message 2');
  });
  it('does nothing when flushed', () => {
    // flush() is a no-op for a filter stage; returning the promise lets the
    // runner surface any unexpected rejection.
    return new FilterStage(() => true).flush();
  });
});
|
package lsann.util;
/**
 * Minimal mutable 2-tuple holding two arbitrarily typed values.
 *
 * <p>The public fields are kept (and remain directly assignable) for
 * backward compatibility with existing callers; a convenience constructor
 * is added so both elements can be set in one step.
 *
 * @param <First>  type of the first element
 * @param <Second> type of the second element
 */
public class Pair<First, Second> {

    /** First element; may be {@code null}. */
    public First first;

    /** Second element; may be {@code null}. */
    public Second second;

    /** Creates a pair with both elements unset, matching the historical default. */
    public Pair() {
    }

    /**
     * Creates a pair with both elements initialized.
     *
     * @param first  value for the first element
     * @param second value for the second element
     */
    public Pair(First first, Second second) {
        this.first = first;
        this.second = second;
    }
}
|
Study on Boron–Hydrogen Pairs in Bare and Passivated Float-Zone Silicon Wafers This study deals with the dynamics of the formation and dissociation of boron–hydrogen (BH) pairs in crystalline silicon during a rapid high-temperature treatment and subsequent dark annealing between 200 and 300°C. Highly accurate resistivity measurements are used to detect BH pairs in chemically polished B-doped float-zone silicon. It is found that an unexpectedly high amount of hydrogen is present in the as-purchased wafers. Hydrogen is initially mostly paired to boron but can be dissolved by a short high-temperature firing step. If a firing step (530°C) is applied to bare, unpassivated Si wafers, most of the initial BH pairs are dissolved, and hydrogen dimers (H2) form. With increasing peak temperature, an increasing amount of hydrogen leaves the H2 ⇌ BH system, while the proportion of BH increases. Additional hydrogen can be introduced by firing a wafer passivated with plasma-enhanced chemical vapor deposition (PECVD) SiNx:H. A three-state model shows a good agreement with the measured data for both bare and coated samples as well as for different annealing temperatures. With increasing dark annealing temperatures, the BH dynamics accelerates, whereas the maximum BH concentration reached decreases. For temperatures above 280°C, significant changes in the reaction dynamics are observed. DOI: 10.1002/pssa.202100220 This study deals with the dynamics of the formation and dissociation of boron-hydrogen (BH) pairs in crystalline silicon during a rapid high-temperature treatment and subsequent dark annealing between 200 and 300 °C. Highly accurate resistivity measurements are used to detect BH pairs in chemically polished B-doped float-zone silicon. It is found that an unexpectedly high amount of hydrogen is present in the as-purchased wafers. Hydrogen is initially mostly paired to boron but can be dissolved by a short high-temperature firing step. 
If a firing step (530 C) is applied to bare, unpassivated Si wafers, most of the initial BH pairs are dissolved, and hydrogen dimers (H 2 ) form. With increasing peak temperature, an increasing amount of hydrogen leaves the H 2 ⇌ BH system, while the proportion of BH increases. Additional hydrogen can be introduced by firing a wafer passivated with plasma-enhanced chemical vapor deposition (PECVD) SiN x :H. A three-state model shows a good agreement with the measured data for both bare and coated samples as well as for different annealing temperatures. With increasing dark annealing temperatures, the BH dynamics accelerates, whereas the maximum BH concentration reached decreases. For temperatures above 280 C, significant changes in the reaction dynamics are observed. (right-hand side of the above-mentioned reaction scheme). During this subsequent reaction, holes are released again, which implies that hydrogen occupies an electrically neutral, more favorable binding state HX 0. It cannot be excluded that hydrogen binds to itself (X H), however, in a different dimeric configuration (H 2C ) as suggested by Voronkov and Falster. An alternative explanation could be the effusion as neutral species. Assuming first-order reaction kinetics, the concentration of BH pairs is described by a sum of two exponential functions with time constants t 1,2 and amplitudes A 1,2, where the index 1 represents the formation, and index 2 the dissociation of BH pairs, whereas A ∞ giving the long-term limit. The introduction of the second exponential function extends the model from the study given by Voronkov and Falster and allows for a description of the measured data, as will be explained later on. The electrical resistivity is given by the elementary charge q, hole and electron concentration p, n, and their associated mobility p,n. 
For p-type material without any excess charge carriers, n is negligible, and p is equivalent to the equilibrium hole concentration p 0, which, in turn, is mainly determined by the (ionized) doping concentration N dop. Considering Equation, where deactivation of boron is caused by the formation of BH pairs, it follows This gives a direct measure for the concentration of BH pairs via resistance measurements. Experimental Section As purchased, chemically polished FZ-Si wafers, B-doped with N dop % 1.5 10 16 cm 3 ( % 1 cm) and a thickness of 250 m, are cut into 5 5 cm 2 samples. On some samples, hydrogenrich silicon nitride (SiN x :H) is deposited on both sides via plasma-enhanced chemical vapor deposition (PECVD) in a PlasmaLab100 reactor from Oxford Instruments (deposition temperature 400 C, total duration 12 min per side). By adjusting the ratio of the gas flows of ammonia (NH 3 ) and silane (SiH 4 ), 100 nm thick SiN x :H layers of different composition are produced observable by a variation in refractive indices n being 2.10, 2.40, and 2.55 at 630 nm, respectively. The samples were not subjected to any wet chemical treatment beforehand, meaning that the thin layer of silicon oxide wet-chemically grown by the wafer manufacturer is still present on all samples. Most of the samples receive a short high-temperature step in a conveyor belt furnace ("firing step"), with sample peak temperatures (T F ) between 500 and 850 C. The samples' temperature is monitored with a K-type thermocouple mechanically pressed onto the wafer surface. The set peak temperature of the belt furnace is adjusted in a way that the actual sample peak temperature T F matches the targeted temperature as closely as possible. Such a firing process, which is commonly used for metal contact formation on Si solar cells, is also known to release hydrogen from the SiN x :H layer into the Si bulk. 
The steep cool-down ramp leaves most of the hydrogen in the Si bulk in an electrically inactive state, which can be identified by the dimeric configuration H 2A. During a subsequent dark annealing on a hotplate (Praezitherm) at a temperature T DA, the reaction described in Equation is triggered, which is accompanied by a change in resistivity as described earlier. To quantify these changes, highly accurate four-terminal electrical resistance measurements are performed. More details and a thorough error analysis of the methodology are published elsewhere. Electrical contacts to the silicon are established with two parallel double stripes of thermally evaporated aluminum (Al) (see Figure 1). Highly Al-doped p regions are created by a pattern of laser pulses (laser-fired contacts ), enabling a reliable and low resistance contact. Neither Al evaporation nor the creation of LFCs cause significant temperature load on the sample. A digital multimeter (Keithley 2000, 6.5 digit) is used for the resistance measurements. As temperature control is a crucial factor for highly accurate measurements, they are performed on a temperature stabilized stage (T 25.0 C) inside a light-tight housing. The dark annealing treatment is briefly interrupted for each measurement point. With respect to the chosen cuboid sample geometry and under the assumption of a homogeneous resistivity in the total volume tested, resistance R is given by with distance of the inner electrodes d 40 mm, sample thickness t 250 m, and width w 50 mm. A geometry factor g 1.02 is used for compensation of a slightly inhomogeneous current distribution. It should be noted that due to the integral nature of the resistance measurement, it is impossible to draw conclusions on the (in)homogeneity of resistivity, in particular, in terms of depth. 
However, as most (but not all) of the samples used are subjected to a firing step during which homogenization of hydrogen in depth is expected, the assumption of a constant resistivity seems justified. Combining this equation with Equation and taking the difference between two measurements, the following expression is obtained for the difference in BH pairs BH with respect to a reference point www.advancedsciencenews.com www.pss-a.com Taking the difference ensures that N dop no longer appears as a parameter and does not have to be known exactly. Mobilities are calculated with an online tool from PV lighthouse using the approach described in the study given by Herguth and Winter. The choice of the reference resistance value R 0 is arbitrary and is given in the following in each case. It should be noted that this measurement method interprets all changes in resistance as a change in BH. Experimental Results and Discussion The first part of this section covers the evolution of BH pair concentration during a dark anneal at 220 C of FZ-Si samples, which have a hydrogen-rich SiN x :H layer on both sides. The samples were subjected to a short high-temperature firing step before annealing to introduce hydrogen from the SiN x :H into the Si bulk. In the second part, the effect of a dark anneal on bare FZ-Si with no obvious hydrogen source is investigated. In the last part, the dark annealing temperature is varied between 200 and 290 C to determine the critical temperature at which BH pairs do not form any longer. Evolution of BH Pairs in FZ-Si with a SiN x :H Layer For each refractive index (n 2.10, 2.40, and 2.55), three samples were prepared and subjected to a high-temperature firing step at the peak temperatures of %735, 790, and 850 C, respectively. In Figure 2a, the dynamics of the BH pairs during a subsequent dark anneal at T DA 220 C is displayed. The firing temperature is represented by the color, whereas the symbols indicate the refractive index of the SiN x :H. 
The long-term limit was used as the reference point for calculating BH, so all curves approach zero for long times. The overall behavior of the samples is quite comparable: A concentration of 0.7-2 10 14 cm 3 BH pairs is already present after firing. During dark annealing, BH pairs form from H 2A according to Equation, resulting in a rise of BH. The maximum concentration reached after a couple of hours at 220 C varies from less than 1.7 10 14 cm 3 to almost 11 10 14 cm 3 where higher concentrations are reached with higher firing temperatures. The different compositions of the SiN x :H layers (characterized by different refractive indices) seem to play a minor role only, which is in contrast to previous reports. However, one should note that different PECVD tools (here: direct plasma, and the study by Bredemeier et al.: remote plasma) were used, and layers might differ more in their microscopic structure than what is revealed in the refractive index. The maximum concentration of BH pairs BH max is composed of the initial concentration BH t0 h and the share BH DA, which indicates how many additional BH pairs form during dark annealing The initial concentration BH t0 h contains information about the post-firing state and is independent of the dark anneal. BH DA, on the other hand, depends on both the initial concentration of H 2A and the dark anneal during which the reaction H 2A ! BH takes place. To separate these influences, the respective dependence of the two quantities on the firing temperature will be considered individually. In the upper graph of Figure 2b, it is shown that BH t0 h (brown) doubles from 730 to 850 C, whereas BH DA (blue) increases almost tenfold from 1 10 14 to 9 10 14 cm 3. Thus, at higher firing temperatures, significantly more hydrogen is introduced into the Si volume, which is in accordance with a previous study. 
Directly after firing, most Bare Samples An important question to ask is whether the incorporation of hydrogen from the SiN x :H layer really is the only source of hydrogen, which is found inside the wafer later on. Taking the idea of the origin of hydrogen far back in the history of sample fabrication, the question arises whether Si wafers are free of hydrogen in the as-delivered state from the manufacturer. The FZ-Si wafers used in this section did not receive any processing steps before electrical contacting apart from a firing step while one wafer was left unfired as a reference. This reference wafer (black stars in Figure 3) shows a slight increase in BH pair concentration after an initial drop. After %2 h of dark annealing at 220 ∘ C, the dissociation reaction of the BH pairs predominates, and BH declines again. Thus, it can be followed that there is already an initial hydrogen concentration of nearly 3 10 14 cm 3 in the purchased FZ-Si wafer, primarily in the form of BH pairs. It is unclear, whether the initial drop from the first to the second measurement point is related to BH pairs or not. It is a reproducible feature, which will be discussed in more detail in the next section. As it is not describable by the three-state system introduced earlier, the first measurement value is ignored for the fitting procedure for the time being. The amount of hydrogen in this sample seems surprisingly high, as it is even more than a wafer contains, which was passivated with hydrogen-rich SiN x :H and fired at about 740 ∘ C (see Figure 2). As the same starting material was used, it is conceivable that initially present hydrogen effuses during SiN x :H deposition at a temperature of 400 C. Alternatively, if conversion to an energetically more favorable binding state (e.g., H 2C ) or bonding to other impurities occurs, this hydrogen fraction would have escaped observation by the methodology used here. 
If such a bare wafer is fired in a belt furnace, the initial concentration of BH pairs is reduced significantly (red, orange, and yellow squares in Figure 3). During the subsequent dark anneal, BH pairs form again. The maximum amplitude of BH pairs is 2.8 10 14 cm 3 for both the unfired sample and the sample fired at 533 C. This suggests that no hydrogen is lost from the H 2A ⇌ BH system. As the firing temperature increases, both the initial concentration of BH pairs and the maximum amplitude decrease. The former suggests that with higher peak temperature, less BH pairs form again during the cool down, or more BH pairs were dissociated in the first place. The latter implies that increasingly more hydrogen is lost from the H 2A ⇌ BH system-either by effusion, conversion to H 2C or bonding to other impurities. To have a closer look at changes in the hydrogen system, the quantities BH t0 h and BH DA are plotted again as a function www.advancedsciencenews.com www.pss-a.com of firing temperature in Figure 3b. The initial BH pair concentration decreases linearly with increasing firing temperature, whereas BH DA shows a sigmoidal decrease with strongest reduction in the range around 700 C. The ratios with respect to A 2 illustrate that a firing step around 600 C can maximize the ratio H 2A /BH in the initial state. However, in doing so, the total amount of hydrogen in the system H 2A ⇌ BH is already decreasing. At the highest firing temperature (814 C), a total of only 0.2 10 14 cm 3 hydrogen remains and is almost completely paired to boron after firing. The change of the ratios with firing temperature is exactly opposite to that of samples with a SiN x :H layer. This can be explained by the inverse dependence of the total hydrogen content on the firing temperature of the two sample systems with and without a SiN x :H layer, respectively. 
In Figure 4, the initial fraction of BH pairs from Figure 2 and 3 relative to the hydrogen content within the H 2A ⇌ BH system is plotted. Calculations from Voronkov and Falster (black line) show a good agreement apart from some scattering, although they used a slightly different doping (1 10 16 cm 3 instead of 1.5 10 16 cm 3 ). All these observations can be explained coherently if the as-purchased wafers already contain a concentration of about 3 10 14 cm 3 hydrogen, with around 80% present as BH pairs and 20% as H 2A. Subsequent processing steps at elevated temperature change this configuration. Samples with a SiN x :H layer and fired at T F 730 C interestingly show a lower hydrogen concentration within the H 2A ⇌ BH system compared with bare samples fired at around T F 500 600 C. One possible explanation could be the following: For passivated samples, the hydrogen initially present in the as-purchased wafer is, at some point prior to dark annealing brought into a configuration in which it no longer forms BH pairs during subsequent dark annealing and is, therefore, not observable with the methodology used in this article. Hydrogen effusion out of the wafer would be an alternative explanation for the observations. It can be expected that even lower firing temperatures would reduce the amount of hydrogen in the passivated samples even more. This would imply that some hydrogen is already lost from the H 2A ⇌ BH system during the SiN x :H deposition. The question now is where does this hydrogen in the as-purchased wafer originates from in the first place. In some cases, studies assume that wafers are free of hydrogen prior to explicit contact with hydrogen-containing atmospheres or hydrogen-rich layers. In fact, no formation of BH pairs is found in studies with unfired SiN x :H layers. However, the statement that a wafer is "hydrogen-free" must be treated with great caution (see the analysis in the study given by Hallam et al. ). 
There are reports in the literature that hydrogen incorporation into silicon is possible by wet chemical etching and cleaning steps. In a surface-near region, a rather high fraction of dopant atoms will then be deactivated. It should be noted again that the used methodology only gives an integral value of BH with respect to the wafer volume; thus, surface effects cannot be investigated. Though, after a firing step, it can be assumed that hydrogen concentration is rather uniform throughout the wafer. The presented results show that great care should be taken when investigating the role of hydrogen, because hydrogen can already be present in the wafer or introduced unintentionally, e.g., by standard wet-chemical processing steps. Temperature-Dependent BH Dynamics As shown earlier, a firing step with temperatures around 530 C dissolves existing BH pairs, whereas a treatment at 220 C initiates their formation. To gain more insight into the reaction dynamics, the dark annealing temperature has been varied between 200 and 290 C on bare samples. The results for samples treated with a high-temperature firing step with hydrogen predominantly existing as H 2A in the beginning are shown in Figure 5a. With increasing dark annealing temperatures, the amplitude of BH pairs lowers, and the formation occurs earlier. For temperatures above 240 C, a sharp increase occurs for prolonged annealing after dissociation of BH pairs, which is probably not related to the BH dynamics described previously. The steep rise of this still unknown effect shifts to lower time constants with increasing temperature, indicating that it is thermally activated. The trend of BH associated peaks getting shallower with higher temperatures reverses at 290 C. We, therefore, conclude that some change in BH dynamics occurs at this temperature, which causes the deviation from the trends found at lower temperatures. 
In the case of unfired samples with hydrogen mainly existing as BH in the beginning, the resulting measurements are displayed in Figure 5b. The behavior at the very beginning of the treatment is dominated by a fast drop in resistance, which is interpreted as a change in BH pair density based on the measurement analysis procedure. Whether this is actually the case and which physical cause is responsible for this cannot be answered on the basis of these data. Diffusion dynamics might play a role here, because hydrogen is expected to exist mainly close to the surface in these unfired samples, but a separate system completely. Initial fraction of BH pairs BH t0 h =A 2 after the firing step as a function of the total hydrogen content inside the H 2A ⇌ BH subsystem, which can be identified with the fit parameter A 2.The solid black line represents calculations made by Voronkov and Falster under the following conditions: N dop 1 10 16 cm 3, firing step with a peak temperature of 750 ∘ C and 100 Ks 1 cooling rate. The cooling ramp is comparable to the firing profile used here. www.advancedsciencenews.com www.pss-a.com unrelated to BH dynamics also changing hole concentration is conceivable as well. However, as the focus of this work is on the dynamics for longer times, this particular feature will not be evaluated further. This effect is followed by a local maximum that becomes flatter for rising temperatures and shifts to shorter times. As the maximum disappears for 290 C, the validity of the model below this temperature can also be shown here. If we examine the fitted time constants of Figure 5a more closely, we arrive at the representation in Figure 6. Shown are the inverse time constants t 1,2 for formation and dissociation of BH pairs versus inverse temperature. Note that this is a phenomenological approach, referring only to the effective reaction rates of the system. 
By assuming an Arrhenius-like dependence of the time constants, the effective activation energy of the formation of BH pairs E for 1.297 eV and of the dissociation E dis 1.2218 eV can be determined. For the regression, only values in the range of the solid line were included. Again, it can be stated here that above 280 C, a change takes place. The selection of the values included in the Arrhenius fit for the dissociation reaction was deliberately set narrow, because for higher temperatures, the dissociation is strongly influenced by the strong long-term increase described earlier. The time constants of the case of unfired samples (Figure 5b), on the other hand, are strongly influenced by the initial drop and, therefore, exhibit large uncertainties, which would make an analysis not very meaningful. Conclusion In this article, the dynamics of BH pairs in boron-doped FZ-Si during dark annealing is investigated using highly accurate resistance measurements. As-purchased, chemically polished FZ-Si wafers are found to contain a considerable amount of hydrogen, most of it paired to boron. Subsequent temperature treatment, e.g., a PECVD process or a firing process, changes the state of this hydrogen. A short high-temperature firing step applied to Si wafers coated with SiN x :H introduces additional hydrogen into the wafer volume. After firing, it is mainly in the form of H 2A, but some BH pairs are also present. If a firing step with a peak temperature of around Figure 5. a) Calculated change in BH concentration for fired samples under a dark annealing procedure. A higher dark annealing temperature leads to lower amplitudes and an accelerated dynamics. Change in BH concentration is calculated with respect to the initial resistance measurement. b) Calculated change in BH concentration for bare, unfired samples. The amplitude of the local maximum decreases with increasing temperature. 
Change in BH concentration is calculated with respect to the initial resistance measurement, and the lines are fits according to Equation. www.advancedsciencenews.com www.pss-a.com 530 C is applied to bare, unpassivated Si wafers, most of the initially present BH pairs split up and form H 2A while almost no hydrogen is lost from the H 2A ⇌ BH system. With higher firing temperatures, more and more hydrogen is lost from this system. As a consequence, the share of BH pairs increases, which is in accordance with theory. Furthermore, it could be shown that with increasing dark annealing temperature, the BH dynamics accelerates, whereas the maximum BH concentration reached decreases. The three-state model used to mathematically describe BH dynamics shows very good agreement with measurement data for both bare and passivated silicon, as well as for different annealing temperatures. Effective activation energies for formation and dissociation of BH pairs are determined to be 1.29 and 1.22 eV, respectively. For temperatures above 280 C, significant changes in the reaction dynamics occur, which manifests itself in a deviation of the time constants from Arrhenius behavior. Besides BH pair formation and dissociation, which is well described in the model used, two additional thermally activated effects were found to have an impact on hole concentration. The first one happens rather fast and is occurring predominantly in unfired wafers only. The second occurs for prolonged annealing times and overlaps with the dissociation reaction for temperatures above 240 C. |
# -*- coding: utf-8 -*-
# (c) Copyright 2021 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
from sensirion_shdlc_svm41.response_types import Humidity, Temperature
from sensirion_shdlc_svm41.device_errors import \
Svm41CommandNotAllowedInCurrentState
import pytest
import time
@pytest.mark.needs_device
def test(device):
    """
    Verify that read_measured_values_raw() returns values of the expected
    types while a measurement is running, and raises once the measurement
    has been stopped.
    """
    device.start_measurement()
    # Give the sensor time to produce its first sample.
    time.sleep(1.)
    humidity, temperature, voc_ticks, nox_ticks = \
        device.read_measured_values_raw()
    # Strict type checks (``type(...) is``, not isinstance) pin the exact
    # response types of the device API.
    assert type(humidity) is Humidity
    assert type(humidity.ticks) is int
    assert type(humidity.percent_rh) is float
    assert type(temperature) is Temperature
    assert type(temperature.ticks) is int
    assert type(temperature.degrees_celsius) is float
    assert type(temperature.degrees_fahrenheit) is float
    assert type(voc_ticks) is int
    assert type(nox_ticks) is int
    # Default formatting of the response objects, for the test log.
    print("temperature={}, humidity={}, [raw] voc ticks={}, [raw] nox ticks={}"
          .format(humidity, temperature, voc_ticks, nox_ticks))
    # After stopping, a further read must be rejected by the device.
    device.stop_measurement()
    with pytest.raises(Svm41CommandNotAllowedInCurrentState):
        device.read_measured_values_raw()
@pytest.mark.needs_device
def test_initial_state(device):
    """
    Calling read_measured_values_raw() without a prior start_measurement()
    must raise Svm41CommandNotAllowedInCurrentState.
    """
    # The device boots in the idle state, where raw reads are not allowed.
    with pytest.raises(Svm41CommandNotAllowedInCurrentState):
        device.read_measured_values_raw()
|
import { ICommandHandler } from './command';
/**
 * Registry mapping command ids to their handlers.
 */
export interface ICommandRegistry {
/**
 * Binds a handler to the given command id.
 */
registerCommand(commandId: string, handler: ICommandHandler): void;
/**
 * Returns the handler bound to the command id, or undefined when none
 * has been registered.
 */
getCommandHandler(commandId: string): ICommandHandler | undefined;
}
/** Default in-memory implementation of ICommandRegistry. */
export class CommandRegistry implements ICommandRegistry {
  /** Backing store from command id to its handler. */
  protected commandsMap: Map<string, ICommandHandler> = new Map();

  /** Binds (or replaces) the handler for the given command id. */
  registerCommand(commandId: string, commandHandler: ICommandHandler) {
    this.commandsMap.set(commandId, commandHandler);
  }

  /** Looks up the handler for a command id; undefined when unregistered. */
  getCommandHandler(commandId: string) {
    const handler = this.commandsMap.get(commandId);
    return handler;
  }
}
|
<filename>application/common/state/search/__tests__/duck.test.ts
import {
INITIAL_STATE,
SEARCH_CHANGE_TERM,
SEARCH_RESOURCE_NAME
} from '../constants';
import { changeSearchTerm, reducerSearch, reducer } from '../duck';
describe('changeSearchTerm', function fnDescribe() {
    it(`should have a type of ${SEARCH_CHANGE_TERM}`, function fnIt() {
        expect(changeSearchTerm().type).toEqual(SEARCH_CHANGE_TERM);
    });
    // Fixed: the description previously said "isHeaderFixed", a copy-paste
    // leftover from another duck; this test checks the term payload.
    it('should pass on the term value we pass in', function fnIt() {
        const term = 'foo';
        expect(changeSearchTerm(term).term).toEqual(term);
    });
});
describe('reducer', function fnDescribe() {
    it('should return the initial state', function fnIt() {
        // An unknown (empty) action must fall through to the default case.
        const unknownAction = {};
        expect(reducer(undefined, unknownAction)).toEqual(INITIAL_STATE);
    });
    it(`should react to an action with the type ${SEARCH_CHANGE_TERM}`, function fnIt() {
        const action = {
            type: SEARCH_CHANGE_TERM,
            term: 'foo'
        };
        expect(reducer(undefined, action)).toMatchSnapshot();
    });
    it(`should return the current state if ${SEARCH_CHANGE_TERM} payload is empty`, function fnIt() {
        const action = {
            type: SEARCH_CHANGE_TERM
        };
        expect(reducer(undefined, action)).toMatchSnapshot();
    });
});
describe('reducerSearch', function fnDescribe() {
    it(`should have a key of ${SEARCH_RESOURCE_NAME}`, function fnIt() {
        // The combined-reducer fragment must expose this duck's reducer
        // under its resource name.
        const expectedShape = {
            [SEARCH_RESOURCE_NAME]: reducer
        };
        expect(reducerSearch).toEqual(expect.objectContaining(expectedShape));
    });
});
|
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 1997-2010 Oracle and/or its affiliates. All rights reserved.
*
* Oracle and Java are registered trademarks of Oracle and/or its affiliates.
* Other names may be trademarks of their respective owners.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common
* Development and Distribution License("CDDL") (collectively, the
* "License"). You may not use this file except in compliance with the
* License. You can obtain a copy of the License at
* http://www.netbeans.org/cddl-gplv2.html
* or nbbuild/licenses/CDDL-GPL-2-CP. See the License for the
* specific language governing permissions and limitations under the
* License. When distributing the software, include this License Header
* Notice in each file and include the License file at
* nbbuild/licenses/CDDL-GPL-2-CP. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the
* License Header, with the fields enclosed by brackets [] replaced by
* your own identifying information:
* "Portions Copyrighted [year] [name of copyright owner]"
*
* Contributor(s):
*
* The Original Software is NetBeans. The Initial Developer of the Original
* Software is Sun Microsystems, Inc. Portions Copyright 1997-2007 Sun
* Microsystems, Inc. All Rights Reserved.
*
* If you wish your version of this file to be governed by only the CDDL
* or only the GPL Version 2, indicate your decision by adding
* "[Contributor] elects to include this software in this distribution
* under the [CDDL or GPL Version 2] license." If you do not indicate a
* single choice of license, a recipient has the option to distribute
* your version of this file under either the CDDL, the GPL Version 2 or
* to extend the choice of license to its licensees as provided above.
* However, if you add GPL Version 2 code and therefore, elected the GPL
* Version 2 license, then the option applies only if the new code is
* made subject to such option by the copyright holder.
*/
package org.netbeans.modules.form.menu;
import java.awt.event.KeyAdapter;
import java.awt.event.KeyEvent;
import javax.swing.JMenu;
import javax.swing.JMenuBar;
import org.netbeans.modules.form.InPlaceEditLayer;
import org.netbeans.modules.form.RADComponent;
import org.netbeans.modules.form.RADVisualComponent;
import org.netbeans.modules.form.RADVisualContainer;
/**
 * Handles keyboard navigation of menus and menu items in the form editor's
 * menu designer: UP/DOWN move within the current menu, LEFT/RIGHT move
 * across menus (descending into or climbing out of submenus), and SPACE or
 * F2 start in-place editing of the selected item.
 *
 * @author <EMAIL>
 */
public class KeyboardMenuNavigator extends KeyAdapter {
    // The menu-designer layer this navigator drives; assigned once in the constructor.
    MenuEditLayer menuEditLayer;
    // RAD container of the owning JMenuBar, resolved from the current menu.
    private RADVisualContainer menuBarRAD;
    // RAD container of the menu whose items are currently being navigated.
    private RADVisualContainer currentMenuRAD;
    // Restores keyboard focus to the glass layer when in-place editing finishes.
    KeyboardFinishListener listener;

    public KeyboardMenuNavigator(MenuEditLayer menuEditLayer) {
        this.menuEditLayer = menuEditLayer;
        configure();
    }

    /** Makes the given menu the navigation target and re-resolves its menu bar. */
    public void setCurrentMenuRAD(RADVisualContainer currentMenuRAD) {
        this.currentMenuRAD = currentMenuRAD;
        this.menuBarRAD = getMenuBarRad(currentMenuRAD);
    }

    /**
     * Walks up the RAD hierarchy from {@code comp} until a container whose
     * bean class is a JMenuBar is found; returns null when {@code comp} is
     * not inside a menu bar.
     */
    private RADVisualContainer getMenuBarRad(RADComponent comp) {
        if(JMenuBar.class.isAssignableFrom(comp.getBeanClass())) {
            return (RADVisualContainer) comp;
        }
        if(comp.getParentComponent() == null) return null;
        return getMenuBarRad(comp.getParentComponent());
    }

    /** Subscribes to in-place-edit finish events; invoked from the constructor. */
    public void configure() {
        listener = new KeyboardFinishListener();
        menuEditLayer.formDesigner.getInPlaceEditLayer().addFinishListener(listener);
    }

    /** Unsubscribes the finish listener; call when this navigator is retired. */
    public void unconfigure() {
        menuEditLayer.formDesigner.getInPlaceEditLayer().removeFinishListener(listener);
    }

    /**
     * Maps key presses to navigation actions. Note the conditions are not
     * chained with else-if, but the key codes are mutually exclusive so at
     * most one branch runs per event.
     */
    @Override
    public void keyPressed(KeyEvent e) {
        if(e.getKeyCode() == KeyEvent.VK_DOWN) {
            selectOffsetMenuItem(+1);
        }
        if(e.getKeyCode() == KeyEvent.VK_UP) {
            selectOffsetMenuItem(-1);
        }
        if(e.getKeyCode() == KeyEvent.VK_LEFT) {
            selectOffsetMenu(-1);
        }
        if(e.getKeyCode() == KeyEvent.VK_RIGHT) {
            selectOffsetMenu(+1);
        }
        if(e.getKeyCode() == KeyEvent.VK_SPACE) {
            startEditing();
        }
        // #116961: start inplace editing when F2 key is pressed on a menu
        if(e.getKeyCode() == KeyEvent.VK_F2) {
            startEditing();
        }
        //we aren't getting tabs for some reason
        // NOTE(review): VK_A (with SHIFT for backwards) stands in for
        // TAB/SHIFT+TAB here — confirm whether TAB events are still consumed
        // by focus traversal before changing this.
        if(e.getKeyCode() == KeyEvent.VK_A) {
            if(e.isShiftDown()) {
                selectNextMenuItem(-1);
            } else {
                selectNextMenuItem(+1);
            }
        }
    }

    /**
     * Depth-first "next/previous item" traversal: descends into submenus on
     * +1, and climbs back out when the start or end of a menu is reached.
     */
    private void selectNextMenuItem(int offset) {
        //josh: do nothing here until i figure out why tab events aren't being called
        if(currentMenuRAD == null) return;
        if(!menuEditLayer.isComponentSelected()) {
            // nothing selected yet: select the first item of the current menu
            menuEditLayer.setSelectedRADComponent(currentMenuRAD.getSubComponent(0));
        }
        RADComponent selectedRADComponent = menuEditLayer.getSingleSelectedComponent();
        //if menu, descend into the menu
        if(isJMenu(selectedRADComponent) && offset == +1) {
            RADVisualContainer newMenu = (RADVisualContainer) selectedRADComponent;
            if(newMenu.getSubComponents().length > 0) {
                currentMenuRAD = newMenu;
                selectOffsetMenuItem(offset);
                return;
            }
        }
        //if already at the end of this menu
        if(isLastItem(selectedRADComponent, currentMenuRAD) && offset == +1) {
            goUpOneLevelAndNext();
            return;
        }
        if(isFirstItem(selectedRADComponent, currentMenuRAD) && offset == -1) {
            goUpOneLevel();
            return;
        }
        selectOffsetMenuItem(offset);
    }

    // select the next menu item offset from the current one.
    // pass in -1 and +1 to do prev and next menu items
    private void selectOffsetMenuItem(int offset) {
        if(currentMenuRAD == null) return;
        if(currentMenuRAD.getSubComponents().length == 0) {
            // empty menu: clear the selection entirely
            menuEditLayer.setSelectedRADComponent(null);//Component((JComponent)null);
            return;
        }
        if(!menuEditLayer.isComponentSelected()) {
            menuEditLayer.setSelectedRADComponent(currentMenuRAD.getSubComponent(0));
            return;
        }
        int index = currentMenuRAD.getIndexOf(menuEditLayer.getSingleSelectedComponent());
        if(index+offset >=0 && index+offset < currentMenuRAD.getSubComponents().length) {
            menuEditLayer.setSelectedRADComponent(currentMenuRAD.getSubComponent(index+offset));
        } else {
            // at either end: keep the current selection rather than wrapping
            if(index >= 0 && index < currentMenuRAD.getSubComponents().length) {
                menuEditLayer.setSelectedRADComponent(currentMenuRAD.getSubComponent(index));
            }
        }
    }

    /** True when the visual component backing {@code comp} is a JMenu. */
    private boolean isJMenu(RADComponent comp) {
        return menuEditLayer.formDesigner.getComponent(comp) instanceof JMenu;
    }

    // select the next menu offset from the current one
    // pass in -1 and + 1 to do prev and next menu items
    private void selectOffsetMenu(int offset) {
        //clear the selected component
        //menuEditLayer.setSelectedComponent(null);
        //if the current component is a JMenu
        if(isJMenu(menuEditLayer.getSingleSelectedComponent())) {
            RADVisualContainer menuRAD = (RADVisualContainer) menuEditLayer.getSingleSelectedComponent();//selectedRADComponent;
            // make it's first element be highlighted
            if(menuRAD.getSubComponents() != null &&
                    menuRAD.getSubComponents().length > 0 &&
                    menuRAD.getSubComponent(0) != null) {
                RADVisualComponent firstItemRad = menuRAD.getSubComponent(0);
                // open the menu
                menuEditLayer.showMenuPopup((JMenu)menuEditLayer.formDesigner.getComponent(menuEditLayer.getSingleSelectedComponent()));//selectedRADComponent));
                menuEditLayer.setSelectedRADComponent(firstItemRad);
                currentMenuRAD = menuRAD;
                return;
            }
        }
        // if not a toplevel menu
        int index = menuBarRAD.getIndexOf(currentMenuRAD);
        if(index < 0) {
            // if left then head back up the heirarchy
            if(offset < 0) {
                goUpOneLevel();
                return;
            }
            // if right then switch to the next a full toplevel menu
            if(offset > 0) {
                currentMenuRAD = getTopLevelMenu(currentMenuRAD);
                index = menuBarRAD.getIndexOf(currentMenuRAD);
                // now continue on as normal
            }
        }
        // set the current to the new one
        index = index+offset;
        // wrap around if necessary
        if(index <0) {
            index = menuBarRAD.getSubComponents().length-1;
        }
        if(index >= menuBarRAD.getSubComponents().length) {
            index = 0;
        }
        currentMenuRAD = (RADVisualContainer) menuBarRAD.getSubComponent(index);
        // show the new current menu
        JMenu menu = (JMenu) menuEditLayer.formDesigner.getComponent(currentMenuRAD);
        menuEditLayer.openMenu(currentMenuRAD,menu);
        // set the first item as selected
        if(currentMenuRAD.getSubComponents().length > 0) {
            menuEditLayer.setSelectedRADComponent(currentMenuRAD.getSubComponents()[0]);
        }
    }

    /** Selects the enclosing menu and makes its parent the navigation target. */
    private void goUpOneLevel() {
        // NOTE(review): getParentContainer() can return null for a toplevel
        // menu — callers appear to rely on never reaching that case; verify.
        menuEditLayer.setSelectedRADComponent(currentMenuRAD);
        currentMenuRAD = currentMenuRAD.getParentContainer();
    }

    /**
     * Climbs out of the current menu and advances to the next sibling,
     * recursing further up while the enclosing menu is itself exhausted.
     */
    private void goUpOneLevelAndNext() {
        menuEditLayer.setSelectedRADComponent(currentMenuRAD);
        currentMenuRAD = currentMenuRAD.getParentContainer();
        if(isLastItem(menuEditLayer.getSingleSelectedComponent(), currentMenuRAD)) {
            goUpOneLevelAndNext();
            return;
        } else {
            selectOffsetMenuItem(+1);
        }
    }

    /** True when {@code comp} is the first child of {@code cont}. */
    private boolean isFirstItem(RADComponent comp, RADVisualContainer cont) {
        int index = cont.getIndexOf(comp);
        if(index == 0) return true;
        return false;
    }

    /** True when {@code comp} is the last child of {@code cont}. */
    private boolean isLastItem(RADComponent comp, RADVisualContainer cont) {
        int index = cont.getIndexOf(comp);
        if(index == cont.getSubComponents().length-1) {
            return true;
        }
        return false;
    }

    /** Climbs the menu hierarchy until a direct child of the menu bar is found. */
    private RADVisualContainer getTopLevelMenu(RADVisualContainer currentMenuRAD) {
        if(menuBarRAD.getIndexOf(currentMenuRAD) >= 0) {
            return currentMenuRAD;
        }
        return getTopLevelMenu(currentMenuRAD.getParentContainer());
    }

    /** Begins in-place text editing of the currently selected component. */
    private void startEditing() {
        menuEditLayer.configureEditedComponent(menuEditLayer.getSingleSelectedComponent());//selectedRADComponent);
        menuEditLayer.formDesigner.startInPlaceEditing(menuEditLayer.getSingleSelectedComponent());//selectedRADComponent);
    }

    /** Returns keyboard focus to the menu layer's glass pane after editing. */
    private class KeyboardFinishListener implements InPlaceEditLayer.FinishListener {
        @Override
        public void editingFinished(boolean changed) {
            if(menuEditLayer.isVisible()) {
                menuEditLayer.glassLayer.requestFocusInWindow();
            }
        }
    }
}
|
Turkey is upset over US collaboration with the Kurds in Syria as it feels it will empower the minority, says author/historian Gerald Horne. There’s also the question of Qatar, which Turkey has supported and President Trump has been attacking furiously.
Recep Tayyip Erdogan issued a furious response to Washington after members of his security team were charged with assaulting protesters in the US capital during his official visit there last month.
RT: Erdogan has said he will “fight politically and judicially” against the arrest warrants. Do you think he would have any chance of winning that battle in the American courts?
Turkey furious after US charges Erdogan's guards with assault https://t.co/Dkg8LRfFT8 — RT (@RT_com) June 16, 2017
Gerald Horne: I don’t think so, because you have to realize that US-Turkish relations are deteriorating rapidly. First of all, there is the Kurdish question. As you well know, the US is collaborating with the Kurdish population in Syria, supposedly to attack the [Islamic State or IS, formerly] ISIS forces in Raqqa. Turkey is upset because it feels it will empower simultaneously its Kurdish minority.
Secondly, there is the question of Qatar, the small Persian Gulf monarchy which has been subjected to an air, sea and land embargo by Saudi Arabia and Egypt, not least. Turkey has moved to support Qatar and has sent 3,000 Turkish troops to that monarchy to stave off hostile actions by the Saudis and their Egyptian comrades. At the same time, President Donald Trump has been attacking Qatar furiously.
But as a footnote I should mention that Qatar has just arranged to buy $12 billion in US fighter jets, and that is a kind of bribe to Washington that may stay the hand of Trump, but I don’t think it will keep Turkish-US relations from deteriorating ever more rapidly.
US sells $12bn worth of fighter jets to ‘terrorist funder’ Qatar https://t.co/ZSFz1m5pWI — RT (@RT_com) June 16, 2017
RT: The Turkish embassy insists that the guards were acting in self-defense against the protesters. Judging by the videos of the incident, does that sound convincing to you?
GH: I must say that the Turkish security seemed to be rather aggressive in attacking the demonstrators. At the same time, keep in mind that it was in July 2016 that President Erdogan was subjected to a military coup. He is very skittish right now about his security, because apparently the coup plotters planned to assassinate him. Apparently the Turkish authorities have reason to believe that the demonstrators in Washington, DC, were somehow connected to the coup plotters, and that may help to shed light on their excessive response to these demonstrators.
RT: The dispute has already derailed a $1.2 million small-arms sale to Turkish security forces, which was expected to be approved by the US State Department last month. Could there be further repercussions?
GH: I’m afraid so. First of all, there is the US airbase that’s in Turkey that is used for actions in both Iraq and Syria. It may be up for negotiations. Keep in mind that already German troops that have been stationed at that same airbase in Turkey have come to a kind of road block in terms of getting access. That is to say, German politicians getting access to German troops at this base. That helps to suggest that probably US-Turkish relations would deteriorate ever more rapidly. |
City hospitals: the undercare of the underprivileged As the author has written, it is the modern developments in medicine which have completely displaced the use of pomanders for therapeutic and prophylactic purposes, but at one time the matter was quite otherwise. From at least the days of the Ebers papyrus (c. 1600 BC) perfumes have been used for disinfection, doctors have exercised their wits on the use of scented materials in medicine, and pharmacists on the best method of presentation. Certainly by the Middle Ages, the globular or "apple-shape" was decided upon as one of the most desirable forms, and it was from these that the beautiful pomanders, true works of art, of the sixteenth and seventeenth centuries are descended. Of particular interest to pharmacists is the section on ingredients; from 125 recipes, ninety different drugs are listed and the percentage frequency of use given. Rose flowers, red or white, figure in seventy-eight per cent of the formulae, styrax in an overwhelming ninety-nine per cent, and it is not surprising to find ambergris in sixty per cent; equally, to modern tastes, it is unsurprising that Succus valerianae is to be found in a mere one per cent. Recipes for pomanders are drawn from many countries: Italy, France, Germany, the Arab lands, and England; including one which ends with the helpful advice, "This, if your breath be not too valiant, will make you smell as sweet as my lady's dog." The book has 106 illustrations of pomanders, well over half of which, it is interesting to note, are from this country, no less than twenty-six from the Wellcome Museum. Some of the illustrations, however, do not show the fine workmanship of the pomanders as clearly as one would like. In a world of rising costs this is always a problem, but possibly fewer pictures on an art gloss paper would have been better. There is a fine bibliography, and the book may certainly be taken as a definitive work on pomanders. 
This book is yet another worthwhile addition to the series "Quellen und Studien zur Geschichte der Pharmazie" (Bd. 21), which is proving so useful to pharmaceutical historians. One minor defect in this volume is the lack of underlining of even major headings and titles, which does not make for easy identification of particular aspects of the subject. Hospital history is burgeoning, as medical historians turn to the social history of the nineteenth and twentieth centuries, studying not |
RANKL, OPG and CTR mRNA expression in the temporomandibular joint in rheumatoid arthritis. The calcitonin receptor (CTR) and receptor activator of nuclear factor-κB ligand (RANKL) have been found to be involved in the differentiation of osteoclasts. The association between the RANKL:osteoprotegerin (OPG) expression ratio and the pathogenesis of bone-destructive rheumatoid arthritis (RA) has been described in several joints, but the available data for the temporomandibular joint (TMJ) are limited. The aim of the present study was to investigate the involvement of osteoclasts at sites of bone erosion by determining the CTR expression and the RANKL:OPG expression ratio in the TMJ in a collagen-induced arthritis (CIA) model. Forty-eight male Wistar rats were randomly divided into two groups: Control group, injected with saline solution for 6 weeks; and CIA group, injected with emulsion. The RANKL and OPG mRNA expression was significantly increased in immunized rats compared with that in non-immunized rats. The RANKL:OPG expression ratio on the trabecular bone surface was 9.0 and 6.4 in the CIA group at weeks 4 and 6, respectively, while the RANKL:OPG expression ratio in the controls was 1.0:2. CTR mRNA expression was significantly upregulated in immunized rats compared with that in non-immunized rats; the level of CTR mRNA in the CTR-positive osteoclasts on the trabecular bone surface was 10.9- and 7.8-fold higher in the CIA rats than that in the control rats at weeks 4 and 6, respectively. In conclusion, focal bone destruction in an experimental model of arthritis in the TMJ can be attributed to cells expressing CTR, a defining feature of osteoclasts. The expression of RANKL and OPG mRNA within the inflamed synovium provides an insight into the mechanism of osteoclast differentiation and function at the border of bone erosion in arthritis.
A PROBLEM HERE . . .
THE EXPECTED ENTRY of former state Attorney General Robert Shevin into the Democratic primary in the governor's race is worrying Oscar Juarez, campaign manager of GOP candidate Lou Frey Jr. "It hurts the Republicans," says Juarez, who is worried that Shevin's candidacy hurts Democratic front-runner Steve Pajcic the most and makes it more likely that Senate President Harry Johnston will get the nomination. Juarez and others among the GOP would prefer to run against Pajcic, whose voting record they perceive to be more liberal than any other Democrat in the race.
Establishment media attacks Paul after confrontation with Bachmann
Paul Joseph Watson
Infowars.com
Friday, December 16, 2011
Following Ron Paul’s clash with Michele Bachmann over foreign policy during last night’s Republican debate in Iowa, the establishment media characterized Paul’s views as an “outburst” that could cost him votes, when in reality the majority of Republican voters now want U.S. troops brought home, troops who themselves support Paul over every other GOP candidate.
Ron Paul has received more money in donations from active duty military personnel than all of the other Republican candidates combined and more than Barack Obama himself.
“Paul’s military-connected contributions for the three months more than double such contributions to all the other Republican presidential candidates—and they also exceed Obama’s,” confirms Politifact.
In the three months from April through June, Paul received “more than $25,000 from individuals who listed their employer as a branch of the military” (the campaign itself puts the figure closer to $35,000). In comparison, Michele Bachmann received just $2,250.
“We know without a shadow of a doubt that Iran will take a nuclear weapon. They will use it to wipe our friend Israel off the map, and they would use it against the United States of America,” said Bachmann during last night’s debate, presumably unaware of the fact that Israel has as many as 400 nuclear weapons, backed by the United States which maintains an arsenal of 5,113 warheads, and could turn Iran into a parking lot overnight.
Bachmann then labeled Paul’s refusal to back an unconstitutional pre-emptive strike on Iran as “dangerous for American security”.
Bachmann’s characterization of Ron Paul’s constitutional, non-interventionist, founding father-inspired foreign policy as “dangerous” is not a view shared by active duty U.S. troops, because as Paul campaign manager Jesse Benton points out, “They look at Ron Paul and see a leader who takes their oath seriously and who will fight to ensure that we don’t misrepresent that oath by sending them off to police the world, instead of defending our country.”
To cast Paul’s opposition to neo-liberal interventionist wars which have intensified under Barack Obama as somehow unconservative represents more dirty tricks on behalf of the establishment press. The Texan Congressman’s views are completely in line with the founding fathers, who also advocated non-interventionism.
A d v e r t i s e m e n t
{openx:74}
“It is not we non-interventionists who are isolationsists,” Paul explains in his article I Advocate the Same Foreign Policy the Founding Fathers Would. “The real isolationists are those who impose sanctions and embargoes on countries and peoples across the globe because they disagree with the internal and foreign policies of their leaders. The real isolationists are those who choose to use force overseas to promote democracy, rather than seek change through diplomacy, engagement, and by setting a positive example.”
Not only is Ron Paul’s foreign policy stance backed by U.S. troops and other military workers, aspects of it also shared by the majority of Republican voters.
After Obama rapidly accelerated an interventionist foreign policy, with more troops deployed than at any time under Bush (Bachmann should love Obama), Republicans quickly began to lose their appetite for war.
A recent Rasmussen poll found that a slim majority of Republican voters now support pulling U.S. troops out of Afghanistan. In addition, the survey found that just 13 per cent of Republicans supported U.S. military action in Libya.
Perhaps Michele Bachmann should try asking active duty U.S. troops if they think Ron Paul’s views on foreign policy are “dangerous”. Judging by how their donations have flooded into Ron Paul’s campaign coffers, efforts to characterize Paul’s non-interventionist policy as a fringe viewpoint are clearly without any foundation whatsoever.
*********************
Paul Joseph Watson is the editor and writer for Prison Planet.com. He is the author of Order Out Of Chaos. Watson is also a regular fill-in host for The Alex Jones Show. |
<filename>nl/invisible/keygen/gui/MainApp.java
/* 1: */ package nl.invisible.keygen.gui;
/* 2: */
/* 3: */ import java.awt.Window;
/* 4: */ import org.jdesktop.application.Application;
/* 5: */ import org.jdesktop.application.SingleFrameApplication;
/* 6: */
/* 7: */ public class MainApp
/* 8: */ extends SingleFrameApplication
/* 9: */ {
/* 10: */ protected void startup()
/* 11: */ {
/* 12:19 */ show(new MainWindow(this));
/* 13: */ }
/* 14: */
/* 15: */ protected void configureWindow(Window root) {}
/* 16: */
/* 17: */ public static MainApp getApplication()
/* 18: */ {
/* 19:35 */ return (MainApp)Application.getInstance(MainApp.class);
/* 20: */ }
/* 21: */
/* 22: */ public static void main(String[] args)
/* 23: */ {
/* 24:42 */ launch(MainApp.class, args);
/* 25: */ }
/* 26: */ }
/* Location: C:\Users\xi\Desktop\confluence_keygen\confluence_keygen.jar
* Qualified Name: nl.invisible.keygen.gui.MainApp
* JD-Core Version: 0.7.0.1
*/ |
/*
* Copyright 2020 ICON Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iiss
import (
"fmt"
"math/big"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/icon-project/goloop/common"
"github.com/icon-project/goloop/common/db"
"github.com/icon-project/goloop/common/errors"
"github.com/icon-project/goloop/common/log"
"github.com/icon-project/goloop/icon/icmodule"
"github.com/icon-project/goloop/icon/iiss/icreward"
"github.com/icon-project/goloop/icon/iiss/icstage"
"github.com/icon-project/goloop/icon/iiss/icstate"
)
// MakeCalculator builds a Calculator test fixture backed by the given
// icstage snapshot and a fresh, empty icreward snapshot on the database.
func MakeCalculator(database db.Database, back *icstage.Snapshot) *Calculator {
	calc := new(Calculator)
	calc.back = back
	calc.base = icreward.NewSnapshot(database, nil)
	calc.temp = calc.base.NewState()
	calc.log = log.New()
	return calc
}
// TestCalculator_processClaim verifies that processClaim deducts each
// account's claimed amount from its temporary I-Score: accounts start with
// 2*value and claim value, so value must remain afterwards.
func TestCalculator_processClaim(t *testing.T) {
	database := db.NewMapDB()
	front := icstage.NewState(database)
	addr1 := common.MustNewAddressFromString("hx1")
	addr2 := common.MustNewAddressFromString("hx2")
	v1 := int64(100)
	v2 := int64(200)
	type args struct {
		addr  *common.Address
		value *big.Int
	}
	tests := []struct {
		name string
		args args
		want int64
	}{
		{
			"Add Claim 100",
			args{
				addr1,
				big.NewInt(v1),
			},
			v1,
		},
		{
			"Add Claim 200",
			args{
				addr2,
				big.NewInt(v2),
			},
			v2,
		},
	}
	// initialize data
	c := MakeCalculator(database, nil)
	for _, tt := range tests {
		args := tt.args
		// temp IScore : args.value * 2
		iScore := icreward.NewIScore(new(big.Int).Mul(args.value, big.NewInt(2)))
		err := c.temp.SetIScore(args.addr, iScore)
		assert.NoError(t, err)
		// add Claim : args.value
		_, err = front.AddIScoreClaim(args.addr, args.value)
		assert.NoError(t, err)
	}
	// snapshot the staged claims so processClaim can consume them
	c.back = front.GetSnapshot()
	err := c.processClaim()
	assert.NoError(t, err)
	// check result: remaining I-Score == 2*value - value == value
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			args := tt.args
			iScore, err := c.temp.GetIScore(args.addr)
			assert.NoError(t, err)
			assert.Equal(t, 0, args.value.Cmp(iScore.Value()))
		})
	}
}
// TestCalculator_processBlockProduce checks how block-produce rewards are
// split among four validators: the proposer earns the full generate reward
// and each voter (vote mask bit set, proposer excluded) earns an equal
// share of the validate reward. Invalid proposer/voter info must error.
func TestCalculator_processBlockProduce(t *testing.T) {
	addr0 := common.MustNewAddressFromString("hx0")
	addr1 := common.MustNewAddressFromString("hx1")
	addr2 := common.MustNewAddressFromString("hx2")
	addr3 := common.MustNewAddressFromString("hx3")
	variable := big.NewInt(int64(YearBlock * icmodule.IScoreICXRatio))
	rewardGenerate := variable.Int64()
	rewardValidate := variable.Int64()
	type args struct {
		bp       *icstage.BlockProduce
		variable *big.Int
	}
	tests := []struct {
		name  string
		args  args
		err   bool
		wants [4]int64 // expected I-Score per validator, indexed by addr0..addr3
	}{
		{
			name: "Zero Irep",
			args: args{
				icstage.NewBlockProduce(0, 0, new(big.Int).SetInt64(int64(0b0))),
				new(big.Int),
			},
			err:   false,
			wants: [4]int64{0, 0, 0, 0},
		},
		{
			name: "All voted",
			args: args{
				icstage.NewBlockProduce(0, 4, new(big.Int).SetInt64(int64(0b1111))),
				variable,
			},
			err: false,
			wants: [4]int64{
				rewardGenerate,
				rewardValidate / 3,
				rewardValidate / 3,
				rewardValidate / 3,
			},
		},
		{
			name: "3 P-Rep voted include proposer",
			args: args{
				icstage.NewBlockProduce(2, 3, new(big.Int).SetInt64(int64(0b0111))),
				variable,
			},
			err: false,
			wants: [4]int64{
				rewardValidate / 2,
				rewardValidate / 2,
				rewardGenerate,
				0,
			},
		},
		{
			name: "3 P-Rep voted exclude proposer",
			args: args{
				icstage.NewBlockProduce(2, 3, new(big.Int).SetInt64(int64(0b1011))),
				variable,
			},
			err: false,
			wants: [4]int64{
				rewardValidate / 3,
				rewardValidate / 3,
				rewardGenerate,
				rewardValidate / 3,
			},
		},
		{
			name: "Invalid proposerIndex",
			args: args{
				icstage.NewBlockProduce(5, 3, new(big.Int).SetInt64(int64(0b0111))),
				variable,
			},
			err:   true,
			wants: [4]int64{0, 0, 0, 0},
		},
		{
			name: "There is no validator Info. for voter",
			args: args{
				icstage.NewBlockProduce(5, 16, new(big.Int).SetInt64(int64(0b01111111111111111))),
				variable,
			},
			err:   true,
			wants: [4]int64{0, 0, 0, 0},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			in := tt.args
			// fresh validator set per case so I-Scores don't accumulate
			vs := makeVS(addr0, addr1, addr2, addr3)
			err := processBlockProduce(in.bp, in.variable, vs)
			if tt.err {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				for i, v := range vs {
					assert.Equal(t, tt.wants[i], v.IScore().Int64(), "index %d", i)
				}
			}
		})
	}
}
// makeVS wraps each address in a fresh validator, preserving argument order.
func makeVS(addrs ...*common.Address) []*validator {
	// Pre-size the capacity: the number of validators is known up front,
	// which avoids re-allocations during append.
	vs := make([]*validator, 0, len(addrs))
	for _, addr := range addrs {
		vs = append(vs, newValidator(addr))
	}
	return vs
}
// TestCalculator_varForVotedReward checks the reward multiplier/divider
// derived from each Global revision: IISS2 (V1) uses irep-based rewards and
// IISS3 (V2) uses iglobal/iprep-based rewards; a disabled term yields a
// zero multiplier in both versions.
func TestCalculator_varForVotedReward(t *testing.T) {
	tests := []struct {
		name                string
		args                icstage.Global
		multiplier, divider int64
	}{
		{
			"Global Version1",
			icstage.NewGlobalV1(
				icstate.IISSVersion2,
				0,
				100-1,
				icmodule.RevisionIISS,
				big.NewInt(YearBlock),
				big.NewInt(200),
				22,
				100,
			),
			// multiplier = ((irep * MonthPerYear) / (YearBlock * 2)) * 100 * IScoreICXRatio
			((YearBlock * MonthPerYear) / (YearBlock * 2)) * 100 * icmodule.IScoreICXRatio,
			1,
		},
		{
			"Global Version1 - disabled",
			icstage.NewGlobalV1(
				icstate.IISSVersion2,
				0,
				100-1,
				icmodule.RevisionIISS,
				big.NewInt(0),
				big.NewInt(200),
				22,
				100,
			),
			0,
			1,
		},
		{
			"Global Version2",
			icstage.NewGlobalV2(
				icstate.IISSVersion3,
				0,
				1000-1,
				icmodule.RevisionEnableIISS3,
				big.NewInt(10000),
				big.NewInt(50),
				big.NewInt(50),
				big.NewInt(0),
				big.NewInt(0),
				100,
				5,
			),
			// variable = iglobal * iprep * IScoreICXRatio / (100 * TermPeriod)
			10000 * 50 * icmodule.IScoreICXRatio,
			100 * MonthBlock,
		},
		{
			"Global Version2 - disabled",
			icstage.NewGlobalV2(
				icstate.IISSVersion3,
				0,
				-1,
				icmodule.RevisionEnableIISS3,
				big.NewInt(0),
				big.NewInt(0),
				big.NewInt(0),
				big.NewInt(0),
				big.NewInt(0),
				0,
				0,
			),
			0,
			1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			multiplier, divider := varForVotedReward(tt.args)
			assert.Equal(t, tt.multiplier, multiplier.Int64())
			assert.Equal(t, tt.divider, divider.Int64())
		})
	}
}
// newVotedDataForTest builds a votedData fixture with the given enable
// flag, delegation/bond amounts and I-Score, then recomputes the bonded
// delegation for the supplied bond requirement.
func newVotedDataForTest(enable bool, delegated int64, bonded int64, bondRequirement int, iScore int64) *votedData {
	voted := icreward.NewVoted()
	voted.SetEnable(enable)
	voted.SetDelegated(big.NewInt(delegated))
	voted.SetBonded(big.NewInt(bonded))
	voted.SetBondedDelegation(big.NewInt(0))
	// Wrap the Voted, then derive the rest — UpdateBondedDelegation must
	// run after the amounts above are in place.
	vData := newVotedData(voted)
	vData.SetIScore(big.NewInt(iScore))
	vData.UpdateBondedDelegation(bondRequirement)
	return vData
}
// TestDelegatedData_compare verifies votedData ordering: enabled entries
// compare by amount, while disabled entries always compare as equal to
// each other and less than enabled ones.
func TestDelegatedData_compare(t *testing.T) {
	d1 := newVotedDataForTest(true, 10, 0, 0, 10)
	d2 := newVotedDataForTest(true, 20, 0, 0, 20)
	d3 := newVotedDataForTest(true, 20, 0, 0, 21)
	d4 := newVotedDataForTest(false, 30, 0, 0, 30)
	d5 := newVotedDataForTest(false, 31, 0, 0, 31)
	type args struct {
		d1 *votedData
		d2 *votedData
	}
	tests := []struct {
		name string
		args args
		want int
	}{
		{
			"x<y",
			args{d1, d2},
			-1,
		},
		{
			"x<y,disable",
			args{d5, d2},
			-1,
		},
		{
			"x==y",
			args{d2, d3},
			0,
		},
		{
			"x==y,disable",
			args{d4, d5},
			0,
		},
		{
			"x>y",
			args{d3, d1},
			1,
		},
		{
			"x>y,disable",
			args{d1, d4},
			1,
		},
	}
	for _, tt := range tests {
		args := tt.args
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, args.d1.Compare(args.d2))
		})
	}
}
// TestVotedInfo_setEnable checks that toggling a P-Rep's enable status
// keeps votedInfo's total voted amount consistent, and that disabling an
// unknown address registers an empty, disabled entry with zero I-Score.
func TestVotedInfo_setEnable(t *testing.T) {
	totalVoted := new(big.Int)
	vInfo := newVotedInfo(100)
	status := icstage.ESDisablePermanent
	// seed five P-Reps, cycling through the enable-status values
	for i := int64(1); i < 6; i += 1 {
		status = status % icstage.ESMax
		addr := common.MustNewAddressFromString(fmt.Sprintf("hx%d", i))
		data := newVotedDataForTest(status.IsEnabled(), i, i, 1, 0)
		vInfo.AddVotedData(addr, data)
		if status.IsEnabled() {
			totalVoted.Add(totalVoted, data.GetVotedAmount())
		}
	}
	assert.Equal(t, 0, totalVoted.Cmp(vInfo.TotalVoted()))
	// flip each P-Rep to the next status and track the expected total
	status = icstage.ESEnable
	for key, vData := range vInfo.PReps() {
		status = status % icstage.ESMax
		addr, err := common.NewAddress([]byte(key))
		assert.NoError(t, err)
		if status.IsEnabled() != vData.Enable() {
			if status.IsEnabled() {
				totalVoted.Add(totalVoted, vData.GetVotedAmount())
			} else {
				totalVoted.Sub(totalVoted, vData.GetVotedAmount())
			}
		}
		vInfo.SetEnable(addr, status)
		assert.Equal(t, status, vData.Status())
		assert.Equal(t, status.IsEnabled(), vData.Enable())
		assert.Equal(t, 0, totalVoted.Cmp(vInfo.TotalVoted()))
	}
	// disabling an address that was never added creates an empty entry
	addr := common.MustNewAddressFromString("hx123412341234")
	vInfo.SetEnable(addr, icstage.ESDisablePermanent)
	prep := vInfo.GetPRepByAddress(addr)
	assert.Equal(t, false, prep.Enable())
	assert.True(t, prep.IsEmpty())
	assert.Equal(t, 0, prep.IScore().Sign())
	assert.Equal(t, 0, totalVoted.Cmp(vInfo.TotalVoted()))
}
// TestVotedInfo_updateDelegated checks that votedInfo.UpdateDelegated applies
// a batch of votes: existing entries get the vote amount added to their
// delegated value, a vote for an unknown address creates a fresh entry, and
// TotalVoted grows only by amounts credited to enabled PReps.
func TestVotedInfo_updateDelegated(t *testing.T) {
	vInfo := newVotedInfo(100)
	votes := make([]*icstage.Vote, 0)
	enable := true
	for i := int64(1); i < 6; i += 1 {
		enable = !enable
		addr := common.MustNewAddressFromString(fmt.Sprintf("hx%d", i))
		data := newVotedDataForTest(enable, i, i, 1, 0)
		vInfo.AddVotedData(addr, data)
		votes = append(votes, icstage.NewVote(addr, big.NewInt(i)))
	}
	// One vote for an address with no pre-existing voted-data entry.
	newAddr := common.MustNewAddressFromString("hx321321")
	votes = append(votes, icstage.NewVote(newAddr, big.NewInt(100)))

	totalVoted := new(big.Int).Set(vInfo.TotalVoted())
	vInfo.UpdateDelegated(votes)
	for _, v := range votes {
		// Pre-existing entries evidently start with delegated == i, so a
		// vote of i doubles it; the fresh entry holds just the vote amount.
		expect := v.Amount().Int64() * 2
		if v.To().Equal(newAddr) {
			expect = v.Amount().Int64()
		}
		vData := vInfo.GetPRepByAddress(v.To())
		assert.Equal(t, expect, vData.GetDelegated().Int64())
		if vData.Enable() {
			totalVoted.Add(totalVoted, v.Amount())
		}
	}
	assert.Equal(t, 0, totalVoted.Cmp(vInfo.TotalVoted()))
}
// TestVotedInfo_updateBonded mirrors TestVotedInfo_updateDelegated but for
// votedInfo.UpdateBonded: existing entries get the vote amount added to
// their bonded value, unknown addresses are added fresh, and TotalVoted
// grows only by amounts credited to enabled PReps.
func TestVotedInfo_updateBonded(t *testing.T) {
	vInfo := newVotedInfo(100)
	votes := make([]*icstage.Vote, 0)
	enable := true
	for i := int64(1); i < 6; i += 1 {
		enable = !enable
		addr := common.MustNewAddressFromString(fmt.Sprintf("hx%d", i))
		data := newVotedDataForTest(enable, i, i, 1, 0)
		vInfo.AddVotedData(addr, data)
		votes = append(votes, icstage.NewVote(addr, big.NewInt(i)))
	}
	// One vote for an address with no pre-existing voted-data entry.
	newAddr := common.MustNewAddressFromString("hx321321")
	votes = append(votes, icstage.NewVote(newAddr, big.NewInt(100)))

	totalVoted := new(big.Int).Set(vInfo.TotalVoted())
	vInfo.UpdateBonded(votes)
	for _, v := range votes {
		// Pre-existing entries evidently start with bonded == i, so a vote
		// of i doubles it; the fresh entry holds just the vote amount.
		expect := v.Amount().Int64() * 2
		if v.To().Equal(newAddr) {
			expect = v.Amount().Int64()
		}
		vData := vInfo.GetPRepByAddress(v.To())
		assert.Equal(t, expect, vData.GetBonded().Int64())
		if vData.Enable() {
			totalVoted.Add(totalVoted, v.Amount())
		}
	}
	assert.Equal(t, 0, totalVoted.Cmp(vInfo.TotalVoted()))
}
// TestVotedInfo_SortAndUpdateTotalBondedDelegation checks that Sort orders
// the rank list by bonded delegation (largest first) and that
// UpdateTotalBondedDelegation sums only the top MaxRankForReward entries.
func TestVotedInfo_SortAndUpdateTotalBondedDelegation(t *testing.T) {
	d := newVotedInfo(100)
	total := int64(0)
	more := int64(10)
	// Register `more` entries beyond the reward cut-off so some must be
	// excluded from the total.
	maxIndex := int64(d.MaxRankForReward()) + more
	for i := int64(1); i <= maxIndex; i += 1 {
		addr := common.MustNewAddressFromString(fmt.Sprintf("hx%d", i))
		data := newVotedDataForTest(true, i, 0, 0, i)
		d.AddVotedData(addr, data)
		// Only the MaxRankForReward largest values (i > more) belong in
		// the expected total.
		if i > more {
			total += i
		}
	}
	d.Sort()
	d.UpdateTotalBondedDelegation()
	assert.Equal(t, total, d.TotalBondedDelegation().Int64())

	// Rank order is descending: rank position i holds address hx(maxIndex-i).
	for i, rank := range d.Rank() {
		addr := common.MustNewAddressFromString(fmt.Sprintf("hx%d", maxIndex-int64(i)))
		assert.Equal(t, string(addr.Bytes()), rank)
	}
}
// TestVotedInfo_calculateReward checks that CalculateReward distributes
// IScore (variable * period, scaled by each PRep's share of the total bonded
// delegation) to the top MaxRankForReward PReps and gives zero to the rest.
func TestVotedInfo_calculateReward(t *testing.T) {
	vInfo := newVotedInfo(100)
	total := int64(0)
	more := int64(10)
	maxIndex := int64(vInfo.MaxRankForReward()) + more
	for i := int64(1); i <= maxIndex; i += 1 {
		addr := common.MustNewAddressFromString(fmt.Sprintf("hx%d", i))
		data := newVotedDataForTest(true, i, 0, 0, 0)
		vInfo.AddVotedData(addr, data)
		if i > more {
			total += i
		}
	}
	vInfo.Sort()
	vInfo.UpdateTotalBondedDelegation()
	assert.Equal(t, total, vInfo.TotalBondedDelegation().Int64())

	variable := big.NewInt(YearBlock)
	divider := big.NewInt(1)
	period := 10000
	bigIntPeriod := big.NewInt(int64(period))

	vInfo.CalculateReward(variable, divider, period)

	for i, addrKey := range vInfo.Rank() {
		expect := big.NewInt(maxIndex - int64(i))
		if i >= vInfo.MaxRankForReward() {
			// Below the reward cut-off: no IScore at all.
			expect.SetInt64(0)
		} else {
			// reward = delegation * variable * period / totalBondedDelegation
			expect.Mul(expect, variable)
			expect.Mul(expect, bigIntPeriod)
			expect.Div(expect, vInfo.TotalBondedDelegation())
		}
		assert.Equal(t, expect.Int64(), vInfo.PReps()[addrKey].IScore().Int64())
	}
}
// TestCalculator_varForVotingReward exercises varForVotingReward for both
// global versions (IISS 2 via GlobalV1 and IISS 3 via GlobalV2), including
// the disabled configurations where multiplier and divider must both be 0.
func TestCalculator_varForVotingReward(t *testing.T) {
	type args struct {
		global            icstage.Global
		totalVotingAmount *big.Int
	}
	type want struct {
		multiplier int64
		divider    int64
	}
	tests := []struct {
		name string
		args args
		want want
	}{
		{
			"Global Version1",
			args{
				icstage.NewGlobalV1(
					icstate.IISSVersion2,
					0,
					100-1,
					icmodule.RevisionIISS,
					big.NewInt(MonthBlock),
					big.NewInt(20000000),
					22,
					100,
				),
				nil,
			},
			want{
				RrepMultiplier * 20000000 * icmodule.IScoreICXRatio,
				YearBlock * RrepDivider,
			},
		},
		{
			// rrep of 0 disables the voting reward entirely.
			"Global Version1 - disabled",
			args{
				icstage.NewGlobalV1(
					icstate.IISSVersion2,
					0,
					100-1,
					icmodule.RevisionIISS,
					big.NewInt(MonthBlock),
					big.NewInt(0),
					22,
					100,
				),
				nil,
			},
			want{
				0,
				0,
			},
		},
		{
			"Global Version2",
			args{
				icstage.NewGlobalV2(
					icstate.IISSVersion3,
					0,
					1000-1,
					icmodule.RevisionEnableIISS3,
					big.NewInt(10000),
					big.NewInt(50),
					big.NewInt(50),
					big.NewInt(0),
					big.NewInt(0),
					100,
					5,
				),
				big.NewInt(10),
			},
			// multiplier = iglobal * ivoter * IScoreICXRatio / (100 * TermPeriod, totalVotingAmount)
			want{
				10000 * 50 * icmodule.IScoreICXRatio,
				100 * MonthBlock * 10,
			},
		},
		{
			// All-zero GlobalV2 parameters also disable the reward.
			"Global Version2 - disabled",
			args{
				icstage.NewGlobalV2(
					icstate.IISSVersion3,
					0,
					0-1,
					icmodule.RevisionIISS,
					big.NewInt(0),
					big.NewInt(0),
					big.NewInt(0),
					big.NewInt(0),
					big.NewInt(0),
					0,
					0,
				),
				big.NewInt(10),
			},
			want{
				0,
				0,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			multiplier, divider := varForVotingReward(tt.args.global, tt.args.totalVotingAmount)
			assert.Equal(t, tt.want.multiplier, multiplier.Int64())
			assert.Equal(t, tt.want.divider, divider.Int64())
		})
	}
}
// testGlobal embeds icstage.Global but overrides GetIISSVersion, letting
// tests force a specific IISS version without building a full Global value.
type testGlobal struct {
	icstage.Global
	iissVersion int
}

// GetIISSVersion returns the stubbed IISS version.
func (tg *testGlobal) GetIISSVersion() int {
	return tg.iissVersion
}
// TestCalculator_VotingReward checks Calculator.votingReward across PRep
// lifecycle cases: delegations below MinDelegation (ignored under IISS 2 but
// rewarded under IISS 3), PReps enabled for the full period, enabled partway
// through, disabled partway through, unknown PReps, and a combination.
func TestCalculator_VotingReward(t *testing.T) {
	addr1 := common.MustNewAddressFromString("hx1")
	addr2 := common.MustNewAddressFromString("hx2")
	addr3 := common.MustNewAddressFromString("hx3")
	addr4 := common.MustNewAddressFromString("hx4")
	// pRepEnable appears to hold (enable height, disable height), where a
	// zero disable height means "still enabled" — inferred from the
	// expected reward windows below; addr4 is intentionally absent.
	prepInfo := map[string]*pRepEnable{
		string(addr1.Bytes()): {0, 0},
		string(addr2.Bytes()): {10, 0},
		string(addr3.Bytes()): {100, 200},
	}
	d0 := icstate.NewDelegation(addr1, big.NewInt(MinDelegation-1))
	d1 := icstate.NewDelegation(addr1, big.NewInt(MinDelegation))
	d2 := icstate.NewDelegation(addr2, big.NewInt(MinDelegation))
	d3 := icstate.NewDelegation(addr3, big.NewInt(MinDelegation))
	d4 := icstate.NewDelegation(addr4, big.NewInt(MinDelegation))
	type args struct {
		iissVersion int
		multiplier  int
		divider     int
		from        int
		to          int
		delegating  icstate.Delegations
	}
	tests := []struct {
		name string
		args args
		want int64
	}{
		{
			name: "Delegate too small in IISS 2.x",
			args: args{
				icstate.IISSVersion2,
				100,
				10,
				0,
				1000,
				icstate.Delegations{d0},
			},
			want: 0,
		},
		{
			name: "Delegate too small in IISS 3.x",
			args: args{
				icstate.IISSVersion3,
				100,
				10,
				0,
				1000,
				icstate.Delegations{d0},
			},
			want: 100 * d0.Value.Int64() * 1000 / 10,
		},
		{
			name: "PRep-full",
			args: args{
				icstate.IISSVersion3,
				100,
				10,
				0,
				1000,
				icstate.Delegations{d1},
			},
			want: 100 * d1.Value.Int64() * 1000 / 10,
		},
		{
			// addr2 enabled at height 10: rewarded for (1000 - 10) blocks.
			name: "PRep-enabled",
			args: args{
				icstate.IISSVersion3,
				100,
				10,
				0,
				1000,
				icstate.Delegations{d2},
			},
			want: 100 * d2.Value.Int64() * (1000 - 10) / 10,
		},
		{
			// addr3 enabled 100..200: rewarded only inside that window.
			name: "PRep-disabled",
			args: args{
				icstate.IISSVersion3,
				100,
				10,
				0,
				1000,
				icstate.Delegations{d3},
			},
			want: 100 * d3.Value.Int64() * (200 - 100) / 10,
		},
		{
			// addr4 has no prepInfo entry: never rewarded.
			name: "PRep-None",
			args: args{
				icstate.IISSVersion3,
				100,
				10,
				0,
				1000,
				icstate.Delegations{d4},
			},
			want: 0,
		},
		{
			name: "PRep-combination",
			args: args{
				icstate.IISSVersion3,
				100,
				10,
				0,
				1000,
				icstate.Delegations{d1, d2, d3, d4},
			},
			want: (100*d1.Value.Int64()*1000)/10 +
				(100*d2.Value.Int64()*(1000-10))/10 +
				(100*d3.Value.Int64()*(200-100))/10,
		},
	}

	calculator := new(Calculator)
	calculator.log = log.New()

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			args := tt.args
			calculator.global = &testGlobal{iissVersion: args.iissVersion}
			reward := calculator.votingReward(
				big.NewInt(int64(args.multiplier)),
				big.NewInt(int64(args.divider)),
				args.from,
				args.to,
				prepInfo,
				args.delegating.Iterator(),
			)
			assert.Equal(t, tt.want, reward.Int64())
		})
	}
}
// TestCalculator_WaitResult covers Calculator.WaitResult: it returns
// immediately for the initial block height, errors on a height mismatch,
// propagates the error passed to setResult, and wakes every waiter once a
// snapshot result is set.
// NOTE(review): goroutine ordering relies on time.Sleep, which can be flaky
// on heavily loaded machines.
func TestCalculator_WaitResult(t *testing.T) {
	c := &Calculator{
		startHeight: InitBlockHeight,
	}
	// The sentinel InitBlockHeight never blocks, regardless of the height asked.
	err := c.WaitResult(1234)
	assert.NoError(t, err)

	c = &Calculator{
		startHeight: 3414,
	}
	// Asking for a different height than the calculator's start is an error.
	err = c.WaitResult(1234)
	assert.Error(t, err)

	// An error handed to setResult is returned to a blocked waiter.
	toTC := make(chan string, 2)
	go func() {
		err := c.WaitResult(3414)
		assert.True(t, err == errors.ErrInvalidState)
		toTC <- "done"
	}()
	time.Sleep(time.Millisecond*10)
	c.setResult(nil, errors.ErrInvalidState)
	assert.Equal(t, "done", <-toTC)

	// A successful result wakes up all concurrent waiters.
	c = &Calculator{
		startHeight: 3414,
	}
	go func() {
		err := c.WaitResult(3414)
		assert.NoError(t, err)
		toTC <- "done"
	}()
	go func() {
		err := c.WaitResult(3414)
		assert.NoError(t, err)
		toTC <- "done"
	}()
	time.Sleep(time.Millisecond*20)
	mdb := db.NewMapDB()
	rss := icreward.NewSnapshot(mdb, nil)
	c.setResult(rss, nil)
	assert.Equal(t, "done", <-toTC)
	assert.Equal(t, "done", <-toTC)
	assert.True(t, c.Result() == rss)
}
<reponame>edomin/angles
#ifndef GAME_SEARCH_HPP
#define GAME_SEARCH_HPP
#include <vector>
#include "game/cell.hpp"
namespace game {
// Per-cell scratch values laid out over a width x height grid, used while
// searching the game board. Cells are addressed either through a game::Cell
// or through (row, col) indices.
class Search {
    public:
        // NOTE(review): data members are public and mutable; callers are
        // trusted to keep data.size() consistent with width * height.
        unsigned width;               // number of columns
        unsigned height;              // number of rows
        std::vector<unsigned> data;   // one value per cell; assumes row-major
                                      // layout via at()/set() — TODO confirm in .cpp

        Search(unsigned _width, unsigned _height);
        ~Search();

        Search& operator=(const Search &other);
        bool operator==(const Search &other) const;

        // Reset the search state (reset value defined in the .cpp).
        void clear();
        // Store `value` into the slot for `cell`.
        void set(const Cell &cell, unsigned value);
        // Read the value stored for a cell / (row, col) position.
        unsigned at(const Cell &cell) const;
        unsigned at(unsigned row, unsigned col) const;
};
} // game::
#endif // GAME_SEARCH_HPP
|
package net.thevpc.nuts.runtime.standalone.xtra.hashname;
import net.thevpc.nuts.*;
import net.thevpc.nuts.runtime.standalone.util.CoreNutsUtils;
import net.thevpc.nuts.spi.NutsSupportLevelContext;
import java.nio.file.Path;
import java.nio.file.Paths;
/**
 * Default {@link NutsHashName} implementation: derives short, human-friendly
 * names for arbitrary objects (strings, paths, workspaces, sessions) by
 * mapping their hash codes onto a fixed color-name table.
 */
public class DefaultNutsHashName implements NutsHashName {

    // NOTE: the previous revision also declared unused private fields
    // `source` and `sourceType`; they were never read or written and have
    // been removed.
    private NutsSession session;

    public DefaultNutsHashName(NutsSession session) {
        this.session = session;
    }

    /**
     * Computes a human-friendly name for a workspace location.
     * Paths directly under the default workspace root keep their bare name;
     * deeper paths are prefixed with the hash name of their parent.
     *
     * @param path workspace location ({@code null} is treated as empty)
     * @return display name for the workspace
     */
    public String getWorkspaceHashName(String path) {
        if (path == null) {
            path = "";
        }
        String n;
        String p;
        // Treat the argument as a filesystem path only when it looks like
        // one; otherwise it is a bare workspace name with no parent.
        if (path.contains("\\") || path.contains("/") || path.equals(".") || path.equals("..")) {
            Path pp = Paths.get(path).toAbsolutePath().normalize();
            n = pp.getFileName().toString();
            p = pp.getParent() == null ? null : pp.getParent().toString();
        } else {
            n = path;
            p = "";
        }
        if (p == null) {
            // Filesystem root: no parent to derive a prefix from.
            return ("Root " + n).trim();
        } else {
            // Default workspace root (two levels above the default location).
            Path root = Paths.get(NutsUtilPlatforms.getWorkspaceLocation(
                    null,
                    false,
                    null
            )).getParent().getParent();
            if (p.equals(root.toString())) {
                return n;
            }
            return (getHashName(p) + " " + n).trim();
        }
    }

    @Override
    public String getHashName(Object source) {
        return getHashName(source, null);
    }

    /**
     * Computes a hash name for {@code source}.
     *
     * @param source     object to name; {@code null} yields {@code "default"}
     * @param sourceType optional hint; {@code "workspace"} routes strings and
     *                   paths through {@link #getWorkspaceHashName(String)}
     * @return human-friendly name
     */
    @Override
    public String getHashName(Object source, String sourceType) {
        if (source == null) {
            return "default";
        } else if (source instanceof String) {
            if ("workspace".equalsIgnoreCase(sourceType)) {
                return getWorkspaceHashName(source.toString());
            }
            if (source.toString().isEmpty()) {
                return "empty";
            }
            return getHashName(source.hashCode());
        } else if (source instanceof NutsPath) {
            if ("workspace".equalsIgnoreCase(sourceType)) {
                return getWorkspaceHashName(source.toString());
            }
            return getHashName(source.hashCode());
        } else if (source instanceof NutsWorkspace) {
            NutsPath location = ((NutsWorkspace) source).getLocation();
            return getWorkspaceHashName(location == null ? null : location.toString());
        } else if (source instanceof NutsSession) {
            NutsPath location = ((NutsSession) source).getWorkspace().getLocation();
            return getWorkspaceHashName(location == null ? null : location.toString());
        } else if (source instanceof Integer) {
            // Base case: fold the integer onto the color-name table.
            int i = (int) source;
            return CoreNutsUtils.COLOR_NAMES[Math.abs(i) % CoreNutsUtils.COLOR_NAMES.length];
        } else {
            return getHashName(source.hashCode());
        }
    }

    @Override
    public int getSupportLevel(NutsSupportLevelContext context) {
        return DEFAULT_SUPPORT;
    }
}
|
* Suspects wanted to commit holy war, police say
* NY Jews shocked to be target of purported attack
* Mayor, police commissioner greet worshipers
By Edith Honan
NEW YORK, May 21 (Reuters) - New York’s mayor and police chief sought to calm Jewish worshipers on Thursday, the morning after authorities said they foiled a plot to blow up two synagogues and simultaneously shoot down military planes.
Four men arrested in the suspected plot were due to appear in court later in the day in White Plains, New York. Police Commissioner Raymond Kelly said all four had criminal records and did not appear to be part of al Qaeda.
As they greeted worshipers at one of the targeted synagogues Thursday morning, Kelly and Mayor Michael Bloomberg conveyed calm following the latest threat to New York City, which has been on high alert for another attack since the Sept. 11 hijacked plane attacks of 2001.
The FBI and New York police arrested the four Muslim men on Wednesday night after they planted what they believed to be explosives in two cars — one parked outside each synagogue — and planned to head to an air base with what they thought was an activated stinger surface-to-air missile.
But the explosives were inert and the stinger deactivated as the four suspects had been infiltrated by an FBI informant who provided the fake weapons.
"They stated that they wanted to commit jihad," Kelly told reporters, using a term that can mean holy war. "They were disturbed about what was happening in Afghanistan and Pakistan, that Muslims were being killed. They made the statement that if Jews were killed in this attack that would be all right."
Worshipers at the Riverdale Jewish Center, an orthodox synagogue that had an early morning service, were shocked.
"It’s just unbelievable, unbelievable, that it’s here in this community," said Rose Spindler, who said she was a Holocaust survivor. "They should let us live. How can they come here and do that to innocent people? We were very lucky."
David Winter, the executive director of the Riverdale Jewish Center, said the possibility of an attack was "always in the back of your mind."
"We were shocked. The shock and being floored is followed by relief," Winter said.
The other target, the Riverdale Temple, is a reform synagogue.
The suspects were due in court on the day U.S. President Barack Obama was to speak on national security and outline his strategy for closing the U.S. prison at Guantanamo Bay where terrorism suspects are being held. [ID:nN20544607]
HOME-GROWN
None of the four suspects had any known connection to al Qaeda, Kelly said. One of them was of Haitian descent and the other three American-born.
"It speaks to our concern about home-grown terrorism ... that in many ways is the most difficult to address," Kelly said.
The two synagogues are in a wealthy area of the Bronx, just north of Manhattan and near a highway that leads upstate toward New York’s Air National Guard base at Stewart airport in Newburgh, where authorities said the men planned to shoot down planes with surface-to-air guided missiles.
The suspects were identified as James Cromitie, David Williams, Onta Williams and Laguerre Payen. Kelly called Cromitie, 53, the leader of the group. Two of the others were aged 29 and 33. Kelly said they may have converted to an extreme vision of Islam in jail.
Each man is charged with one count of conspiracy to use weapons of mass destruction within the United States, which carries a maximum penalty of life in prison, and one count of conspiracy to acquire and use anti-aircraft missiles, which also carries a maximum penalty of life in prison.
All the men lived in Newburgh, about 60 miles (100 km) north of New York City, authorities said.
According to investigators, Cromitie said if he died a martyr, he would go to "paradise" and that he was interested in doing "something to America," the complaint said.
In October, Cromitie and the other men began a series of meetings at a house in Newburgh to plot their attacks and just last month they selected the synagogue and Jewish community center and conducted surveillance, it said.
The complaint said they bought an arsenal in May that included improvised explosive devices containing inert C-4 plastic explosives and a surface-to-air guided missile provided by the FBI that was not capable of being fired.
In November, according to the complaint, Cromitie said, "The best target (the World Trade Center) was hit already" and "I would like to get (destroy) a synagogue." (Additional reporting by Mark Egan; Writing by Daniel Trotta; Editing by Bill Trott) |
def add_handler(self, ns_or_h):
    """Register one or more handlers with every provider.

    ``ns_or_h`` may be a single handler or a namespace/module to scan;
    ``_discover`` filters it with each provider's ``is_handler`` predicate.

    :param ns_or_h: handler object or namespace containing handlers
    :return: ``self``, to allow fluent chaining
    :raises ExhibitionistError: if the server has already been started
    """
    if self.started_ok:
        raise ExhibitionistError(
            "can only add handlers before server start")
    for prvdr in self.providers:
        # Subscribe via a plain loop (the original used a list
        # comprehension purely for its side effects).
        for handler in self._discover(prvdr.is_handler, ns_or_h):
            prvdr.subscribe(handler)
    return self
<filename>u-librock/mit/librock_fdio.h
#if !defined(librock_fdio_H)
#define librock_fdio_H 1
/* This is a compatibility and portability header to help
write sources that are platform independent.
More info at http://www.mibsoftware.com/
*/
#include <fcntl.h> //O_RDONLY, etc.
#if defined(LIBROCK_WANT_fileSha256Contents)
const char *librock_fileSha256Contents(const char *fname, unsigned char *mdBuffer32, unsigned long *contentLength);
#endif
#if defined(LIBROCK_WANT_fileGetContents)
void *librock_fileGetContents(const char *fileName, off_t *returnLength);
#endif
#if defined __MINGW32__ && defined __MSVCRT__
# include <io.h> // _sopen_s
# if !defined LIBROCK_FDIO_DEFINED
# define LIBROCK_FDIO_DEFINED 1
# define librock_fdOpenReadOnly(fname) open(fname, O_RDONLY|O_BINARY)
# define librock_fdSeek _lseek
# define librock_fdRead _read
# define librock_fdClose _close
# endif
#elif defined _MSC_VER
# if !defined LIBROCK_FDIO_DEFINED
# define LIBROCK_FDIO_DEFINED 1
# include <io.h> // _sopen_s
# include <share.h> // SH_DENYNO
/* Open a file read-only in binary mode and return its descriptor, or -1 on
   failure, using the secure MSVC variant _sopen_s with no sharing denial.
   NOTE(review): this is a non-static function *defined* in a header; if the
   header is included from more than one translation unit of the same program
   it will produce duplicate-symbol link errors — consider making it static
   (or __inline), matching the macro-only branches above/below. */
int librock_fdOpenReadOnly(const char *fname)
{
    int fd;
    _sopen_s(&fd, fname, _O_RDONLY|_O_BINARY, _SH_DENYNO, 0/*ignored when not O_CREAT */);
    return fd;
}
# define librock_fdSeek _lseek
# define librock_fdRead _read
# define librock_fdClose _close
# endif
#else
# if !defined LIBROCK_FDIO_DEFINED
# include <unistd.h> //open, etc.
# define LIBROCK_FDIO_DEFINED 1
# define librock_fdOpenReadOnly(fname) open(fname, O_RDONLY)
# define librock_fdRead read
# define librock_fdClose close
# define librock_fdSeek lseek
# endif
#endif
#endif
|
<filename>sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/query/OrderByContinuationToken.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.cosmos.implementation.query;
import com.azure.cosmos.BridgeInternal;
import com.azure.cosmos.models.JsonSerializable;
import com.azure.cosmos.implementation.Utils.ValueHolder;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
* While this class is public, but it is not part of our published public APIs.
* This is meant to be internally used only by our sdk.
*/
public final class OrderByContinuationToken extends JsonSerializable {
    private static final String CompositeContinuationTokenPropertyName = "compositeToken";
    // Fixed misspelling of the constant's identifier ("Propetry" ->
    // "Property"); the serialized JSON property name is unchanged.
    private static final String OrderByItemsPropertyName = "orderByItems";
    private static final String RidPropertyName = "rid";
    private static final String InclusivePropertyName = "inclusive";
    private static final Logger logger = LoggerFactory.getLogger(OrderByContinuationToken.class);

    /**
     * Creates a continuation token for an ORDER BY query.
     *
     * @param compositeContinuationToken backend continuation for the current range (required)
     * @param orderByItems               values of the ORDER BY expressions at the resume point (non-empty)
     * @param rid                        resource id of the last returned document (required)
     * @param inclusive                  whether the resume point itself is included
     */
    public OrderByContinuationToken(CompositeContinuationToken compositeContinuationToken, QueryItem[] orderByItems,
            String rid, boolean inclusive) {
        if (compositeContinuationToken == null) {
            throw new IllegalArgumentException("CompositeContinuationToken must not be null.");
        }

        if (orderByItems == null) {
            throw new IllegalArgumentException("orderByItems must not be null.");
        }

        if (orderByItems.length == 0) {
            throw new IllegalArgumentException("orderByItems must not be empty.");
        }

        if (rid == null) {
            throw new IllegalArgumentException("rid must not be null.");
        }

        this.setCompositeContinuationToken(compositeContinuationToken);
        this.setOrderByItems(orderByItems);
        this.setRid(rid);
        this.setInclusive(inclusive);
    }

    private OrderByContinuationToken(String serializedOrderByContinuationToken) {
        super(serializedOrderByContinuationToken);
    }

    /**
     * Attempts to parse a serialized order-by continuation token.
     * On failure the cause is logged at debug level and {@code false} is
     * returned with the out-holder cleared; parsing never throws.
     *
     * @param serializedOrderByContinuationToken JSON form of the token
     * @param outOrderByContinuationToken        receives the parsed token, or null
     * @return true if parsing (and field validation) succeeded
     */
    public static boolean tryParse(String serializedOrderByContinuationToken,
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken) {
        boolean parsed;
        try {
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                    serializedOrderByContinuationToken);
            CompositeContinuationToken compositeContinuationToken = orderByContinuationToken
                    .getCompositeContinuationToken();

            if (compositeContinuationToken == null) {
                throw new IllegalArgumentException("compositeContinuationToken must not be null.");
            }

            // Accessors are invoked for their side effects: each one throws
            // if the corresponding field is missing or malformed.
            orderByContinuationToken.getOrderByItems();
            orderByContinuationToken.getRid();
            orderByContinuationToken.getInclusive();
            outOrderByContinuationToken.v = orderByContinuationToken;
            parsed = true;
        } catch (Exception ex) {
            logger.debug(
                    "Received exception {} when trying to parse: {}",
                    ex.getMessage(),
                    serializedOrderByContinuationToken);
            parsed = false;
            outOrderByContinuationToken.v = null;
        }

        return parsed;
    }

    public CompositeContinuationToken getCompositeContinuationToken() {
        ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>();
        boolean succeeded = CompositeContinuationToken.tryParse(super.getString(CompositeContinuationTokenPropertyName),
                outCompositeContinuationToken);
        if (!succeeded) {
            throw new IllegalArgumentException("Continuation Token was not able to be parsed");
        }
        return outCompositeContinuationToken.v;
    }

    public QueryItem[] getOrderByItems() {
        List<QueryItem> queryItems = new ArrayList<QueryItem>();
        ArrayNode arrayNode = (ArrayNode) super.get(OrderByItemsPropertyName);

        for (JsonNode jsonNode : arrayNode) {
            QueryItem queryItem = new QueryItem(jsonNode.toString());
            queryItems.add(queryItem);
        }

        QueryItem[] queryItemsArray = new QueryItem[queryItems.size()];
        return queryItems.toArray(queryItemsArray);
    }

    public String getRid() {
        return super.getString(RidPropertyName);
    }

    public boolean getInclusive() {
        return Boolean.TRUE.equals(super.getBoolean(InclusivePropertyName));
    }

    private void setCompositeContinuationToken(CompositeContinuationToken compositeContinuationToken) {
        BridgeInternal.setProperty(this, CompositeContinuationTokenPropertyName, compositeContinuationToken.toJson());
    }

    private void setOrderByItems(QueryItem[] orderByItems) {
        BridgeInternal.setProperty(this, OrderByItemsPropertyName, orderByItems);
    }

    private void setRid(String rid) {
        BridgeInternal.setProperty(this, RidPropertyName, rid);
    }

    private void setInclusive(boolean inclusive) {
        BridgeInternal.setProperty(this, InclusivePropertyName, inclusive);
    }
}
|
/**
* Convert a hash to a string of hex digits.
*
* @param hash
* the hash
* @return a String representation of the hash
*/
public static String hashToString(byte[] hash) {
StringBuilder buf = new StringBuilder();
for (byte b : hash) {
buf.append(HEX_CHARS[(b >> 4) & 0xF]);
buf.append(HEX_CHARS[b & 0xF]);
}
return buf.toString();
} |
// newStoreCacheEntry creates a new cache entry with a refcount of one.
func newStoreCacheEntry() *storeCacheEntry {
m := &sync.Mutex{}
return &storeCacheEntry{
m: m,
cond: sync.NewCond(m),
refCount: 1,
}
} |
// Represents a window's geometry (current and maximum size) and its
// maximized state.
public class Ventana
{
    // |------------------------Private attributes----------------------------|
    private int Ancho;          // current width in pixels
    private int Altura;         // current height in pixels
    private double MaxAncho;    // maximum allowed width
    private double MaxAltura;   // maximum allowed height
    private boolean Maximizada; // whether the window is maximized

    // |------------------------Public methods---------------------------------|

    /**
     * Default constructor applying the default window geometry.
     *
     * BUGFIX: the previous revision declared this as
     * {@code public void Ventana()}, which is an ordinary method that merely
     * shares the class name — NOT a constructor — so {@code new Ventana()}
     * left every field at its zero default. Dropping the {@code void} return
     * type makes it a real constructor.
     */
    public Ventana()
    {
        // Default object values
        Altura = 720;
        Ancho = 480;
        MaxAncho = 1080;
        MaxAltura = 1920;
        Maximizada = false;
    }

    /**
     * Copy constructor (same fix as above: it previously was a void method).
     *
     * @param ventanaACopiar window whose state is copied
     */
    public Ventana(Ventana ventanaACopiar)
    {
        Altura = ventanaACopiar.Altura;
        Ancho = ventanaACopiar.Ancho;
        MaxAncho = ventanaACopiar.MaxAncho;
        MaxAltura = ventanaACopiar.MaxAltura;
        Maximizada = ventanaACopiar.Maximizada;
    }

    // |-----------------------------Getters-----------------------------------|

    /** @return current width in pixels */
    public int getAncho()
    {
        return Ancho;
    }

    /** @return current height in pixels */
    public int getAltura()
    {
        return Altura;
    }

    /** @return maximum allowed width */
    public double getMaxAncho()
    {
        return MaxAncho;
    }

    /** @return maximum allowed height */
    public double getMaxAltura()
    {
        return MaxAltura;
    }

    /** @return true when the window is maximized */
    public boolean getMaximizada()
    {
        return Maximizada;
    }

    // |-----------------------------Setters-----------------------------------|

    /** Sets the width; the fractional part is truncated (int field). */
    public void setAncho(double anchoAAsignar)
    {
        Ancho = (int) anchoAAsignar;
    }

    /** Sets the height; the fractional part is truncated (int field). */
    public void setAltura(double alturaAAsignar)
    {
        Altura = (int) alturaAAsignar;
    }

    /** Sets the maximum allowed width. */
    public void setMaxAncho(double maxAnchoAAsignar)
    {
        MaxAncho = maxAnchoAAsignar;
    }

    /** Sets the maximum allowed height. */
    public void setMaxAltura(double maxAlturaAAsignar)
    {
        MaxAltura = maxAlturaAAsignar;
    }

    /** Sets the maximized flag. */
    public void setMaximizada(boolean maximizadaAAsignar)
    {
        Maximizada = maximizadaAAsignar;
    }
}
Evaluation of the Quality of Aerial Links in Low-Power Wireless Sensor Networks A wireless sensor network (WSN) assisted by Unmanned Aerial Vehicles (UAVs) can be used to monitor various phenomena in remote, extensive, inaccessible, or dangerous places. The WSN on the ground can provide close-to-the-scene sensing, in-network data processing, and multi-hop communication. The UAVs can interface the ground network with a remote control station or facilitate fast and flexible data collection. To this end, the aerial links established between the UAVs and the WSN are critical. The reliability of the links depends on many factors, including Cross Technology Interference (CTI), the relative distance of the UAVs from the ground nodes, the drive quality of the UAVs, and noise. In this paper, we present experimental results addressing some of these issues. Our experiments consisted of eleven IEEE 802.15.4 compliant transceivers, nine of which were deployed on the ground in a grid topology with a further two being attached to a UAV. From careful examination of traces extracted from received packets we concluded that CTI is the most significant factor affecting the quality of aerial links. Our observations have application for deployment related decision-making and for the design of UAV-assisted data collection protocols. |
<filename>classical_hyperopt.py<gh_stars>0
import argparse
import cProfile
import json
import logging
import os
import pickle
from time import time
import cv2
import numpy as np
import sklearn
from optuna._callbacks import RetryFailedTrialCallback
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.kernel_approximation import Nystroem
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from detection import models, utils, datasets, evaluation
from detection import trainingtools
import optuna
MAX_RETRY = 2
def get_args_parser(add_help=True):
    """Build the CLI argument parser for the classical-pipeline hyperopt run.

    :param add_help: forwarded to argparse (set False when composing parsers)
    :return: a configured argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(description="Hyperparameter Optimisation for Classical Pipeline", add_help=add_help)

    # Data / study configuration
    parser.add_argument("--train-path", type=str, help="path to training dataset")
    parser.add_argument("--test-path", type=str, help="path to test dataset")
    parser.add_argument("--class-file", default="classes.json", type=str, help="path to class definitions")
    parser.add_argument("--db-login", type=str, help="path to database login file")
    # NOTE(review): "--tr" looks like a leftover short alias; confirm it is
    # still intended for --val-split.
    parser.add_argument("--val-split", "--tr", default=0.2, type=float,
                        help="proportion of training dataset to use for validation")
    parser.add_argument("--iou-thresh", default=0.5, type=float, help="IoU threshold for evaluation")
    parser.add_argument("--log-file", "--lf", default=None, type=str,
                        help="path to file for writing logs. If omitted, writes to stdout")
    parser.add_argument("--log-level", default="INFO", choices=("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"))

    # Hard negative mining parameters
    parser.add_argument("--neg-per-img", default=None, type=int, help="how many hard negatives to mine from each image")

    # Execution / optimisation parameters
    parser.add_argument("--profile", action="store_true", help="run with profile statistics")
    parser.add_argument("--n-cpus", type=int, help="number of cpus to use per trial")
    parser.add_argument("--n-trials", type=int, help="number of optimization trials")
    parser.add_argument("--study-name", type=str, help="name of optimization study")
    parser.add_argument("--cached-path", type=str, help="path to cached descriptors and labels")
    return parser
class Objective(object):
    """Optuna objective for the classical detection pipeline.

    Each trial trains a scaler -> PCA -> Nystroem RBF map -> SGDClassifier
    pipeline on precomputed descriptors and returns the validation mAP
    (0.0 when mAP comes back NaN, so failed evaluations do not poison the
    study with NaNs).
    """

    def __init__(self, val_dataset, n_cpus, feature_extractor, descriptors, labels):
        # Keep implementation-specific arguments as fields so __call__ only
        # needs the trial object (Optuna's expected callable signature).
        self.val_dataset = val_dataset
        self.n_cpus = n_cpus
        self.feature_extractor = feature_extractor
        self.descriptors = descriptors
        self.labels = labels

    def __call__(self, trial):
        # Train SVM-style linear model on an approximate RBF feature map.
        # PCA / Nystroem sizes are fixed; only alpha and gamma are searched.
        pca_components = 300
        rbf_components = 2000
        alpha = trial.suggest_float('sgd_alpha', 1e-12, 1e-2, log=True)
        gamma = trial.suggest_float('rbf_gamma', 1e-8, 0.1, log=True)
        clf = Pipeline(steps=[('scaler', StandardScaler()),
                              ('pca', PCA(n_components=pca_components)),
                              ('feature_map', Nystroem(n_components=rbf_components, gamma=gamma)),
                              ('model', SGDClassifier(alpha=alpha, max_iter=100000, early_stopping=True))])
        start = time()
        clf.fit(self.descriptors, self.labels)
        fit_time = time() - start
        logging.info(f"Fit took {fit_time:.1f} seconds")
        # Record the fit time on the trial for later analysis in the study DB.
        trial.set_user_attr("fit_time", fit_time)

        # Evaluate on the held-out validation split.
        result = trainingtools.evaluate_classifier(clf, feature_extractor=self.feature_extractor,
                                                   dataset=self.val_dataset, plot_pc=False, cpus=self.n_cpus)
        return result['mAP'] if not np.isnan(result['mAP']) else 0.0
def main(args):
    """End-to-end driver: load data, extract (or load cached) HOG
    descriptors, mine hard negatives, then run an Optuna grid search over
    the SGD alpha / RBF gamma hyperparameters.

    :param args: parsed CLI namespace from get_args_parser()
    """
    utils.initialise_logging(args)
    utils.make_deterministic(42)
    cv2.setUseOptimized(True)
    # Keep OpenCV single-threaded; parallelism is managed per trial instead.
    cv2.setNumThreads(1)

    with open(args.class_file, "r") as f:
        classes = json.load(f)

    # Load datasets
    logging.info("Loading dataset...")
    train_dataset, val_dataset = datasets.load_train_val(name='FathomNet', train_path=args.train_path, classes=classes,
                                                         val_split=args.val_split, train_transforms=None, val_transforms=None)

    # Create feature extractor
    feature_extractor = models.HOG(orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2),
                                   block_norm='L2-Hys', gamma_corr=True, resize_to=(128, 128))

    # Reuse descriptors/labels from a previous run when available.
    cache_file = os.path.join(args.cached_path, 'classical_hyperopt.cached.pickle')
    if os.path.isfile(cache_file):
        with open(cache_file, 'rb') as f:
            descriptors, labels = pickle.load(f)
        logging.info("Loading existing cached training data")
    else:
        # Extract features (with flip/rotation augmentation)
        descriptors, labels = feature_extractor.extract_all(train_dataset, cpus=args.n_cpus,
                                                            horizontal_flip=True,
                                                            rotations=[30, -30])

        # BUGFIX: `import sklearn` at module level does not guarantee the
        # `sklearn.dummy` submodule is loaded; import it explicitly instead
        # of relying on transitive imports.
        from sklearn.dummy import DummyClassifier
        # A constant "positive" classifier so mining treats every region
        # proposal as a candidate hard negative.
        clf = DummyClassifier(strategy='constant', constant=1)
        clf.fit(descriptors[:2], [0, 1])

        # Apply negative mining
        logging.info("Performing hard negative mining")
        negative_samples = trainingtools.mine_hard_negatives(clf, feature_extractor, train_dataset,
                                                             iou_thresh=args.iou_thresh, max_per_img=args.neg_per_img,
                                                             cpus=args.n_cpus)
        descriptors = np.concatenate((descriptors, negative_samples))
        labels = np.concatenate((labels, np.zeros(negative_samples.shape[0], dtype=np.int64)))
        logging.info(
            f"Added {len(negative_samples)} negatives to the {len(descriptors) - len(negative_samples)} positives")
        with open(cache_file, 'wb') as f:
            pickle.dump((descriptors, labels), f)

    logging.info(f"Training on {len(descriptors)} samples with {len(descriptors[0])} dimensions")
    objective = Objective(val_dataset, args.n_cpus, feature_extractor, descriptors, labels)

    with open(args.db_login, "r") as f:
        login = json.load(f)

    # Shared Postgres storage lets multiple workers join the same study;
    # failed trials are retried up to MAX_RETRY times via heartbeat detection.
    storage = optuna.storages.RDBStorage(
        "postgresql://" + login["username"] + ":" + login["password"] + "@" + login["host"] + "/postgres",
        heartbeat_interval=1,
        failed_trial_callback=RetryFailedTrialCallback(max_retry=MAX_RETRY),
    )

    search_space = {
        "sgd_alpha": np.logspace(-11, -5, 7),
        "rbf_gamma": np.logspace(-6, -3, 4)
    }
    study = optuna.create_study(
        storage=storage, study_name=args.study_name, direction="maximize", load_if_exists=True,
        sampler=optuna.samplers.GridSampler(search_space)
    )
    study.optimize(objective)
if __name__ == '__main__':
    # Parse arguments
    parsed_args = get_args_parser().parse_args()
    if parsed_args.profile:
        # Dump cProfile stats to ./restats for offline inspection via pstats.
        cProfile.run('main(parsed_args)', 'restats')
    else:
        main(parsed_args)
|
<reponame>rxrevu/aws-codepipeline-custom-job-worker
/*
* Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in
* compliance with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package com.amazonaws.codepipeline.jobworker;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.commons.daemon.Daemon;
import org.apache.commons.daemon.DaemonContext;
import org.apache.commons.daemon.DaemonInitException;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import com.amazonaws.codepipeline.jobworker.configuration.JobWorkerConfiguration;
import com.amazonaws.codepipeline.jobworker.model.RegionNotFoundException;
import com.amazonaws.codepipeline.jobworker.configuration.CustomActionJobWorkerConfiguration;
/**
 * The daemon schedules the poller at a fixed time rate.
 */
public class JobWorkerDaemon implements Daemon {

    private static final Logger LOGGER = LogManager.getLogger(JobWorkerDaemon.class);

    /** Runs the job poller periodically; owned by this daemon and shut down in {@link #stop()}. */
    private final ScheduledExecutorService executorService;

    /** Poller invoked on every tick; may be replaced by {@link #loadConfiguration(String[])}. */
    private JobPoller jobPoller;

    /** Delay between poller invocations, taken from the active configuration. */
    private long pollingIntervalInMs;

    /**
     * Initializes the daemon with default settings:
     * Scheduled Thread Pool with pool size 1 to invoke job poller on a fixed rate.
     * (Default every 30 seconds)
     * Uses the custom action configuration as a default.
     */
    public JobWorkerDaemon() {
        this(Executors.newScheduledThreadPool(1), new CustomActionJobWorkerConfiguration());
    }

    /**
     * Initializes daemon with a custom scheduled executor service and poller.
     * @param executorService scheduled executor service
     * @param jobWorkerConfiguration job worker configuration class defining settings and dependencies
     */
    public JobWorkerDaemon(final ScheduledExecutorService executorService, final JobWorkerConfiguration jobWorkerConfiguration) {
        Validator.notNull(executorService);
        Validator.notNull(jobWorkerConfiguration);
        this.executorService = executorService;
        initConfiguration(jobWorkerConfiguration);
    }

    /**
     * Initializes the daemon. A single optional argument names a
     * {@link JobWorkerConfiguration} implementation class to load instead of the default.
     * @param context daemon context.
     * @throws DaemonInitException exception during initialization
     */
    @Override
    public void init(final DaemonContext context) throws DaemonInitException {
        LOGGER.info("Initialize daemon.");
        final String[] arguments = context.getArguments();
        if (arguments != null) {
            LOGGER.debug(String.format("JobWorker arguments '%s'", String.join(", ", arguments)));
            loadConfiguration(arguments);
        }
    }

    /**
     * Starts the daemon. Initializes the executor service to execute the job poller at a fixed rate.
     * @throws Exception exception during start up
     */
    @Override
    public void start() throws Exception {
        LOGGER.info("Starting up daemon.");
        executorService.scheduleAtFixedRate(jobPollerRunnable(),
                pollingIntervalInMs,
                pollingIntervalInMs,
                TimeUnit.MILLISECONDS);
    }

    /**
     * Stops the daemon. Shuts down the executor service gracefully.
     * Waits until the job poller and job processors finished their work.
     * @throws Exception exception during shutdown
     */
    @Override
    public void stop() throws Exception {
        LOGGER.info("Stopping daemon.");
        this.executorService.shutdown();
        try {
            // Give in-flight work one minute to drain, then force shutdown and
            // give cancelled tasks another minute to terminate.
            if (!this.executorService.awaitTermination(1, TimeUnit.MINUTES)) {
                this.executorService.shutdownNow();
                if (!this.executorService.awaitTermination(1, TimeUnit.MINUTES)) {
                    throw new IllegalStateException("Failed graceful shutdown of executor threads");
                }
            }
        } catch (final InterruptedException e) {
            this.executorService.shutdownNow();
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
        }
        LOGGER.info("Stopped daemon.");
    }

    /**
     * Destroys the daemon.
     */
    @Override
    public void destroy() {
        LOGGER.info("Destroying daemon.");
    }

    /**
     * Wraps the poller so a failing poll cycle is logged instead of silently
     * cancelling the periodic schedule (scheduleAtFixedRate stops on uncaught exceptions).
     */
    private Runnable jobPollerRunnable() {
        return () -> {
            try {
                jobPoller.execute();
            } catch (final RuntimeException e) { // NOPMD
                LOGGER.error("Caught exception while processing jobs", e);
            }
        };
    }

    /**
     * Loads a {@link JobWorkerConfiguration} implementation named by the single daemon argument.
     * @param arguments daemon arguments; only the first is used, and only when exactly one is given
     * @throws DaemonInitException if the class cannot be found, instantiated or cast
     */
    private void loadConfiguration(final String[] arguments) throws DaemonInitException {
        if (arguments.length == 1) {
            final String configurationClassName = arguments[0];
            try {
                // Class.newInstance() is deprecated; use the no-arg constructor reflectively
                // and let asSubclass() surface a ClassCastException for wrong types.
                final JobWorkerConfiguration jobWorkerConfiguration = Class.forName(configurationClassName)
                        .asSubclass(JobWorkerConfiguration.class)
                        .getDeclaredConstructor()
                        .newInstance();
                initConfiguration(jobWorkerConfiguration);
            } catch (final ReflectiveOperationException | ClassCastException | RegionNotFoundException e) {
                throw new DaemonInitException(
                        String.format("Provided job worker configuration class '%s' could not be loaded.", configurationClassName),
                        e);
            }
        }
    }

    /** Applies the given configuration's poller and polling interval. */
    private void initConfiguration(final JobWorkerConfiguration jobWorkerConfiguration) {
        this.jobPoller = jobWorkerConfiguration.jobPoller();
        this.pollingIntervalInMs = jobWorkerConfiguration.getPollingIntervalInMs();
    }
}
|
// DO NOT EDIT THIS FILE - it is machine generated -*- c++ -*-
#ifndef __java_awt_print_PrinterJob__
#define __java_awt_print_PrinterJob__
#pragma interface
#include <java/lang/Object.h>
#include <gcj/array.h>
extern "Java"
{
namespace java
{
namespace awt
{
namespace print
{
class PageFormat;
class Pageable;
class Printable;
class PrinterJob;
}
}
}
namespace javax
{
namespace print
{
class PrintService;
class StreamPrintServiceFactory;
namespace attribute
{
class PrintRequestAttributeSet;
}
}
}
}
// CNI proxy declaration generated by gcjh for java.awt.print.PrinterJob.
// Pure-virtual members mirror the abstract methods of the Java class;
// per the notice at the top of this file, do not edit by hand.
class java::awt::print::PrinterJob : public ::java::lang::Object
{
public:
  // Factory returning the platform PrinterJob implementation.
  static ::java::awt::print::PrinterJob * getPrinterJob();
  PrinterJob();
  virtual jint getCopies() = 0;
  virtual void setCopies(jint) = 0;
  virtual ::java::lang::String * getJobName() = 0;
  virtual void setJobName(::java::lang::String *) = 0;
  virtual ::java::lang::String * getUserName() = 0;
  virtual void cancel() = 0;
  virtual jboolean isCancelled() = 0;
  // Page setup: defaults, page-setup dialog, and validation.
  virtual ::java::awt::print::PageFormat * defaultPage();
  virtual ::java::awt::print::PageFormat * defaultPage(::java::awt::print::PageFormat *) = 0;
  virtual ::java::awt::print::PageFormat * pageDialog(::java::awt::print::PageFormat *) = 0;
  virtual ::java::awt::print::PageFormat * pageDialog(::javax::print::attribute::PrintRequestAttributeSet *);
  virtual void print() = 0;
  virtual void print(::javax::print::attribute::PrintRequestAttributeSet *);
  virtual jboolean printDialog() = 0;
  virtual jboolean printDialog(::javax::print::attribute::PrintRequestAttributeSet *);
  virtual void setPageable(::java::awt::print::Pageable *) = 0;
  virtual void setPrintable(::java::awt::print::Printable *) = 0;
  virtual void setPrintable(::java::awt::print::Printable *, ::java::awt::print::PageFormat *) = 0;
  virtual ::java::awt::print::PageFormat * validatePage(::java::awt::print::PageFormat *) = 0;
  // javax.print service discovery and selection.
  static JArray< ::javax::print::PrintService * > * lookupPrintServices();
  static JArray< ::javax::print::StreamPrintServiceFactory * > * lookupStreamPrintServices(::java::lang::String *);
  virtual ::javax::print::PrintService * getPrintService();
  virtual void setPrintService(::javax::print::PrintService *);
private:
  // Currently selected print service (aligned like a plain Object field).
  ::javax::print::PrintService * __attribute__((aligned(__alignof__( ::java::lang::Object)))) printer;
public:
  static ::java::lang::Class class$;
};
#endif // __java_awt_print_PrinterJob__
|
<reponame>vigsterkr/renku-python
# -*- coding: utf-8 -*-
#
# Copyright 2018-2019- Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Track provenance of data created by executing programs.
Capture command line execution
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tracking execution of your command line script is done by simply adding the
``renku run`` command before the actual command. This will enable detection of:
* arguments (flags),
* string and integer options,
* input files or directories if linked to existing paths in the repository,
* output files or directories if modified or created while running the command.
.. note:: If there were uncommitted changes in the repository, then the
``renku run`` command fails. See :program:`git status` for details.
.. warning:: Input and output paths can only be detected if they are passed as
arguments to ``renku run``.
Detecting input paths
~~~~~~~~~~~~~~~~~~~~~
Any path passed as an argument to ``renku run``, which was not changed during
the execution, is identified as an input path. The identification only works if
the path associated with the argument matches an existing file or directory
in the repository.
The detection might not work as expected
if:
* a file is **modified** during the execution. In this case it will be stored
as an **output**;
* a path is not passed as an argument to ``renku run``.
Detecting output paths
~~~~~~~~~~~~~~~~~~~~~~
Any path **modified** or **created** during the execution will be added as an
output.
Because the output path detection is based on the Git repository state after
the execution of ``renku run`` command, it is good to have a basic
understanding of the underlying principles and limitations of tracking
files in Git.
Git tracks not only the paths in a repository, but also the content stored in
those paths. Therefore:
* a recreated file with the same content is not considered an output file,
but instead is kept as an input;
* file moves are detected based on their content and can cause problems;
* directories cannot be empty.
.. note:: When in doubt whether the outputs will be detected, remove all
outputs using ``git rm <path>`` followed by ``git commit`` before running
the ``renku run`` command.
.. topic:: Command does not produce any files (``--no-output``)
If the program does not produce any outputs, the execution ends with an
error:
.. code-block:: text
Error: There are not any detected outputs in the repository.
You can specify the ``--no-output`` option to force tracking of such
an execution.
.. cli-run-std
Detecting standard streams
~~~~~~~~~~~~~~~~~~~~~~~~~~
Often the program expect inputs as a standard input stream. This is detected
and recorded in the tool specification when invoked by ``renku run cat < A``.
Similarly, both redirects to standard output and standard error output can be
done when invoking a command:
.. code-block:: console
$ renku run grep "test" B > C 2> D
.. warning:: Detecting inputs and outputs from pipes ``|`` is not supported.
Exit codes
~~~~~~~~~~
All Unix commands return a number between 0 and 255 which is called
"exit code". In case other numbers are returned, they are treated modulo 256
(-10 is equivalent to 246, 257 is equivalent to 1). The exit-code 0 represents
a *success* and non-zero exit-code indicates a *failure*.
Therefore the command specified after ``renku run`` is expected to return
exit-code 0. If the command returns a different exit code, you can specify the
allowed codes with the ``--success-code=<INT>`` parameter.
.. code-block:: console
$ renku run --success-code=1 --no-output fail
"""
import os
import sys
from subprocess import call
import click
from renku import errors
from renku.api._git import _mapped_std_streams
from renku.models.cwl.command_line_tool import CommandLineToolFactory
from ._client import pass_local_client
from ._options import option_isolation
# CLI entry point for ``renku run``: wraps an arbitrary command line, detects
# its inputs/outputs, executes it, and records the execution as a workflow step.
@click.command(context_settings=dict(ignore_unknown_options=True, ))
@click.option(
    'outputs',
    '--output',
    multiple=True,
    help='Force a path to be considered an output.',
)
@click.option(
    '--no-output',
    is_flag=True,
    default=False,
    help='Allow command without output files.',
)
@click.option(
    '--success-code',
    'success_codes',
    type=int,
    multiple=True,
    # Exit codes are normalised modulo 256, matching POSIX exit-status semantics.
    callback=lambda _, __, values: [int(value) % 256 for value in values],
    help='Allowed command exit-code.',
)
@option_isolation
@click.argument('command_line', nargs=-1, type=click.UNPROCESSED)
# Requires a clean, up-to-date repository and commits the results afterwards.
@pass_local_client(
    clean=True,
    up_to_date=True,
    commit=True,
    ignore_std_streams=True,
)
def run(client, outputs, no_output, success_codes, isolation, command_line):
    """Tracking work on a specific problem."""
    working_dir = client.repo.working_dir
    # Map of redirected standard streams (stdin/stdout/stderr) to repo paths.
    mapped_std = _mapped_std_streams(client.candidate_paths)
    # Build the CWL tool description from the raw command line; std-stream
    # paths are passed relative to the repository root.
    factory = CommandLineToolFactory(
        command_line=command_line,
        directory=os.getcwd(),
        working_dir=working_dir,
        successCodes=success_codes,
        **{
            name: os.path.relpath(path, working_dir)
            for name, path in mapped_std.items()
        }
    )
    with client.with_workflow_storage() as wf:
        # watch() snapshots repository state so outputs can be detected after the run.
        with factory.watch(
            client, no_output=no_output, outputs=outputs
        ) as tool:
            # Don't compute paths if storage is disabled.
            if client.has_external_storage:
                # Make sure all inputs are pulled from a storage.
                paths_ = (
                    path
                    for _, path in tool.iter_input_files(client.workflow_path)
                )
                client.pull_paths_from_storage(*paths_)
            # Execute the wrapped command, forwarding any redirected std streams.
            returncode = call(
                factory.command_line,
                cwd=os.getcwd(),
                **{key: getattr(sys, key)
                   for key in mapped_std.keys()},
            )
            # Default accepted exit code is 0 unless --success-code was given.
            if returncode not in (success_codes or {0}):
                raise errors.InvalidSuccessCode(
                    returncode, success_codes=success_codes
                )
            sys.stdout.flush()
            sys.stderr.flush()
        wf.add_step(run=tool)
|
import React from 'react';
import { InfoCircleOutlined } from '@ant-design/icons';
import { GrapheneState } from '../../types';
interface ClearProps {
  state: GrapheneState;
}

// Menu entry that clears the current canvas; the Graphene state prop is
// accepted for interface uniformity but not read here.
const Clear: React.FC<ClearProps> = () => (
  <div>
    <InfoCircleOutlined /> 清空当前画布
  </div>
);

export default Clear;
|
package com.skeqi.mes.controller.wms;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import javax.xml.namespace.QName;
import javax.xml.rpc.ParameterMode;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.interceptor.TransactionAspectSupport;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;
import org.apache.axis.client.Call;
import org.apache.axis.client.Service;
import org.apache.axis.encoding.XMLType;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.github.pagehelper.PageInfo;
import com.skeqi.mes.Exception.util.ServicesException;
import com.skeqi.mes.pojo.wms.CWmsApprovalT;
import com.skeqi.mes.pojo.wms.CWmsInTaskqueueT;
import com.skeqi.mes.pojo.wms.CWmsOutTaskqueueT;
import com.skeqi.mes.service.wms.InTaskqueueService;
import com.skeqi.mes.service.wms.OutTaskqueueService;
import com.skeqi.mes.util.Rjson;
import com.skeqi.mes.util.ToolUtils;
import com.skeqi.mes.util.aop.OptionalLog;
import com.skeqi.mes.util.yp.EqualsUtil;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
/**
 * 出库队列
 *
 * @author yinp
 * @date 2020年3月16日
 *
 */
@RestController
@RequestMapping(value = "wms/outTaskqueue", produces = MediaType.APPLICATION_JSON)
@Api(value = "出库队列", description = "出库队列", produces = MediaType.APPLICATION_JSON)
public class OutTaskqueueController {

    /** FIS 客户端 WebService 地址。NOTE(review): 建议外置到配置文件,便于部署时修改。 */
    private String url = "http://127.0.0.1/FISClientService.asmx?wsdl";

    @Autowired
    OutTaskqueueService service;

    @Autowired
    InTaskqueueService itService;

    /**
     * 查询条码
     *
     * @param request 参数:listNo(单号)、materialId(物料id)、projectId(项目id)、locationId(库位id)
     * @return 条码列表
     */
    @RequestMapping("/findBarCode")
    public Rjson findBarCode(HttpServletRequest request) {
        try {
            String listNo = EqualsUtil.string(request, "listNo", "单号", true);
            int materialId = EqualsUtil.integer(request, "materialId", "物料id", true);
            int projectId = EqualsUtil.integer(request, "projectId", "项目id", true);
            int locationId = EqualsUtil.integer(request, "locationId", "库位id", true);
            List<JSONObject> list = service.findBarCode(listNo, materialId, projectId, locationId);
            return Rjson.success(list);
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 直接出库
     *
     * @param request 参数:outTaskqueueId(出库队列id)、locationId(库位id)、listNo(单号)、tray(托盘码)
     * @return 执行结果;异常时回滚事务
     */
    @Transactional
    @RequestMapping("/directDelivery")
    @OptionalLog(module = "仓管", module2 = "出库队列", method = "直接出库")
    public Rjson directDelivery(HttpServletRequest request) {
        try {
            int outTaskqueueId = EqualsUtil.integer(request, "outTaskqueueId", "出库队列id", true);
            int locationId = EqualsUtil.integer(request, "locationId", "库位id", true);
            String listNo = EqualsUtil.string(request, "listNo", "单号", true);
            String tray = EqualsUtil.string(request, "tray", "托盘码", true);
            service.directDelivery(outTaskqueueId, listNo, tray, locationId);
            return Rjson.success();
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            // 手动标记当前事务回滚,保证出库操作的原子性
            TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 出库
     *
     * @param request 参数:outTaskqueueId(出库队列id)、locationId(库位id)
     * @return 执行结果;异常时回滚事务
     */
    @Transactional
    @ApiOperation(value = "出库", notes = "出库")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "outTaskqueueId", value = "出库队列id", required = true, paramType = "query")
    })
    @RequestMapping(value = "chuku0806", method = RequestMethod.POST)
    @OptionalLog(module = "仓管", module2 = "出库队列", method = "出库")
    public Rjson chuku0806(HttpServletRequest request) {
        try {
            Integer outTaskqueueId = EqualsUtil.integer(request, "outTaskqueueId", "出库队列id", true);
            Integer locationId = EqualsUtil.integer(request, "locationId", "库位id", true);
            // 返回值未被使用;失败场景通过异常传递
            service.chuku0806(outTaskqueueId, locationId);
            return Rjson.success("执行成功", true);
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 平库出库
     *
     * @param request 参数:outTaskqueueId(出库队列id)
     * @return 执行结果;异常时回滚事务
     */
    @Transactional
    @ApiOperation(value = "平库出库", notes = "平库出库")
    @ApiImplicitParams({@ApiImplicitParam(name = "outTaskqueueId", value = "出库队列id", required = true, paramType = "query")})
    @RequestMapping(value = "chuku", method = RequestMethod.POST)
    @OptionalLog(module = "仓管", module2 = "出库队列", method = "平库出库")
    public Rjson chuku(HttpServletRequest request) {
        try {
            Integer outTaskqueueId = EqualsUtil.integer(request, "outTaskqueueId", "出库队列id", true);
            boolean res = service.chuku(outTaskqueueId);
            if (res) {
                return Rjson.success("执行成功", true);
            } else {
                return Rjson.error("执行失败,未知错误");
            }
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            TransactionAspectSupport.currentTransactionStatus().setRollbackOnly();
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 调用物料直流回流接口
     *
     * @param request HTTP 请求(无业务参数,固定发送 forwardOrBackWard=1)
     * @return 执行结果
     */
    @ApiOperation(value = "调用物料直流回流接口", notes = "调用物料直流回流接口")
    @RequestMapping(value = "MaterialForwardOrBackWard", method = RequestMethod.POST)
    @OptionalLog(module = "仓管", module2 = "出库队列", method = "调用物料直流回流接口")
    public Rjson MaterialForwardOrBackWard(HttpServletRequest request) {
        try {
            Map<String, Object> map = new HashMap<String, Object>();
            map.put("forwardOrBackWard", 1);
            JSONObject jo = callFisClient("MaterialForwardOrBackWard", map);
            if (jo.getBoolean("remark")) {
                return Rjson.success("执行成功,请按回流放行按钮", true);
            } else {
                throw new Exception(jo.getString("reason"));
            }
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 查询出库队列
     *
     * @param request 参数:listNo(单据号)、userId(用户id)
     * @return 出库队列列表
     */
    @ApiOperation(value = "查询出库队列", notes = "查询出库队列")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "listNo", value = "单据号", required = true, paramType = "query"),
            @ApiImplicitParam(name = "userName", value = "用户名", required = true, paramType = "query")
    })
    @RequestMapping(value = "findOutTaskqueue", method = RequestMethod.POST)
    public Rjson findOutTaskqueue(HttpServletRequest request) {
        try {
            String listNo = EqualsUtil.string(request, "listNo", "单据号", true);
            Integer userId = EqualsUtil.integer(request, "userId", "用户id", true);
            List<CWmsOutTaskqueueT> list = service.findOutTaskqueue(listNo, userId);
            return Rjson.success("查询成功", list);
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 查询入库队列
     *
     * @param request 参数:listNo(单据号)
     * @return 执行结果
     */
    @ApiOperation(value = "查询入库队列", notes = "查询入库队列")
    @ApiImplicitParams({@ApiImplicitParam(name = "listNo", value = "单号", required = true, paramType = "query")})
    @RequestMapping(value = "findInTaskqueue", method = RequestMethod.POST)
    public Rjson findInTaskqueue(HttpServletRequest request) {
        CWmsInTaskqueueT dx = new CWmsInTaskqueueT();
        try {
            String listNo = EqualsUtil.string(request, "listNo", "单据号", true);
            dx.setListNo(listNo);
            itService.findInTaskqueue(dx);
            // NOTE(review): 原实现恒返回 true(查询结果与 leng 标志均未返回),
            // 疑似应返回查询到的列表;保持原行为,请与前端确认后修正。
            return Rjson.success("查询成功", true);
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 查询审批记录集合
     *
     * @param request 参数:userId(用户id)、listNo(单据号,可选)、pageNumber(当前页)
     * @return 审批记录分页结果
     */
    @ApiOperation(value = "查询审批记录集合", notes = "查询审批记录集合")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "listNo", value = "单据号", required = false, paramType = "query"),
            @ApiImplicitParam(name = "pageNumber", value = "当前页", required = true, paramType = "query")
    })
    @RequestMapping(value = "findList", method = RequestMethod.POST)
    public Rjson findList(HttpServletRequest request) {
        try {
            Integer userId = EqualsUtil.integer(request, "userId", "用户id", true);
            String listNo = EqualsUtil.string(request, "listNo", "单据号", false);
            Integer pageNumber = EqualsUtil.pageNumber(request);
            PageInfo<CWmsApprovalT> pageInfo = service.findApproval(userId, listNo, pageNumber);
            return Rjson.success("查询成功", pageInfo);
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 通过id查询
     *
     * @param request 参数:id(出库队列id)
     * @return 出库队列记录
     */
    @ApiOperation(value = "通过id查询", notes = "通过id查询")
    @ApiImplicitParams({@ApiImplicitParam(name = "id", value = "id", required = true, paramType = "query")})
    @RequestMapping(value = "findById", method = RequestMethod.POST)
    public Rjson findById(HttpServletRequest request) {
        try {
            Integer id = EqualsUtil.integer(request, "id", "id", true);
            CWmsOutTaskqueueT dx = service.findOutTaskqueueById(id);
            if (dx == null) {
                // BUGFIX: 原实现抛出无消息异常,前端收到的 message 为 null
                throw new Exception("未找到对应的出库队列记录");
            }
            return Rjson.success("查询成功", dx);
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 调用出库接口 XT355_356_357。
     * listNo/trayCode 为可选字符串参数,X/Y/Z 为可选坐标("0" 与空串视为未传)。
     *
     * @param request  HTTP 请求
     * @param response HTTP 响应(未使用,保留以兼容既有调用)
     * @return 执行结果
     */
    @ApiOperation(value = "调用出库接口 XT355_356_357", notes = "调用出库接口 XT355_356_357")
    @ApiImplicitParams({@ApiImplicitParam(name = "listNo", value = "单号", required = true, paramType = "query"),
            @ApiImplicitParam(name = "trayCode", value = "托盘码", required = true, paramType = "query"),
            @ApiImplicitParam(name = "X", value = "X", required = false, paramType = "query"),
            @ApiImplicitParam(name = "Y", value = "Y", required = false, paramType = "query"),
            @ApiImplicitParam(name = "Z", value = "Z", required = false, paramType = "query"),
    })
    @RequestMapping(value = "MaterialOutboundXT355_356_357", method = RequestMethod.POST)
    @OptionalLog(module = "仓管", module2 = "出库队列", method = "调用出库接口")
    public Rjson MaterialOutboundXT355_356_357(HttpServletRequest request, HttpServletResponse response)
            throws IOException, ServicesException {
        try {
            // BUGFIX: 原实现重复解析了两次 X,且将 Y、Z 的解析结果误赋给 X,
            // 导致发送给客户端的 Y、Z 恒为 null。
            String listNo = optionalString(request, "listNo");
            String trayCode = optionalString(request, "trayCode");
            Integer X = optionalCoordinate(request, "X");
            Integer Y = optionalCoordinate(request, "Y");
            Integer Z = optionalCoordinate(request, "Z");
            Map<String, Object> map = new HashMap<String, Object>();
            map.put("listNo", listNo);
            map.put("trayCode", trayCode);
            map.put("X", X);
            map.put("Y", Y);
            map.put("Z", Z);
            JSONObject jo = callFisClient("MaterialOutbound", map);
            if (!jo.getBoolean("remark")) {
                throw new Exception(jo.getString("reason"));
            }
            return Rjson.success("执行成功,即将出库", true);
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 调用出库接口(所有参数必填)。
     *
     * @param request 参数:listNo(单号)、trayCode(托盘码)、X/Y/Z(坐标)
     * @return 执行结果
     */
    @ApiOperation(value = "调用出库接口", notes = "调用出库接口")
    @ApiImplicitParams({@ApiImplicitParam(name = "listNo", value = "单号", required = true, paramType = "query"),
            @ApiImplicitParam(name = "trayCode", value = "托盘码", required = true, paramType = "query"),
            @ApiImplicitParam(name = "X", value = "X", required = false, paramType = "query"),
            @ApiImplicitParam(name = "Y", value = "Y", required = false, paramType = "query"),
            @ApiImplicitParam(name = "Z", value = "Z", required = false, paramType = "query"),
    })
    @RequestMapping(value = "MaterialOutbound", method = RequestMethod.POST)
    @OptionalLog(module = "仓管", module2 = "出库队列", method = "调用出库接口")
    public Rjson MaterialOutbound(HttpServletRequest request) {
        try {
            String listNo = EqualsUtil.string(request, "listNo", "单据号", true);
            String trayCode = EqualsUtil.string(request, "trayCode", "托盘码", true);
            Integer X = EqualsUtil.integer(request, "X", "X", true);
            Integer Y = EqualsUtil.integer(request, "Y", "Y", true);
            Integer Z = EqualsUtil.integer(request, "Z", "Z", true);
            Map<String, Object> map = new HashMap<String, Object>();
            map.put("listNo", listNo);
            map.put("trayCode", trayCode);
            map.put("X", X);
            map.put("Y", Y);
            map.put("Z", Z);
            // callFisClient 已对 remark 为 null 的异常返回做了防护(原实现会 NPE)
            JSONObject jo = callFisClient("MaterialOutbound", map);
            if (!jo.getBoolean("remark")) {
                throw new Exception(jo.getString("reason"));
            }
            return Rjson.success("执行成功,即将出库", true);
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 查询库存详情
     *
     * @param request 参数:listNo(单据号)、locationId(库位id)
     * @return 库存详情列表
     */
    @ApiOperation(value = "查询库存详情", notes = "查询库存详情")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "listNo", value = "单据号", required = true, paramType = "query"),
            @ApiImplicitParam(name = "locationId", value = "库位id", required = true, paramType = "query")
    })
    @RequestMapping(value = "findStorageDetail", method = RequestMethod.POST)
    public Rjson findStorageDetail(HttpServletRequest request) {
        try {
            String listNo = EqualsUtil.string(request, "listNo", "单据号", true);
            Integer locationId = EqualsUtil.integer(request, "locationId", "库位id", true);
            JSONObject json = new JSONObject();
            json.put("listNo", listNo);
            json.put("locationId", locationId);
            List<JSONObject> list = service.findStorageDetail(json);
            return Rjson.success(list);
        } catch (Exception e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            return Rjson.error(e.getMessage());
        }
    }

    /**
     * 调用 FIS 客户端 WebService 的通用方法(原先在三个接口中重复的 Axis 调用样板)。
     *
     * @param methodName WebService 方法名(同时用于拼接 SOAPAction URI)
     * @param params     业务参数,序列化为 JSON 字符串后作为唯一入参发送
     * @return 客户端返回的 JSON 对象(保证包含布尔字段 remark)
     * @throws Exception 与客户端通讯失败,或返回结果缺少 remark 字段
     */
    private JSONObject callFisClient(String methodName, Map<String, Object> params) throws Exception {
        String namespace = "http://tempuri.org/";
        Service axisService = new Service();
        Call call = (Call) axisService.createCall();
        call.setTargetEndpointAddress(url);
        call.setUseSOAPAction(true);
        call.setSOAPActionURI(namespace + methodName);
        call.setOperationName(new QName(namespace, methodName));
        call.addParameter(new QName(namespace, "str"), XMLType.XSD_STRING, ParameterMode.IN);
        call.setReturnType(XMLType.XSD_STRING);
        Object obj;
        try {
            obj = call.invoke(new String[]{JSON.toJSONString(params)});
        } catch (Exception e) {
            e.printStackTrace();
            throw new Exception("与客户端通讯失败");
        }
        JSONObject jo = JSON.parseObject(obj.toString());
        if (jo.getBoolean("remark") == null) {
            throw new Exception("客户端返回结果异常");
        }
        return jo;
    }

    /**
     * 读取可选字符串参数;未传或为空串时返回 null。
     */
    private String optionalString(HttpServletRequest request, String name) {
        String value = request.getParameter(name);
        return (value == null || value.equals("")) ? null : value;
    }

    /**
     * 读取可选坐标参数;未传、空串或 "0" 时返回 null。
     *
     * @throws Exception 参数存在但不是合法整数
     */
    private Integer optionalCoordinate(HttpServletRequest request, String name) throws Exception {
        String value = request.getParameter(name);
        if (value == null || value.equals("") || value.equals("0")) {
            return null;
        }
        try {
            return Integer.parseInt(value);
        } catch (NumberFormatException e) {
            e.printStackTrace();
            ToolUtils.errorLog(this, e, request);
            throw new Exception("'" + name + "'参数类型有误");
        }
    }
}
|
// NewMetrics instantiates a new Metrics implementation.
func NewMetrics(namespace string, logger Logger) Metrics {
return &metricsImpl{
internalMetrics: newMetrics("", logger),
externalMetrics: newMetrics(strings.ToLower(namespace), logger),
}
} |
# Split concatenated CallNumbers on "|" delimiter
# https://pandas.pydata.org/docs/reference/api/pandas.Series.str.split.html
import pandas as pd
from pathlib import Path

infile = Path('./data/publications.csv')
pubs = pd.read_csv(infile)

# Normalise surrounding whitespace before splitting.
pubs["CatCallNumber"] = pubs["CatCallNumber"].str.strip()

delim = "|"
# expand=True returns a DataFrame with one column per "|"-separated segment;
# regex=False treats the delimiter literally ("|" is a regex metacharacter).
new_df = pubs["CatCallNumber"].str.split(pat=delim, regex=False, expand=True)

# BUGFIX: the original printed the unsplit frame and left the split result
# (and an unused `headers` variable) dangling; show the split columns instead.
print(new_df)
|
package leetcode;
import java.util.*;
public class MonotoneStack_739 {
    class Solution {
        /**
         * LeetCode 739 "Daily Temperatures": for each day i, returns how many
         * days to wait until a warmer temperature, or 0 if none exists.
         *
         * Scans right-to-left keeping a monotone stack of indices whose
         * temperatures strictly decrease from bottom to top, so each index is
         * pushed and popped at most once (O(n) time, O(n) space).
         *
         * @param T daily temperatures
         * @return array of waiting days, same length as {@code T}
         */
        public int[] dailyTemperatures(int[] T) {
            int len = T.length;
            int[] ret = new int[len];
            // Deque over the legacy raw-typed Stack: typed, and the
            // recommended stack implementation per the java.util docs.
            Deque<Integer> stack = new ArrayDeque<>();
            for (int i = len - 1; i >= 0; i--) {
                // Drop indices no warmer than today; they can never be the
                // answer for any earlier day either.
                while (!stack.isEmpty() && T[stack.peek()] <= T[i]) {
                    stack.pop();
                }
                // Top of the stack (if any) is the nearest warmer day.
                ret[i] = stack.isEmpty() ? 0 : stack.peek() - i;
                stack.push(i);
            }
            return ret;
        }
    }
}
|
A standard coextrusion film has a core layer made of a thermoplastic elastomer and first and second outer layers each formed from a polymer having a lower elasticity than the thermoplastic elastomer. The outer layers each have a layer thickness of less than 15 μm and empty or gas-filled cells formed by foaming.
Such a coextrusion film is particularly provided as an elastic material for disposable hygiene products and is used, for example, as an elastic waistband or as an elastic closure element in a diaper. Even though high demands are placed on the second stretching behavior and the elastic return force in such an application, the material must be as cost-effective as possible in view of the unit quantities of corresponding disposable products.
It must be kept in mind that multilayered laminates are frequently used for the abovementioned applications, in which the coextrusion film is covered on one or even both sides by additional layers, particularly top layers of nonwoven.
A film with an elastic core layer and inelastic outer layers is known from U.S. Pat. No. 5,691,034. The outer layers are relatively stiff and are made, for example, of a polyolefin, particularly polyethylene or polypropylene, a polyethylene terephthalate or polyacrylate. The outer layers have a microtexture that has been produced by stretching the laminate beyond the elastic limit of the outer layers. The microtexture consists of small folds that are formed upon the elastic return of the film after second stretching, the alignment of the folds being dependent on the direction of second stretching. The film can be stretched uniaxially or also biaxially. The microtexture is perceived as a roughened, soft surface texture.
A coextrusion film is known from U.S. Pat. No. 7,449,240 that has a core layer made of a thermoplastic elastomer and outer layers made of a less elastic material. The outer layers consist of a thermoplastic plastic with a brittle consolidated molecular structure that stretches only a little when tensioned and breaks without transition when a predefined breaking limit is reached. The brittle, consolidated molecular structure is irreversibly broken open by a uniaxial second stretching transverse to the direction of extrusion that is referred to as an activation. As a result, the coextrusion film can be stretched elastically transverse to the direction of extrusion. The outer layers can be made of a polyolefin made brittle through post-treatment, a mixture of polyolefin and polystyrene or a mixture of polyolefin, polystyrene and ethylene vinyl acetate. In the direction of extrusion, which is also referred to as the machine direction, the brittle consolidated molecular structure remains intact and stiffens the material. The layered composite has no elasticity in the machine direction.
A coextrusion film with the features described above is known from US 2012/0164383. The coextrusion film has pores formed in the top layers through foaming and subsequent second stretching. By stretching the coextrusion film, which is also referred to as activation, the coextrusion film develops elastic characteristics and a textured top layer at the same time. The pores in the top layer improve the elastic characteristics of the coextrusion film that is reversibly stretchable to a great extent after its activation. The pores also result in a matte, irregular surface. |
DEONTAY WILDER slammed Floyd Mayweather after the Money Man backed Tyson Fury in the heavyweight fight.
Saturday night's bout was scored as a draw and the five-weight boxing champion reportedly walked out of the Staples Center "disgusted" by the outcome.
At the end of the fifth round, Mayweather had Fury five rounds up and reckoned the Gypsy King had done enough to claim the WBC heavyweight belt.
But Wilder hit back at his fellow American star, suggesting the Money Man could not enjoy the fight because the 41-year-old was not the centre of attention.
The Bronze Bomber said: "I’m a heavyweight and the fight, Saturday, was a great start to bring the heavyweight division back.
"Oh they loved it. They loved every bit of it. The world loved it. And I don’t think he [Mayweather] liked that.
"He wanted all the attention on him. It’s sad that it’s like that but the heavyweight division is where it’s at.
"He always wants the attention. But no more of that. I’m going to continue to do what I’ve got to do."
Wilder was asked if he thinks it will be long before he starts collecting nine-figure sums when he enters the ring like Mayweather did for the Conor McGregor fight last year.
He added: "Oh most definitely. I’m a heavyweight.
Wilder also had a pop at Anthony Joshua for fighting "second-tier opponents"
"People know we’re big guys, they know they want to see heavy hitters. And when you get a fight like that, that’s what they want to see, the excitement back.
"I’m the new Deontay Wilder of boxing. I don’t want to be Floyd. I’m coming and there’s a lot of fighters jealous of me for no reason."
As well as attacking Mayweather, Wilder had a pop at heavyweight rival Anthony Joshua for fighting "second-tier" opponents.
And he took to Instagram in an attempt to prove Fury did not beat the ten-second count in the twelfth round in Los Angeles.
Fury heroically got up from a brutal knockdown to keep himself in the fight for the final stages. |
/*
* io_k8s_api_rbac_v1alpha1_role_binding.h
*
* RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.22.
*/
#ifndef _io_k8s_api_rbac_v1alpha1_role_binding_H_
#define _io_k8s_api_rbac_v1alpha1_role_binding_H_
#include <string.h>
#include "../external/cJSON.h"
#include "../include/list.h"
#include "../include/keyValuePair.h"
#include "../include/binary.h"
typedef struct io_k8s_api_rbac_v1alpha1_role_binding_t io_k8s_api_rbac_v1alpha1_role_binding_t;
#include "io_k8s_api_rbac_v1alpha1_role_ref.h"
#include "io_k8s_api_rbac_v1alpha1_subject.h"
#include "io_k8s_apimachinery_pkg_apis_meta_v1_object_meta.h"
/* In-memory model of a v1alpha1 RoleBinding (see the file header comment
 * for the object's Kubernetes semantics). */
typedef struct io_k8s_api_rbac_v1alpha1_role_binding_t {
    char *api_version; // string — API schema version of this object
    char *kind; // string — object kind
    struct io_k8s_apimachinery_pkg_apis_meta_v1_object_meta_t *metadata; //model — standard object metadata
    struct io_k8s_api_rbac_v1alpha1_role_ref_t *role_ref; //model — the Role/ClusterRole being bound
    list_t *subjects; //nonprimitive container — presumably elements are io_k8s_api_rbac_v1alpha1_subject_t*; confirm against the implementation
} io_k8s_api_rbac_v1alpha1_role_binding_t;

/*
 * Allocates and initializes a role binding from the given fields.
 * NOTE(review): generated-style API — presumably the returned object takes
 * ownership of the passed pointers; confirm against the .c implementation.
 */
io_k8s_api_rbac_v1alpha1_role_binding_t *io_k8s_api_rbac_v1alpha1_role_binding_create(
    char *api_version,
    char *kind,
    io_k8s_apimachinery_pkg_apis_meta_v1_object_meta_t *metadata,
    io_k8s_api_rbac_v1alpha1_role_ref_t *role_ref,
    list_t *subjects
);

/* Releases the role binding (presumably including owned members — confirm). */
void io_k8s_api_rbac_v1alpha1_role_binding_free(io_k8s_api_rbac_v1alpha1_role_binding_t *io_k8s_api_rbac_v1alpha1_role_binding);

/* Builds a role binding from its cJSON representation.
 * NOTE(review): presumably returns NULL on parse failure — confirm. */
io_k8s_api_rbac_v1alpha1_role_binding_t *io_k8s_api_rbac_v1alpha1_role_binding_parseFromJSON(cJSON *io_k8s_api_rbac_v1alpha1_role_bindingJSON);

/* Serializes the role binding into a newly allocated cJSON tree. */
cJSON *io_k8s_api_rbac_v1alpha1_role_binding_convertToJSON(io_k8s_api_rbac_v1alpha1_role_binding_t *io_k8s_api_rbac_v1alpha1_role_binding);
#endif /* _io_k8s_api_rbac_v1alpha1_role_binding_H_ */
|
You must enter the characters with black color that stand out from the other characters
Message: * A friend wanted you to see this item from WRAL.com: http://wr.al/14jfe
— Navigating the streets of downtown Raleigh isn't too difficult, but there are a collection of one-way streets that throw a wrench into travel for some.
The city has been working to make things easier by converting one-way streets to two-way for about 10 years now. City planning officials say the changes won't hamper Raleigh's ability to handle increased traffic as growth continues.
Joshua Bellamy, an owner of Boulted Bread near downtown, says he's happy to see the changes happening on South Street.
The bakery has done well since opening two years ago despite being a little tough to get to. Currently, South Street converts to a one-way street about a block from the bakery.
"If you're coming out of Red Hat or Citrix, you have to take this circuitous downtown route to get back here," Bellamy said.
That'll change once work on South Street and nearby Lenoir Street is complete.
Transportation planning manager Eric Lamb said the work is part of a larger plan to convert many of Raleigh's one-way streets.
"The reason why people like two-way streets is that they find them to be a lot easier to navigate," Lamb said. "Some people think they're more pedestrian friendly."
The city disagrees with the notion that cutting capacity on downtown streets is a bad thing for traffic. The reason? Raleigh's unique downtown area.
"This scale of dense mixed-use development is able to handle a lot more traffic because it doesn't all happen before 4 and 6 p.m.," Lamb said.
Bellamy is supportive of the changes, especially those happening near his business. South and Lenoir streets should be two-way by early 2017.
"It should bring a lot more people down here to South Street, not just to our business, but to other businesses as well," Bellamy said.
After the work on South and Lenoir streets is complete, the city will consider converting Salisbury, Wilmington, Morgan and Edenton streets to two-way traffic. |
A dirty cop who used his badge, service weapon and stolen NYPD raid jackets to help a gang of violent thugs rob drug dealers of $1 million cash and more than 500 pounds of cocaine faces a life sentence after pleading guilty yesterday in Brooklyn federal court.
Emmanuel Tavarez, 31, an eight-year NYPD veteran, would flash his police badge as he and his heavily armed crew stormed the hideouts of at least 100 drug dealers in New York, Philadelphia, and Bridgeport, Conn., in a spree than began at least a decade ago, prosecutors said.
“Tavarez allegedly used his police badge and falsified search warrants to stage searches and seizures of narcotics traffickers during which he and other coconspirators stole drugs and money from the traffickers,” said a statement from Brooklyn US Attorney Loretta E. Lynch.
“During one of these robberies, Tavarez restrained a victim with handcuffs. He also used his status as a police officer to obtain NYPD raid jackets and other NYPD paraphernalia and equipment for the crew so that they would appear to be authentic police officers,” Lynch said.
The crew, wearing the NYPD jackets and other gear, would learn the location of a drug dealer’s stash, stake it out and then stage a raid as if they were all police officers acting on official duty.
The gang grabbed about 250 kilograms of cocaine — more than 550 pounds — for resale, in addition to whatever cash was on hand.
Tavarez, of Deer Park, LI, was arrested in May 2010 after a lengthy investigation into the robberies by local, state and federal law-enforcement agencies.
He pleaded guilty yesterday before US Magistrate Judge Viktor V. Pohorelsky under the Hobbs Act to robbery conspiracy, conspiracy to distribute heroin and cocaine, and the use of a firearm in relation to these crimes.
The Hobbs Act was passed to combat robbery or extortion involving interstate or foreign commerce, but is also used to prosecute public corruption.
Tavarez was most recently assigned to the Housing Bureau Viper Unit in Queens.
About 15 members of the crew, which included four of the officer’s in-laws, have been charged in the robberies. |
//*****************************************************************************
//* Function Name:  AceFlagsToString
//* Description:    Convert the given ACE (access control entry) flags to a
//*                 human-readable string by looking each set bit up in the
//*                 g_EnumTableAceFlags table. The TRUE argument presumably
//*                 selects bitmask-style (multi-flag) formatting — confirm
//*                 against EnumToString's declaration.
//* Parameters:     p_dwAceFlags - bitmask of ACE flag values to render.
//* Returns:        _bstr_t holding the textual form of the flags.
//*****************************************************************************
_bstr_t AceFlagsToString(DWORD p_dwAceFlags)
{
    return EnumToString(
                p_dwAceFlags,
                ENUM_TABLE_AND_SIZE(g_EnumTableAceFlags),
                TRUE );
}
Cell reactions with biomaterials: the microscopies. The methods and results of optical microscopy that can be used to observe cell reactions to biomaterials are Interference Reflection Microscopy (IRM), Total Internal Reflection Fluorescence Microscopy (TIRFM), Surface Plasmon Resonance Microscopy (SPRM) and Förster Resonance Energy Transfer Microscopy (FRETM) and Standing Wave Fluorescence Microscopy. The last three are new developments, which have not yet been fully perfected. TIRFM and SPRM are evanescent wave methods. The physics of these methods depend upon optical phenomena at interfaces. All these methods give information on the dimensions of the gap between cell and the substratum to which it is adhering and thus are especially suited to work with biomaterials. IRM and FRETM can be used on opaque surfaces though image interpretation is especially difficult for IRM on a reflecting opaque surface. These methods are compared with several electron microscopical methods for studying cell adhesion to substrata. These methods all yield fairly consistent results and show that the cell to substratum distance on many materials is in the range 5 to 30 nm. The area of contact relative to the total projected area of the cell may vary from a few per cent to close to 100% depending on the cell type and substratum. These methods show that those discrete contact areas well known as focal contacts are frequently present. The results of FRETM suggest that the separation from the substratum even in a focal contact is about 5 nm.
Mercedes Rivas, left, received a kidney from her sister Maricela Atwood during a transplant operation earlier this year at the Doctors Hospital at Renaissance's Transplant Institute. DHR's transplant center was recently certified by the Centers for Medicare and Medicare Services.
EDINBURG — Eight years ago, Mercedes Rivas began vomiting consistently. Her doctors kept telling her it was food poisoning, but that wasn’t the case.
Rivas was suffering from kidney failure, a common condition among those living in the Rio Grande Valley.
It wasn’t until she ended up in the emergency room that a nephrologist, otherwise known as a kidney specialist, told her that her kidneys were failing. She was diagnosed with Wegener’s syndrome, a rare autoimmune disorder that targets the lungs and the kidneys.
“The doctor that came in said, ‘Your kidneys are going out,’” the now 52-year-old remembered Thursday.
The single mother of two began a six-month chemotherapy treatment soon after, hoping it could stop, or at least slow down, the degeneration of one of her most vital organs.
The kidneys serve a dual function, Dr. Mourad Alsabbagh said Thursday. When working properly, they rid the body of excess fluids and toxins created by metabolic reactions when digesting food.
Renal, or kidney, failure will usually go undetected and undiagnosed. It is asymptomatic, he said, which means patients don’t usually know there’s a problem until they are properly diagnosed by a physician who will test their urine and blood samples.
Rivas didn’t know it then, but she and her sister would eventually become vital in ushering much-needed healthcare services to South Texas.
Before last month, anyone seeking a kidney transplant had to travel at least four hours to San Antonio or beyond.
No hospital in the Rio Grande Valley could offer a kidney transplant — even though plenty of people here require such services.
Transplants are highly regulated in the United States, Alsabbagh said. In order for a hospital to obtain permission to even attempt such a procedure requires a green light from UNOS, the United Network for Organ Sharing.
Doctors Hospital at Renaissance, the largest physician-owned hospital in the country, initially obtained permission from UNOS to perform transplants in December 2016.
The permission was the first hurdle DHR had to overcome. Then, there was certification from the Centers for Medicare and Medicaid Services.
In order to obtain a green light from the federal government, DHR had to undergo an extensive review, perform three successful transplants and pass an onsite inspection.
The certification is essential not only for patients with Medicare and Medicaid, but also for those with private insurance.
Rivas’ chemotherapy worked — for a while, at least.
But two years ago, one of her kidneys shut down and she was placed on dialysis.
The 52-year-old was routinely in and out of the hospital on a monthly basis, sometimes weekly, all while her condition worsened.
Last year, she ended up in the intensive care unit.
Soon after, her sister Maricela Atwater underwent testing to see if she was a match for Rivas.
Atwater moved from Michigan and left her job to donate a kidney to her sister.
After five months of testing, and one heartbreaking rescheduling issue, Rivas became the fifth person to receive a kidney transplant at DHR.
There are almost 30,000 people with kidney disease living in South Texas, and of those, almost 5,000 are undergoing dialysis.
Because of the disease’s nature, there are many others who have yet to be diagnosed.
“I guarantee those numbers double the actual number,” the nephrologist said.
Diabetes and hypertension account for about 70 percent of patients with renal failure, Alsabbagh estimated. And in the Rio Grande Valley, the incidence of these conditions is very high.
Diabetes rates in the United States fall between 25 to 30 percent. In South Texas, it’s close to 40 percent, Alsabbagh said.
Currently, there are 90 people on DHR’s waiting list for a kidney transplant, and that number is expected to go up very soon now that the hospital obtained the certification from CMS.
Last month, CMS surveyors came in unannounced to conduct the onsite review of the hospital’s policies, processes and conditions. They also reviewed the six successful transplants the hospital had already performed, including Rivas.
Alsabbagh believes the hospital will be able to perform about 60 transplants within a year.
Rivas woke from the surgery and immediately felt different.
Though she doesn’t remember what she was saying.
Shortly after, the sisters shared an emotional reunion at the hospital.
Atwater was overcome with emotion.
It’s been two months since the procedure, and they both feel great, they said. Atwater is managing her diet and her weight now that her body depends on one kidney and Rivas is trying to slow herself down as directed by Alsabbagh.
Alsabbagh signed paperwork Thursday that will allow her to return to work at McAllen High School this week.
Atwater continues to search for employment. She was unable to get family medical leave at her previous post because she hadn’t worked there long enough, she said.
GoFundMe account for Mercedes Rivas. |
VANCOUVER B.C. – Jerry Kroll, B.C. Green Party candidate for Vancouver-Mount Pleasant, has been appointed by leader Andrew Weaver as the party’s Spokesperson for Transportation. Kroll is an entrepreneur, the founder of KleenSpeed Technologies at the NASA research park in Mountain View, California, and President and CEO of ElectraMeccanica Vehicles Corp.
“I am very pleased to be the B.C. Green Party Spokesperson for Transportation, as mobility is an area that affects everyone, from children going to school, to people going to work, and businesses trying to deliver products”, Kroll said. “None of these groups can be happy with the amount of congestion and disorganization on the streets of BC, or the cost of insuring vehicles.
"The world is clearly transitioning away from fossil fuel transportation, and while countries in Europe are leading the way, B.C. is sliding further and further behind in both technology and adoption. Delivering affordable, efficient public transit; ensuring fair and dependable province-wide ferry service; cleaning up emissions; easing congestion on roadways, and making British Columbia’s drivers the safest and best drivers in the world is my mandate, and it won’t take long. Everyone can see the future; we just need the common sense to proceed!”
“Jerry has dedicated his life in recent years to solving challenges in transportation through innovative work in energy and climate change mitigation”, Weaver added. “He makes an ideal spokesperson for transportation, which is critical to a sustainable 21st century economy and society. I would like to thank him for bringing his leadership to this file.”
- 30 -
Media contacts
For Jerry Kroll
Jerry Kroll, candidate for Vancouver-Mount Pleasant
+1 604-687-3088 | [email protected]
For Andrew Weaver
Stefan Jonsson, Director of Communications
+1 250-514-0288 | [email protected] |
import scipy.misc as scm
def aCb(n, r):
    """Exact binomial coefficient C(n, r) using integer arithmetic.

    Improvements over the original:
    - returns 0 for out-of-range r (the old version returned a wrong
      nonzero value when r > n, because its (n-r)! loop was empty);
    - multiplicative formula avoids building the full n! intermediate,
      and each division is exact (C(n-r+i, i) is always an integer).

    :param n: population size (non-negative int)
    :param r: sample size
    :return: number of r-combinations of n items, as an int
    """
    if r < 0 or r > n:
        return 0
    # Exploit symmetry C(n, r) == C(n, n-r) to shorten the loop.
    r = min(r, n - r)
    ans = 1
    for i in range(1, r + 1):
        ans = ans * (n - r + i) // i
    return ans
# Choose between A and B of the N values so the average of the chosen
# values is maximized; print that maximum average, then the number of ways
# to achieve it. (Looks like AtCoder ABC057 D — TODO confirm.)
N, A, B = list(map(int, input().split(" ")))
v = list(map(int, input().split(" ")))
# Descending order: the best average always uses the largest values.
sorted_v = sorted(v)[::-1]
# Maximum average = mean of the top A values.
ans= sum(sorted_v[:A])/A
print(ans)
# n = how many copies of the A-th largest value exist in the whole input.
n = sorted_v.count(sorted_v[A-1])
# If the average differs from the A-th largest value, the top-A multiset is
# forced except for which copies of that boundary value we pick:
# choose r of its n occurrences. (Equality holds iff all top-A values are
# identical, since the mean of equal values equals that value.)
if ans != sorted_v[A-1]:
    r = sorted_v[:A].count(sorted_v[A-1])
    print(int(aCb(n, r)))
else:
    # All chosen values equal the boundary value: any selection of
    # num in [A, B] copies (bounded by the n available) attains the max.
    C = 0
    for num in range(A, B+1):
        # print(num)
        if num <= n:
            C += (aCb(n, num))
        else :
            break
    print(int(C))
|
<reponame>KalebKE/CAExplorer
/*
TwoDimensionalFileStorage -- a class within the Cellular Automaton Explorer.
Copyright (C) 2005 <NAME> (http://academic.regis.edu/dbahr/)
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package cellularAutomata.io;
import java.io.PrintWriter;
import java.util.Iterator;
import cellularAutomata.CurrentProperties;
import cellularAutomata.Cell;
import cellularAutomata.cellState.model.CellState;
import cellularAutomata.lattice.Lattice;
import cellularAutomata.lattice.TwoDimensionalLattice;
/**
 * Writes the final state(s) of a two-dimensional cellular automaton to a
 * file: one "//generation N" header per saved generation followed by one
 * line per lattice row, with cell states separated by the configured
 * delimiters.
 *
 * @author <NAME>
 */
public class TwoDimensionalFileStorage extends FileStorage
{
    // the number of generations that will be saved
    int numGenerationsToSave = 1;

    // file data delimiters (re-read on every save; they may change)
    private String delimeters = null;

    /**
     * Creates a file where data will be stored.
     *
     * @param numGenerationsToSave
     *            The number of generations that will be saved.
     */
    public TwoDimensionalFileStorage(
        int numGenerationsToSave)
    {
        super();

        this.numGenerationsToSave = numGenerationsToSave;
    }

    /**
     * Saves two-dimensional cellular automaton data for the most recent
     * {@code numGenerationsToSave} generations.
     *
     * @param lattice
     *            The CA lattice of cells to be saved.
     */
    public void save(Lattice lattice)
    {
        // save all the properties into the file
        saveProperties();

        // now save the data
        PrintWriter fileWriter = getFileWriter();

        // get an iterator over the lattice
        Iterator cellIterator = lattice.iterator();

        TwoDimensionalLattice twoLattice = (TwoDimensionalLattice) lattice;

        // must do this here, not the constructor, because the delimiters may
        // change.
        delimeters = CurrentProperties.getInstance().getDataDelimiters();

        // the generation being saved
        int firstGen = 0;

        // the last generation to save
        int lastGeneration = 0;

        // get values for the first and last generations to save
        Iterator iter = lattice.iterator();
        lastGeneration = ((Cell) iter.next()).getGeneration();
        firstGen = lastGeneration - (numGenerationsToSave - 1);

        // write the data once for each generation
        for(int generation = firstGen; generation <= lastGeneration; generation++)
        {
            fileWriter.println("//generation " + generation);

            // get each cell
            for(int i = 0; i < twoLattice.getHeight(); i++)
            {
                for(int j = 0; j < twoLattice.getWidth(); j++)
                {
                    Cell cell = (Cell) cellIterator.next();

                    // the cell's state for this generation, as a string
                    // (left empty when the state is unavailable)
                    String state = "";

                    try
                    {
                        CellState cellState = cell.getState(generation);

                        // BUG FIX: the original tested `state != null`, which
                        // is always true (state is initialized to ""), so a
                        // null CellState caused an NPE that was silently
                        // swallowed below. Test the fetched object instead.
                        if(cellState != null)
                        {
                            state = cellState.toString();
                        }
                    }
                    catch(Exception e)
                    {
                        // deliberately ignored: a generation that cannot be
                        // read is written as an empty state
                    }

                    // write the value in the state
                    fileWriter.print(state);

                    // add delimiters between cells (but not after the last)
                    if(cellIterator.hasNext())
                    {
                        fileWriter.print(delimeters);
                    }
                }

                // add a new line so the next row is printed on the next line
                fileWriter.println();
            }

            // need to iterate over the lattice again for the next generation
            if(numGenerationsToSave > 1)
            {
                // reset the iterator
                cellIterator = lattice.iterator();
            }
        }

        close();
    }
}
|
The stable isotope stratigraphy and paleosols of North America's most southern exposure of late Paleocene/Early Eocene fossiliferous continental deposits: documenting the initial Eocene thermal maximum in Big Bend National Park, Texas A chemostratigraphic section across the Paleocene/Eocene boundary, using the stable isotopes of carbon and oxygen, has been developed for North America's most southern exposure of early Paleogene continental deposits in which the boundary is constrained by fossil mammals. A negative carbon excursion has been identified within C24r. The range in δ13C values is from -8.1 to -13.2‰. Until the development of the chemostratigraphic section it was uncertain if the earliest Eocene was recorded in Big Bend. An early Wasatchian (Wa1) fossil site occurs stratigraphically higher than the carbon excursion and has yielded the stratigraphically lowest Hyracotherium in the Big Bend region. Based on the stable isotope stratigraphy, time equivalent to Wa0 is recorded in Big Bend but no Wa0 fossils have been found. To examine the possible effects of the initial Eocene thermal maximum (IETM) on pedogenesis in the study area, the chemical index of alteration (CIA) was calculated for pre IETM paleosols and paleosols that occur within the negative carbon excursion. Pre IETM paleosols have CIA values that indicate moderate weathering. IETM paleosols have CIA values that indicate moderate to intense weathering. The clay mineralogy of pre IETM paleosols is dominated by smectite, and it is only within the carbon excursion that there is a change. Kaolinite increases from 2% to 17% in one paleosol horizon that is associated with the carbon excursion. Other notable differences are an increase in the translocation of clays and irons, an increase in base loss and a decrease in the amount of calcite in IETM paleosols. These changes suggest that the climate must have been moister during this time. An increase in hydrolysis reactions caused by an increase in
// AttachTrait attaches a trait to a component
func (s *APIServer) AttachTrait(c *gin.Context) {
var body apis.TraitBody
body.EnvName = c.Param("envName")
body.AppName = c.Param("appName")
body.ComponentName = c.Param("compName")
if err := c.ShouldBindJSON(&body); err != nil {
util.HandleError(c, util.InvalidArgument, "the trait attach request body is invalid")
return
}
util.AssembleResponse(c, "deprecated, please use appfile to update", nil)
} |
On October 5, 2018, Island Records/UMe will release the 30th anniversary, 2-CD deluxe edition of Anthrax’s fourth studio album, State of Euphoria. The package will consist of two discs: Disc One is the fully-remastered album, all of the B-sides originally released in conjunction with the album, and “Antisocial” recorded live in London at the Hammersmith Odeon in March, 1989; Disc Two, “Charlie’s Archives,” is a real treasure trove for Anthrax fans as it puts you right in the room with the band as the album’s songs evolved. The package also includes a 20-page booklet that will take you right back to the time of the album’s release.
State of Euphoria will also be issued as a 2-LP set on standard black vinyl and limited edition red and yellow colored vinyl. Both versions are available for pre-order here.
The anniversary package was helmed by the band’s drummer and long-time archivist Charlie Benante, who dove into his Anthrax vault to come up with the tapes that show the evolution of the album’s songs. Disc Two takes you through the writing, development and recording process of the album’s songs. The listener is taken into the band’s writing and rehearsal sessions hearing the band members construct the song, deciding what stays and what doesn’t, and then into the studio for the recording of basic tracks. “We recorded everything back then,” said Benante. “We would sit in the rehearsal room with a little two-track machine. record everything we did in rehearsal while we were putting the songs together.” Also pulled from his vault were tapes of the actual recording sessions, all the takes, that also appear on Disc Two.
In addition to the original album’s artwork that includes the Mort Drucker-drawn (Mad Magazine) back cover, the anniversary package includes never before seen personal snapshots of the band members, tour admats/posters, plus two Anthrax magazine covers, all culled from that 1988-1989 period. For the booklet, the band enlisted British journalist Alexander Milas, former editor-in-chief of Metal Hammer, to write the sleeve notes, Shawn Franklin handled editing tracks and Paul Logus mastered the set. Benante also took to social media, inviting fans to tell him what State of Euphoria had meant to them. He was overwhelmed with responses, and many of them are included in the booklet. “I think fans will be really excited when they see their own quotes in the package,” Benante added.
State of Euphoria was produced by Anthrax and Mark Dodson, includes fan-favorites “Antisocial” and “Be All, End All,” and was certified Gold a few months after release. |
package me.elsiff.morefish.listener;
import org.bukkit.Location;
import org.bukkit.Material;
import org.bukkit.Sound;
import org.bukkit.block.ShulkerBox;
import org.bukkit.entity.Item;
import org.bukkit.event.EventHandler;
import org.bukkit.event.EventPriority;
import org.bukkit.event.Listener;
import org.bukkit.event.block.BlockPlaceEvent;
import org.bukkit.inventory.ItemStack;
import org.bukkit.inventory.meta.BlockStateMeta;
import org.bukkit.inventory.meta.ItemMeta;
/**
 * Turns placement of a specially-marked black shulker box ("treasure") into
 * an item drop: the placement is cancelled, one treasure item is consumed,
 * and the box's contents are dropped at the targeted block instead.
 */
public class TreasureListener implements Listener {

    // Custom model data value that marks a shulker box item as a treasure.
    private static final int TREASURE_MODEL_DATA = 1337;

    @EventHandler(priority = EventPriority.LOWEST)
    public void onPlaceTreasure(BlockPlaceEvent event) {
        if (event.getBlock().getType() != Material.BLACK_SHULKER_BOX) {
            return;
        }
        ItemMeta meta = event.getItemInHand().getItemMeta();
        // BUG FIX: ItemMeta.getCustomModelData() throws IllegalStateException
        // when no custom model data is set, so placing an ordinary black
        // shulker box crashed this handler. Guard with hasCustomModelData().
        if (meta == null || !meta.hasCustomModelData()
                || meta.getCustomModelData() != TREASURE_MODEL_DATA) {
            return;
        }

        // Cancel normal placement and consume one treasure item.
        event.setCancelled(true);
        event.getItemInHand().setAmount(event.getItemInHand().getAmount() - 1);

        BlockStateMeta stateMeta = (BlockStateMeta) meta;
        ShulkerBox shulker = (ShulkerBox) stateMeta.getBlockState();

        // Drop the contents at the center of the targeted block.
        Location location = event.getBlock().getLocation().clone().add(0.5, 0.5, 0.5);
        location.getWorld().playSound(location, Sound.AMBIENT_UNDERWATER_ENTER, 1, 1);
        location.getWorld().playSound(location, Sound.BLOCK_ENDER_CHEST_OPEN, 1, 1);
        for (ItemStack stack : shulker.getInventory()) {
            if (stack == null || stack.getType() == Material.AIR) {
                continue;
            }
            Item dropped = location.getWorld().dropItemNaturally(location, stack);
            // Restrict pickup of the drops to the placing player.
            dropped.setOwner(event.getPlayer().getUniqueId());
        }
    }
}
|
Total Mercury, Methylmercury, and Ancillary Water-Quality and Streamflow Data for Selected Streams in Oregon, Wisconsin, and Florida, 2002-06 Field and analytical methods, mercury and ancillary water-quality data, and associated quality-control data are reported for eight streams in Oregon, Wisconsin, and Florida from 2002 to 2006. The streams were sampled as part of a U.S. Geological Survey National Water-Quality Assessment Program study of mercury cycling, transport, and bioaccumulation in urban and nonurban stream ecosystems that receive mercury predominantly by way of atmospheric deposition. |
<filename>src/bbtag.ts
export class BBTag {
    constructor(
        public tagName: string, // Name of the BB tag, e.g. "b" for [b]...[/b]
        public insertLineBreaks: boolean, // Whether line breaks are inserted inside the tag content
        public suppressLineBreaks: boolean, // Whether line breaks are suppressed for nested tags
        public noNesting: boolean, // Whether nested tags are disallowed inside this tag
        public markupGenerator: (tag: BBTag, content: string, attributes: { [key: string]: string }) => string = (
            tag,
            content
        ) => "<" + tag.tagName + ">" + content + "</" + tag.tagName + ">"
    ) {}

    // Builds a plain tag that renders as an HTML element of the same name.
    public static createSimpleTag(tagName: string, insertLineBreaks = true): BBTag {
        const tag = new BBTag(tagName, insertLineBreaks, false, false);
        return tag;
    }

    // Builds a tag whose BB name differs from the HTML element it renders as.
    public static createSimpleHTMLTag(tagName: string, htmlTag: string, insertLineBreaks = true): BBTag {
        const generator = (_tag: BBTag, content: string) => "<" + htmlTag + ">" + content + "</" + htmlTag + ">";
        return new BBTag(tagName, insertLineBreaks, false, false, generator);
    }

    // Builds a tag that renders through a caller-supplied markup generator.
    public static createTag(
        tagName: string,
        markupGenerator: (tag: BBTag, content: string, attributes: { [key: string]: string }) => string,
        insertLineBreaks = true
    ): BBTag {
        return new BBTag(tagName, insertLineBreaks, false, false, markupGenerator);
    }
}
|
Networked idiots: Affective economies and neoliberal subjectivity in a Russian viral video Idiot is usually a term of derision. In this article, we reconsider the common meaning as designating a stupid person and return to an earlier etymology as signifying a private and independent individual. In ancient Greece, being idiotic meant engaging in the contemplative process of becoming an individual. At times, this pursuit of individuation differentiated such individuals as their acts occurred in public and were seen as absurd, out-of-the-ordinary and, frankly, idiotic, as most now know the term. With the widespread use of social media and digital video, these once private or semi-public acts of individuation often become explicitly public acts for others to see, critique and mimic. Social media has made it possible for these explorations of self to circulate where their emotional intensities resonate with or are rejected by viewers, are captured for profit by media corporations, and leveraged by their producers into media careers. Using a case study from Russian social media, this article describes the affective economy of idiotic videos and how the history of one Internet video illustrates the circulation, capture and self-capitalization attendant with neoliberalism. |
1. Field of the Invention
The present invention relates to a dampening volume control apparatus for offset press, more specifically, as to an apparatus for setting desired values of dampening volume.
2. Description of the Prior Art
In offset printing, a phenomenon called scumming — in which ink is deposited even on the non-image areas of the printing paper — occurs when too little dampening solution is supplied to the offset plate relative to the amount of ink. Conversely, emulsification problems, including water streaks and uneven print density, are observed when too much dampening solution is supplied relative to the amount of ink. It is therefore necessary to keep the amounts of both the ink and the dampening solution supplied to the offset plate within an appropriate range during printing.
In order to prevent both the phenomena and the emulsification, a dampening volume control apparatus shown in FIG. 3 which keeps supply amount of the dampening solution in a certain volume is proposed (see Japanese laid-open publication No. Hei 4-83640). In the control apparatus, a desired value of the dampening volume (a supply amount of the dampening solution) is stored in a controller 123, then actual dampening volume of the dampening solution supplied to a plate 33 is detected by a dampening volume sensor 41. The detected volume of the dampening solution is compared with the stored value. Revolution speed of a motor 25 is controlled by the controller 123 in accordance with a result of the comparison. In this way, revolution speed of a water fountain roller 28 is varied. So that, feed-back control is carried out so as to make the dampening solution supplied to the plate 33 coincide with the desired value stored in the controller 123 as a result of varying the revolution speed of the water fountain roller 28.
However, the dampening volume control apparatus described earlier has following problems to be resolved. Quality of printing can be controlled by storing the desired value in the controller 123 when the desired value is determined as a certain value. Nevertheless, the desired value itself is varied by following factors such as room temperature, humidity at the room, material of the plate and others. Actually, the desired value is determined by an operator of the offset press under trial and error bases by referring the quality of the printing. In a concrete form, the desired value is adjusted in accordance with the quality of the printing done on the printing papers by carrying out following procedures. A desired value is set for temporary purpose, and then printing work is carried out until a volume of the dampening solution supplied to the offset plate becomes a stable condition as shown in FIG. 4. The procedures described in above need to be repeated. As a result, it is required both a certain period of time and printing papers to determine a desired value appropriate for printing in a good condition.
The time and printing paper consumed during this adjustment account for a large share of the total resources, especially in the small-lot printing jobs that have become common in recent years.
It’s time. You asked for it, we brought it back: the Curious City podcast.
What’s in store? Stories about an old asylum, nuclear radiation, panhandler economics, the tunnels beneath Chicago’s Loop and collective memories of a shuttered amusement park — to name just a few.
So, get yourself subscribed already!
For iOS devices subscribe via iTunes.
For Android devices subscribe via Feedburner.
If you listened to our first season, you know we experimented with format. We brought you a mix of stories and conversations that aired on WBEZ along with original content you could not get anywhere else.
Thanks to your feedback in our podcast survey, we learned a lot. Above all, many of you miss our stories that air on WBEZ 91.5 FM and want a one-stop shop to catch all of our feature-length stories. We’ve rounded up the best for this season and one’s ready for you now: reporter Alex Keefe’s explanation of what a Chicago alderman’s job really entails. (Hint: legislation, pigeon poop, pregnancy tests and chainsaws).
We plan to keep experimenting with format, and our hope is to bring you all-original podcast content that you hear before our radio audience does. But we need more time to get that in order. After all, we’re pumping out more stories than we did last year, plus we’re busy building a better website and hopefully an empire that’ll spread to other cities.
Thanks for your ears and feedback! We hope you enjoy!
Curious City gives you extra curiosities on Facebook and Twitter. |
import * as React from "react";
import { RouteComponentProps, withRouter, Link } from "react-router-dom";
import axios from "axios";
import authHeader from "../../services/auth-header";
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import { faSave, faArrowLeft } from '@fortawesome/free-solid-svg-icons';
import { formatTimestamp } from "../../helper";
// Generic string-keyed bag of form field values (input id -> entered value).
export interface IValues {
  [key: string]: any;
}
// Component state for the EditTodo form.
export interface IFormState {
  // Todo id taken from the /:id route parameter.
  id: string;
  // The todo as loaded from the server.
  todo: any;
  // Pending edits keyed by input id. Typed as an object (not IValues[]):
  // setValues builds it via object spread and it is sent as the PATCH body.
  values: IValues;
  // Toggles the success alert and the delayed redirect.
  submitSuccess: boolean;
}
// Page component for editing an existing todo.
// Reads the todo id from the route (:id), loads the todo from the API on
// mount, and PATCHes only the fields the user actually changed back to the
// server, then redirects to the todo list.
class EditTodo extends React.Component<RouteComponentProps<any>, IFormState> {
    constructor(props: RouteComponentProps) {
        super(props);
        this.state = {
            // Todo id from the /:id route parameter.
            id: this.props.match.params.id,
            // Populated from the API in componentDidMount.
            todo: {},
            // Accumulates edited field values keyed by input id; only these are PATCHed.
            values: [],
            // Shows the success alert and triggers the delayed redirect.
            submitSuccess: false,
        };
    }
    // Loads the todo being edited; the API requires the auth header.
    public componentDidMount(): void {
        axios.get(`${process.env.REACT_APP_NODE_URL}/todos/${this.state.id}`, { headers: authHeader() }).then((response) => {
            this.setState({ todo: response.data.data });
        });
    }
    // Submits the edited fields, shows the success alert, then redirects back
    // to the todo list after 1.5 s.
    private processFormSubmission = async (e: React.FormEvent<HTMLFormElement>): Promise<void> => {
        e.preventDefault();
        // NOTE(review): this guards on the originally-loaded title
        // (this.state.todo.title), not the edited value in this.state.values —
        // confirm this is the intended validation.
        if (this.state.todo.title === "") {
            return;
        }
        // NOTE(review): the response (`data`) is unused and there is no error
        // handler for a failed PATCH.
        axios.patch(`${process.env.REACT_APP_NODE_URL}/todos/${this.state.id}`, this.state.values, { headers: authHeader() }).then((data) => {
            this.setState({ submitSuccess: true });
            setTimeout(() => {
                this.props.history.push("/my-todos");
            }, 1500);
        });
    };
    // Merges newly edited field values into the pending-changes map.
    private setValues = (values: IValues) => {
        this.setState({ values: { ...this.state.values, ...values } });
    };
    // Records an input change under the input's id (e.g. "title", "description").
    private handleInputChanges = (e: React.FormEvent<HTMLInputElement>) => {
        e.preventDefault();
        this.setValues({ [e.currentTarget.id]: e.currentTarget.value });
    };
    public render() {
        const { submitSuccess } = this.state;
        return (
            <div className="App">
                {this.state.todo && (
                    <div>
                        <div>
                            <div className="jumbotron jumbotron-fluid">
                                <div className="container">
                                    <div className="row">
                                        <div className="col-12">
                                            <h1 className="display-4">Edit todo</h1>
                                            <span> This todo was created <strong>{formatTimestamp(this.state.todo.createdAt)}</strong></span>
                                        </div>
                                    </div>
                                </div>
                            </div>
                            <div className="container">
                                <div className="row">
                                    {submitSuccess && (
                                        <div className="alert alert-success fade show h6" role="alert">
                                            <strong>Well done!</strong> Your todo has been updated.
                                        </div>
                                    )}
                                    <div className="col-12">
                                        <form id={"create-post-form"} onSubmit={this.processFormSubmission} noValidate={true}>
                                            <div className="form-group">
                                                <label htmlFor="title"> Title </label>
                                                <input
                                                    type="text"
                                                    id="title"
                                                    defaultValue={this.state.todo.title}
                                                    onChange={(e) => this.handleInputChanges(e)}
                                                    name="title"
                                                    className="form-control"
                                                    placeholder="Enter title"
                                                    pattern="^.{1,30}$"
                                                    required
                                                />
                                            </div>
                                            <div className="form-group">
                                                <label htmlFor="description"> Description </label>
                                                <input
                                                    type="text"
                                                    id="description"
                                                    defaultValue={this.state.todo.description}
                                                    onChange={(e) => this.handleInputChanges(e)}
                                                    name="description"
                                                    className="form-control"
                                                    placeholder="Edit description"
                                                    pattern="^.{1,50}$"
                                                />
                                            </div>
                                            <div className="form-group">
                                                <button className="btn btn-success mr-2" type="submit">
                                                    <FontAwesomeIcon className="mr-1" icon={faSave} /> Save
                                                </button>
                                                <Link
                                                    to="/my-todos"
                                                    className="btn btn-primary"
                                                >
                                                    <FontAwesomeIcon icon={faArrowLeft}/> Cancel and get back to Todo's
                                                </Link>
                                            </div>
                                        </form>
                                    </div>
                                </div>
                            </div>
                        </div>
                    </div>
                )}
            </div>
        );
    }
}
export default withRouter(EditTodo);
Source, harvesting, conservation status, threats and management of indigenous plant used for respiratory infections and related symptoms in the Limpopo Province, South Africa Abstract. Semenya SS, Maroyi A. 2019. Source, harvesting, conservation status, threats and management of indigenous plant used for respiratory infections and related symptoms in the Limpopo Province, South Africa. Biodiversitas 20: 789-810. This survey explored Bapedi traditional healers (THs) practices pertinent to native plants used to treat respiratory infections (RIs) and related symptoms (RSs). Semi-structured questionnaires and participatory observations were used to gather information from 240 THs in the Limpopo Province, South Africa. 186 plants from 75 families were harvested by these THs, mainly from the communal lands (81.2%), throughout the year. Plant parts used for RIs and RSs remedies was destructively harvested in wilderness compared to homegardens. Most (n=174) species from which these parts are obtained appears on the South African National Red Data List of plants, with 88.5% having a list concern status. This included Adansonia digitata, Boscia albitrunca, Catha edulis, Securidaca longepedunculata and Sclerocarya birrea which are also protected under the National Forest Act of 1998 (Act no. 84 of 1998). A further, 8.6% (n=15) of Red Data Listed plants are of conservation concern, with various status namely near threatened (38.3%), declining (20%), data deficient (13.3%), critically endangered and vulnerable (3.3%, for each), as well as endangered (6.6%). There were both consensus and disjunction amongst THs and Red Data List regarding the status of plants in the wild. This study provides valuable data for the conservation of medicinal plants in Limpopo Province. |
Odontogenesis and amelogenesis in interacting lizardQuail tissue combinations In this study we examined the possible inductive role of the dental papilla from polyphyodont lizard tooth germs. Flank skin sheets of quail ectoderm enzymatically separated from dermal tissue were recombined with lizard tooth papillae and placed on semisolid medium and cultured for 2 days. Subsequently, the recombinants were removed and placed on the chorioallantoic membrane of chick hosts and incubated for 6 days. After this period of 8 days in explant, control tissues differentiated according to their own phenotypes. Lizard dental papilla alone differentiated as fibroblasts. Quail flank skin ectoderm differentiated into epithelial sheets. Intact lizard tooth buds developed into teeth with dentine and incipient enamel. In the best experimental recombinants, advanced and relatively wellconstructed teeth were observed, with clear indications of hard tissue deposition in association with quail epithelium. The results show that mesenchyme of the adult lizard dental papilla and embryonic quail ectoderm of heterotopic origin are capable of carrying out the complex sequence of morphogenetic interactions involved in normal odontogenesis. |
Smart computers still pose a tech challenge for some people of a certain generation: Victor Schukov.
Old UNI had a modem that could only be hooked up to a rotary phone. It came with a pointing device that was a wooden stick and an electrical surge protector that was a rubber blanket. And a real mouse lived inside it.
Fed up one day, I announced to the family “We’re just going to have to spring for a much more powerful PC with vector tracing, voice recognition, virtual reality vaporware and 200 megs of dim and, oh yeah, we need more than one i/o. (I had no idea what I was talking about. I heard it somewhere. The only i/o I could refer to was in the song Old MacDonald Had a Farm. (e/i/e/i/o) So, finding no more deals at the scrap yard, I bought a whole new system from a reputable store manned by a pre-pubescent geek with more tape on the nose bridge of his glasses than I had on my hockey stick.
My newest computer is so smart, it took itself out of the box when we got home. I am convinced modern computers have hidden artificial intelligence. Mine sniggers when I sign in. It’s bored. I’m afraid to push the HELP button because I may never get out of the explanation. People who can’t handle rejection should not own modern computers.
Today, a three-year old can get in and out of Windows and track the movement of Russian intercontinental missiles. I can’t find the power button. (I think the alt button is to tell you what altitude you’re at.) I am proud to say, however, that I am fully licensed to run thesaurus and spell check. Now, if only I can find the power button. |
def testOneHotLabels(self):
  """Checks MultiLabelContrastiveLoss against softmax cross-entropy.

  With strictly one-hot labels the multi-label contrastive loss should
  coincide with standard softmax cross-entropy. The logits carry a large
  constant offset (1e7) — softmax is shift-invariant, so the comparison
  also exercises numerical stability of the implementation.
  """
  depth = 400
  batch = 7
  # Random class index per (example, position); converted to one-hot targets.
  class_ids = np.random.randint(0, depth, size=(batch, 3))
  one_hot = tf.one_hot(class_ids, depth=depth, dtype=tf.float32)
  # Uniform logits scaled to [0, 10) and shifted far from zero.
  raw = np.random.uniform(size=(batch, 3, depth)) * 10 + 1e7
  shifted_logits = tf.convert_to_tensor(raw, dtype=tf.float32)
  actual = label_lib.MultiLabelContrastiveLoss(one_hot, shifted_logits)
  reference = tf.nn.softmax_cross_entropy_with_logits(
      labels=one_hot, logits=shifted_logits)
  self.assertAllClose(reference, actual)
New Use of BIM-Origami-Based Techniques for Energy Optimisation of Buildings : Outstanding properties and advanced functionalities of thermalregulatory by origami-based architecture materials have been shown at various scales. However, in order to model and manage its programmable mechanical properties by Building Information Modelling (BIM) for use in a covering structure is not a simple task. The aim of this study was to model an element that forms a dynamic shell that prevents or allows the perpendicular incidence of the sun into the infrastructure. Parametric modelling of such complex structures was performed by Grasshopper and Rhinoceros 3D and were rendered by using the V-rays plugin. The elements followed the principles of origami to readjust its geometry considering the sun position, changing the shadow in real time depending on the momentary interest. The results of the project show that quadrangular was the most suitable Origami shape for faade elements. In addition, a BIM-based automated system capable of modifying faade elements considering the sun position was performed. The significance of this research relies on the first implementation and design of an Origami constructive element using BIM methodology, showing its viability and opening outstanding future research lines in terms of sustainability and energy efficiency. argued and revaluated during several evaluation rounds. The results Introduction Origami is the ancient art of paper folding and comes from Japanese cultural background: from ori meaning folding and kami meaning paper. It is a metamorphic art in which a piece of paper is transformed without the need to add or remove material. The geometries and volumes are obtained by means of folds and creases. Various practical applications in construction have taken place in the last few years despite the rich aesthetic history of this art. 
Recent developments in computer science, number theory, and computational geometry have led the way for powerful new techniques of analysis and design which nowadays extend beyond art. Space reduction in the folded versus unfolded position was found as an initial benefit. However, when the effects of climate change were highlighted at the Paris Climate Conference in December 2015 (melting poles, extreme weather events and rising sea levels), new challenges appeared. Moreover, an important increase of the greenhouse gas emissions was detected. This pointed out to an improvement of the global warming tendency. The main aim of this study was to combine origami-based roofing elements with BIM methodology in order to allow considering the environmental conditions, such as sun radiation, throughout the infrastructure life span. The main research structure was focused on the origami-BIM design, analysing its modelling and programming viability. This paper establishes future research lines that could be oriented to energy simulation or economic estimations. For the viability component design analysis, the research hosts the whole process of element creation, dealing with all possible design barriers such as interoperability, programming, or visualization. Initially, elements based on origami principles were modelled for use in a roof structure. This was a simulation of both the roof and the movable cladding used in the infrastructure. Various software from major developers were considered to create the BIM model, i.e., Revit or Dynamo from Autodesk, OpenBuildings De-signer or MicroStation from Bentley, and Grasshopper from Rhino for the animation of the elements have been considered. Other applications that were considered for additional functions were Synchro, Civil 3D, and Rhinoceros 3D. All of them require exhaustive research into the functions provided by each software in order to make best use of them. 
The simulation of the movement of the elements were carried out according to the project location and the season of the year, i.e., to favour the light passage in order to reduce heating and artificial lighting in its folded position or to reduce air-conditioning consumptions on its unfolded position. Therefore, careful consideration has been given to the selected origami technique and its integration into the environment for the studied application. As a final result, the project sought to reach the main objectives: A complete process of BIM-based Origami implementation. Considering Software and Origami possibilities or data interoperability with Origami techniques A BIM-based automated system capable of modifying the geometry of faade elements based on the external sun trajectory For this purpose, real applications of Origami techniques in the Architecture, Engineering, and Construction (AEC) sector, particularly on roofs and external faades, are shown in the first section of the paper, including a review of the published literature that has shown the advantages of BIM for energy management. Thereupon, a new BIM-based design method for external building elements is proposed given that multiple profits have been directly attributed to the use of BIM. Improvements of energy analysis or time reduction for the design of alternatives are examples of some possible benefits shown in previous published research. The Results section shows the BIM-based automated system with Origami techniques. This automates system was performed in this study on a generic building in order to implement the methodology. The conclusions show the outstanding possibilities and synergies of the use of these techniques simultaneously for future designers. Origami on AEC Sector The Architecture, Engineering, and Construction (AEC) sector must adapt to meet this challenge. 
In order to do so, new opportunities are opening through innovation, such as the use of Building Information Modelling (BIM) together with digitalisation and Internet of Things (IoT), the use of metamaterials, and the use of programmable origamibased properties that allow mechanical unfolding. In this regard, the use of these singular elements is no longer only applied to achieve an impressive geometry in roof elements, mobility and automatization for energetic and functional purposes are also considered. Therefore, the new design concepts tend to focus on adapting to the environmental climatic conditions and their changes, obtaining more competitive advantages. Energy efficiency becomes a key factor at the design phase. Wind and solar radiation are some examples of meteorological conditions that may rethink the projects to be designed and built in the following years. On the one hand, there are the high winds that carry sand from the desert. This causes a number of catastrophes in Arab countries, such as the covering of railway tracks, roads, or impact against building fronts. On the other hand, solar radiation can cause temperatures in buildings to rise up to alarming and sometimes unfeasible levels, e.g., curtain walls can reach temperatures of up to 80 C, forcing to the use of indoor air cooling. Therefore, this possibility represents a great opportunity to reduce CO2 emissions into the atmosphere. The use of faade elements that fold or unfold depending on the project location and the season of the year can reduce energy consumption as has been shown in previous published research. Considering a folded position, an improvement of natural light usage instead of artificial lighting is possible. Moreover, half-folded or totally unfolded important heat transfer is reached allowing to decrease the use of air conditioning. 
There are examples of buildings with adaptable building faades in the world, or Origami implementations for shape optimization considering structural performance. Figure 1 shows some of the most prominent applications in this field. Appl. Sci. 2022, 12, x FOR PEER REVIEW 3 of 15 sometimes unfeasible levels, e.g., curtain walls can reach temperatures of up to 80 °C, forcing to the use of indoor air cooling. Therefore, this possibility represents a great opportunity to reduce CO2 emissions into the atmosphere. The use of faade elements that fold or unfold depending on the project location and the season of the year can reduce energy consumption as has been shown in previous published research. Considering a folded position, an improvement of natural light usage instead of artificial lighting is possible. Moreover, half-folded or totally unfolded important heat transfer is reached allowing to decrease the use of air conditioning. There are examples of buildings with adaptable building faades in the world, or Origami implementations for shape optimization considering structural performance. Figure 1 shows some of the most prominent applications in this field. These projects were characterised by a dynamic adaptation of faade elements to external conditions. An example is the Kiefer technic Showroom built in 2007, with a fully automated faade control system. Each user has control over the faade element which affects only to the internal user space. Syddansk Universitet in Denmark holds a system that regulates the interior temperature according to the external climatic conditions. The One Ocean in South Korea uses natural materials that are susceptible to deformation depending on external conditions. Lastly, the Al Bahr Tower was built between 2009 and 2012 in Abu Dhabi, reaching 147 metres high. This building, due to its height and the important sun radiation along the day, was designed with intelligent faade based on origami techniques. 
This concept allows to each faade element to modify its geometry according to the sun position, reducing carbon dioxide emissions in the order of 1750 tonnes per year. However, obtaining all information regarding temperature, humidity, ventilation, lighting, or even occupant behaviour is not so simple. There are discrepancies between simulation predictions and real energy use. Designing efficient energy buildings with These projects were characterised by a dynamic adaptation of faade elements to external conditions. An example is the Kiefer technic Showroom built in 2007, with a fully automated faade control system. Each user has control over the faade element which affects only to the internal user space. Syddansk Universitet in Denmark holds a system that regulates the interior temperature according to the external climatic conditions. The One Ocean in South Korea uses natural materials that are susceptible to deformation depending on external conditions. Lastly, the Al Bahr Tower was built between 2009 and 2012 in Abu Dhabi, reaching 147 metres high. This building, due to its height and the important sun radiation along the day, was designed with intelligent faade based on origami techniques. This concept allows to each faade element to modify its geometry according to the sun position, reducing carbon dioxide emissions in the order of 1750 tonnes per year. However, obtaining all information regarding temperature, humidity, ventilation, lighting, or even occupant behaviour is not so simple. There are discrepancies between simulation predictions and real energy use. Designing efficient energy buildings with good indoor environment involves elements of expertise derived from multiple disciplines such as architects, civil, mechanical, and electrical engineers. Consequently, BIM is being increasingly used to design buildings. 
The integration of design and management in a single tool allows a faster and more flexible design process, enabling and easing the production of multiple alternatives that also consider the conservation and operation of the building, in both construction aspects and energy savings. All the shapes and volumetric possibilities of the design are assessed to optimise energy efficiency. In addition, it is possible to link the as-built model to other types of activities, such as the facility management once the project has been completed. Consequently, the costs derived from these activities can also be optimised. BIM and Energy Management Energy cost or infrastructure energy consumption simulation are concepts being increasingly important currently. Sustainability has become a key factor in construction projects and it is widespread in AEC tools. The same occurs with BIM. BIM profits in already existing buildings are well-known. As-built management documentation, maintenance, quality control, parameter monitoring 28], emergency management, or space management are examples of it. In addition to all the referred profits, energy management has also shown to be an attractive alternative. Commonly named as Building Energy Modelling (BEM), BIM-based management techniques have been applied to control, analyse, and manage energy [22,. Published research has shown the outstanding possibilities of BEM for the energetic simulation of infrastructure. Those BEM techniques permit connecting BIM models with external databases which collect the main material properties, allowing a detailed energetic analysis. Another main advantage attributed to BEM is data visualization. Through use of the BIM model, data visualization environment is provided in a user friendly graphical way. This allows an ease understanding data and information. In this way, Jen and Vernatha performed a complete energy simulator based on BEM, providing real time data through the infrastructure BIM model. 
Heritage management, as one of the most important BIM research lines for the built environment, has also been extrapolated to energy management field. Technical data, consumption projections, historical consumptions or location information are essentials for infrastructure management. Alahmad et al. proposed a combination of sensors and elements of the BIM model for the electrical system of the infrastructure. Other researchers such as Woo et al. associated energy consumption with parameters such as temperature, humidity or occupation. Another field related with BEM is the one referred to data interoperability. There are multiple information databases hosting various material properties and its synchronization with each element of the BIM model is not a challenge and even more if the great variety software is considered. As has been reviewed, a large number of published research applied BEM for infrastructure energy management. However, concepts as BIM and Origami applied in a single way for energy management purposes would be considered a novel topic for future research lines due to the lack of research in this regard. Methodology Although there are published research and applications either in the application of origami techniques in building or in the use of BIM, it is not common to find applications of the two concepts together. On the one hand, origami techniques allow the geometry of building elements to be modified. In this way, the behaviour of these elements will change according to external parameters such as humidity, temperature or wind, making it possible to reduce their impact on the infrastructure. On the other hand, the application of origami techniques in construction elements implies an important challenge in all project phases, such as design, construction or operation and maintenance. It is precisely this factor that justifies the use of BIM for the development and implementation of origami techniques in construction and building fields. 
The basic information associated with the AEC sector must be known before modelling the element in BIM, according to the origami technique. First, a multi-criteria analysis of all the properties of the various alternatives must be carried out. In addition, their application to an adaptive building element of the infrastructure must be considered in order to determine which technique is most suitable for the project. Aspects such as the fit between geometric figures, construction requirements, slimness, wear of parts or behaviour of the cast shadow were considered. The element was then defined for modelling in BIM after selecting the origami technique to be implemented. This selection uses a multi-criteria analysis considering parameters such as geometry of the element, dimension, or material. Subsequently, an adaptative element was added to the three-dimensional model of a generic building in order to simulate solar radiation by using specific software. As the project considers adapting the infrastructure model to the Origami element rather than the other way around, the modelling process began with the Origami constructive element. It started decomposing the element into simple geometric shapes. Then, the dimensions of the element were parameterised to affect its geometry considering the solar path phases. The element was then copied along to two of the four faades considered, obtaining 64 pieces. Finally, the solar trajectory was parameterised, affecting the opening and closing of the different adaptive construction components depending on the location of the building, its orientation and the relative position of the earth respect to the sun in each season of the year. Figure 2 shows the methodology followed which main objective was the design and development of an adaptive construction element using origami techniques. This was placed on the outer surface of the building and can reply automatically according to the sun trajectory. 
tor that justifies the use of BIM for the development and implementation of origami techniques in construction and building fields. The basic information associated with the AEC sector must be known before modelling the element in BIM, according to the origami technique. First, a multi-criteria analysis of all the properties of the various alternatives must be carried out. In addition, their application to an adaptive building element of the infrastructure must be considered in order to determine which technique is most suitable for the project. Aspects such as the fit between geometric figures, construction requirements, slimness, wear of parts or behaviour of the cast shadow were considered. The element was then defined for modelling in BIM after selecting the origami technique to be implemented. This selection uses a multi-criteria analysis considering parameters such as geometry of the element, dimension, or material. Subsequently, an adaptative element was added to the three-dimensional model of a generic building in order to simulate solar radiation by using specific software. As the project considers adapting the infrastructure model to the Origami element rather than the other way around, the modelling process began with the Origami constructive element. It started decomposing the element into simple geometric shapes. Then, the dimensions of the element were parameterised to affect its geometry considering the solar path phases. The element was then copied along to two of the four faades considered, obtaining 64 pieces. Finally, the solar trajectory was parameterised, affecting the opening and closing of the different adaptive construction components depending on the location of the building, its orientation and the relative position of the earth respect to the sun in each season of the year. Figure 2 shows the methodology followed which main objective was the design and development of an adaptive construction element using origami techniques. 
This was placed on the outer surface of the building and can reply automatically according to the sun trajectory. Origami Properties and Multicriteria Analysis There are many applications of origami techniques in the AEC sector as well as a variety of ways of implementation. For this reason, research of the different possible geometries to be applied had to be carried out, in order to select the most adequate for the main target. A multi-criteria analysis of the different options was created, according to the following properties: simplicity, aesthetics, rigidity, fit between elements, economics, functionality, minimum displacement, and material savings. The shapes considered were: pai-pai, umbrella, quadrangular dome, triangular, pentagonal, wheel, and cordillera. Once all of them had been defined, they were confronted in a matrix ready to be valued. Based on the literature and the team criteria, each property of each origami shape was valued from 0 to 1. Results are argued and revaluated during several evaluation rounds. The results are shown in Figure 3. The multi-criteria analysis shows quadrangular as the most viable geometric shape, obtaining a total score of 0.84 out of 1. However, the shapes triangle and mountain were only one hundredth of a point behind, with a score of 0.83 out of 1. Further behind were the geometric shapes pai pai and pentagon. Lastly, and with the lowest score, was Umbrella. Once all of them had been defined, they were confronted in a matrix ready to be valued. Based on the literature and the team criteria, each property of each origami shape was valued from 0 to 1. Results are argued and revaluated during several evaluation rounds. The results are shown in Figure 3. The multi-criteria analysis shows quadrangular as the most viable geometric shape, obtaining a total score of 0.84 out of 1. However, the shapes triangle and mountain were only one hundredth of a point behind, with a score of 0.83 out of 1. 
Further behind were the geometric shapes pai pai and pentagon. Lastly, and with the lowest score, was Umbrella. Therefore, the quadrangular shape was considered the most suitable for the project. It has high qualifications in all properties, especially aesthetics and the so-called FIT, which refers to the fit with respect to other shapes. In addition, it had already been used in the Al Bahr Towers project, given its alignment aspect, which refers to the alignment between its vertices. Therefore, the quadrangular shape was considered the most suitable for the project. It has high qualifications in all properties, especially aesthetics and the so-called FIT, which refers to the fit with respect to other shapes. In addition, it had already been used in the Al Bahr Towers project, given its alignment aspect, which refers to the alignment between its vertices. Solution Once the most viable geometric shape for implementation in this project was selected, the final solution was developed. Three key aspects needed to be detailed: The definition of the construction element based on the selected shape. The definition of the generic building. The modelling of both the element and the building by using BIM. Definition of the Origami Element The properties of the selected geometrical option had to be defined for its implementation in BIM. In addition, constructive aspects such as the uprights and rails, needed to make the element fold according to origami principles, had to be considered. One of the main concepts to be detailed was the geometry of the folded and unfolded position. As shown in Figure 4, the geometric shape bears resemblance of an x in the folded position, while the unfolded shape also resembles a + symbol. The central point of both was the vertex with the maximum height, so fewer lanes are required due to this feature. The modelling of both the element and the building by using BIM. 
Definition of the Origami Element The properties of the selected geometrical option had to be defined for its implementation in BIM. In addition, constructive aspects such as the uprights and rails, needed to make the element fold according to origami principles, had to be considered. One of the main concepts to be detailed was the geometry of the folded and unfolded position. As shown in Figure 4, the geometric shape bears resemblance of an x in the folded position, while the unfolded shape also resembles a + symbol. The central point of both was the vertex with the maximum height, so fewer lanes are required due to this feature. Thus, the movement of the element will behave according to origami principles, allowing the creation of concave (+) and convex (x) folds as can be seen in Figure 4. These dynamic elements must be anchored to the structure, thus the distance between floors was considered as the main lateral dimension. Hence, each figure will have an affected area of about nine square metres in its unfolded state and the elements can be easily anchored. In addition, each horizontal row of the elements will cover one building floor. Like geometry, the material plays a fundamental role in the behaviour of the element. A lightweight material has been applied, which does not add too much overload to the building. This material is also resistant to adverse weather conditions, flame retardant and is not opaque when fully unfolded, i.e., it allows a minimum of natural light. There are two materials Thus, the movement of the element will behave according to origami principles, allowing the creation of concave (+) and convex (x) folds as can be seen in Figure 4. These dynamic elements must be anchored to the structure, thus the distance between floors was considered as the main lateral dimension. Hence, each figure will have an affected area of about nine square metres in its unfolded state and the elements can be easily anchored. 
In addition, each horizontal row of the elements will cover one building floor. Like geometry, the material plays a fundamental role in the behaviour of the element. A lightweight material has been applied, which does not add too much overload to the building. This material is also resistant to adverse weather conditions, flame retardant and is not opaque when fully unfolded, i.e., it allows a minimum of natural light. There are two materials that meet these requirements: Teflon with fibreglass or silver carbon fibre. Both materials can be suitable for covering the surface between the steel frames that form each triangular element, as shown in orange in Figure 4. Definition of the Building The objective was to model a generic building that can contain a faade based on the origami elements described above. To do so, a simple building design was sought, focusing on only two aspects: dimensions and orientation. According to studies of actual applications of this type of faade, they tend to be more common in skyscrapers. Therefore, a building with an octahedral shape of 21 m on each side of the base and 48 m in height was proposed. This is equivalent to 16 floors in height with a distance between floors of three metres. It is important to note that the structure supporting the origami element must be three metres away from the internal face of the building. This is done to ease the circulation of air currents and to avoid heat accumulation, as well as to improve maintenance tasks. One of the final objectives of the project was the development of an automated system that acts according to the solar path, whereby orientation is a key factor. This orientation varies according to the project location. As indicated above, the building had four faades, two of which will be shielded and two of which will be unshielded. If the building was located in the northern hemisphere of the earth, the best position of the edge between the two shielded faades was facing to the south. 
If the building was in the southern hemisphere, the edge should face to the north. At the same time, the season of the year must be considered. As can be seen in Figure 5, in the summer, the sun rises over the horizon between east-northeast (ENE) and northeast (NE) and sets between west-northwest (WNW) and northwest (NW), which varies depending on the summer day. In spring and autumn, it rises between the near east-northeast (ENE) and east-southeast (ESE) directions and sets between west-northwest (WNW) and west-southwest (SW). Finally, the sun in winter rises between east-southeast (ESE) and southeast (SE) and sets between west-southwest (SW) and southwest (SW). It should be noted that the sun only rises in the east (E) and sets in the west (W) twice a year, called the vernal and autumnal equinoxes. The day of the year when the sun rises and sets closest to the north is called the summer solstice. The winter solstice occurs on the day of the year when the sun rises and sets closest to the south. According to studies of actual applications of this type of faade, they tend to be more common in skyscrapers. Therefore, a building with an octahedral shape of 21 metres on each side of the base and 48 metres in height was proposed. This is equivalent to 16 floors in height with a distance between floors of three metres. It is important to note that the structure supporting the origami element must be three metres away from the internal face of the building. This is done to ease the circulation of air currents and to avoid heat accumulation, as well as to improve maintenance tasks. One of the final objectives of the project was the development of an automated system that acts according to the solar path, whereby orientation is a key factor. This orientation varies according to the project location. As indicated above, the building had four faades, two of which will be shielded and two of which will be unshielded. 
If the building was located in the northern hemisphere of the earth, the best position of the edge between the two shielded faades was facing to the south. If the building was in the southern hemisphere, the edge should face to the north. At the same time, the season of the year must be considered. As can be seen in Figure 5, in the summer, the sun rises over the horizon between east-northeast (ENE) and northeast (NE) and sets between west-northwest (WNW) and northwest (NW), which varies depending on the summer day. In spring and autumn, it rises between the near east-northeast (ENE) and east-southeast (ESE) directions and sets between west-northwest (WNW) and west-southwest (SW). Finally, the sun in winter rises between east-southeast (ESE) and southeast (SE) and sets between west-southwest (SW) and southwest (SW). It should be noted that the sun only rises in the east (E) and sets in the west (W) twice a year, called the vernal and autumnal equinoxes. The day of the year when the sun rises and sets closest to the north is called the summer solstice. The winter solstice occurs on the day of the year when the sun rises and sets closest to the south. In this context, it was decided to place the building in the city of Madrid. Madrid is in the northern hemisphere with coordinates of 40 25 00. By means of the logic previously described, it is established that the edge between the two shielded faades must be oriented to the South. Therefore, these two facades will face Southwest and Southeast. BIM Once both the infrastructure and the dynamic construction element had been defined, the next step was to apply BIM to carry out the simulation of the project. For this reason, a study of the various available software alternatives on the market (Autodesk, Bentley and Rhinoceros 3D) was previously analysed. The 3D geometries to be modelled were complex and had to be animated in order to represent the transition from folded to unfolded position. 
Another basic requirement was that any desired aspect could be easily modified once the model was finished. Among all the possible options, the software chosen was Rhinoceros 3D, with Grasshopper for the parametric modelling of both the origami element and the building, and V-ray for the project image rendering. Origami element modelling The aim was to parameterise the geometry. For this purpose, the software Grasshopper was used. Figure 6 shows the process followed. The BIM element to be modelled is considered as "IfcShadingDevice", according to IFC Standards. The complete inheritance of it is "ifcRoot" > "IfcObjectDefinition" > "IfcProduct" > "IfcElement" > "IfcBuildingElement" and "IfcShadingDevice". BIM Once both the infrastructure and the dynamic construction element had been defined, the next step was to apply BIM to carry out the simulation of the project. For this reason, a study of the various available software alternatives on the market (Autodesk, Bentley and Rhinoceros 3D) was previously analysed. The 3D geometries to be modelled were complex and had to be animated in order to represent the transition from folded to unfolded position. Another basic requirement was that any desired aspect could be easily modified once the model was finished. Among all the possible options, the software chosen was Rhinoceros 3D, with Grasshopper for the parametric modelling of both the origami element and the building, and V-ray for the project image rendering. Origami element modelling The aim was to parameterise the geometry. For this purpose, the software Grasshopper was used. Figure 6 shows the process followed. The BIM element to be modelled is considered as "IfcShadingDevice", according to IFC Standards. The complete inheritance of it is "ifcRoot" > "IfcObjectDefinition" > "IfcProduct" > "IfcElement" > "IfcBuildingElement" and "IfcShadingDevice". The first step was to draw the plane that l contained the base of the piece, which is the initial square. 
A base of three metres by three metres was made, as detailed in the definition of the origami element. In addition, a repetition function was prepared in both the X and Y axis (both coinciding with the plane of the façade), so that when the whole piece was modelled, it was possible to repeat it. The first step was to draw the plane that contained the base of the piece, which is the initial square. A base of three metres by three metres was made, as detailed in the definition of the origami element. In addition, a repetition function was prepared in both the X and Y axis (both coinciding with the plane of the façade), so that when the whole piece was modelled, it was possible to repeat it. Subsequently, three dynamic parameters were added. They were associated with an outer square (like the initial one) and an inner square rotated 90° with respect to the previous one, which can be identified as the "inner square". The outer square is shown in green at point 2 in Figure 6, while the rhombus is shown in green at point 3 in Figure 6. Dimension "a" was the distance from the centre of the squares to the vertex of the outer square. The distance "b" is the one between the centre of all squares and the vertices of the inner square. The distance between the XY plane and the point of the piece furthest from it is set as "h". Once the three dynamic distances have been identified, the complete structure was created. To do this, the different points of the initial square, the outer square, the inner square and the height were joined together, as detailed in point 5 of Figure 6. Once all the points had been joined together, the surface is created that gives rise to the final origami figure. An example of how the parameterisation of this geometry works is shown in Figure 7, where the entire arrangement of points is modified according to a single value, the height "h". Once the three dynamic distances have been identified, the complete structure was created.
To do this, the different points of the initial square, the outer square, the inner square and the height were joined together, as detailed in point 5 of Figure 6. Once all the points had been joined together, the surface is created that gives rise to the final origami figure. An example of how the parameterisation of this geometry works is shown in Figure 7, where the entire arrangement of points is modified according to a single value, the height h. This fully parameterised origami element was replicated along the two fictitious faades. The origami elements were cloned along the X and Y axes. The elements of each faade created a panel in the XY plane. From this panel, a point of X and Y coordinates was created, which simulated the position of the sun. Depending on this point, the heights of the origami elements (4th point of Figure 6) vary between 0 and 1. In this way, the elements had different degrees of openness depending on this point, as it is detailed in Figure 7. The location of all the elements on the faade and the different degree of openness depending on the sun can be seen in Figure 8. ades. The origami elements were cloned along the X and Y axes. The elements of each faade created a panel in the XY plane. From this panel, a point of X and Y coordinates was created, which simulated the position of the sun. Depending on this point, the heights of the origami elements (4th point of Figure 6) vary between 0 and 1. In this way, the elements had different degrees of openness depending on this point, as it is detailed in Figure 7. The location of all the elements on the faade and the different degree of openness depending on the sun can be seen in Figure 8. Generic building modelling As indicated above, an octahedral geometry with only three parametric distances was chosen: the diagonal of the base square and the sides of the square. This enabled modifying the faade areas quickly and easily. 
A common distance between floors of three metres and a distance between columns of six metres were considered. The modelling was done with the same tool used for the origami element modelling, the Grasshopper software. Finally, the rendering of the infrastructure was performed. For this step, interoperability between tools plays as critical factor. For this reason, the render was performed by using the V-ray plug-in developed by Rinhoceros. Renderings were made in different time periods, showing the real behaviour of the faade's protective elements depending on the solar path. Results Considering the initial objectives, the research project achieved the results of the initial planning. The main objective was based on the successful application of BIM together with origami techniques. For this purpose, a study of the different possible techniques to be implemented and the real applications in the AEC sector were carried out. Subsequently, different geometries were studied to obtain the most suitable one according to a multi-criteria analysis. That showed the quadrangular as the most suitable origami shape. A parameterisation of the origami faade element by means of BIM was performed. The use of Grasshopper software was chosen for the application of this methodology. A generic building with an octahedral shape was modelled. The base of the infrastructure was a square that could be parameterised in all its dimensions. The location of the infrastructure was assumed to be in Madrid in order to consider the solar path. Two of the four Generic building modelling As indicated above, an octahedral geometry with only three parametric distances was chosen: the diagonal of the base square and the sides of the square. This enabled modifying the faade areas quickly and easily. A common distance between floors of three metres and a distance between columns of six metres were considered. 
The modelling was done with the same tool used for the origami element modelling, the Grasshopper software. Finally, the rendering of the infrastructure was performed. For this step, interoperability between tools plays as critical factor. For this reason, the render was performed by using the V-ray plug-in developed by Rinhoceros. Renderings were made in different time periods, showing the real behaviour of the faade's protective elements depending on the solar path. Results Considering the initial objectives, the research project achieved the results of the initial planning. The main objective was based on the successful application of BIM together with origami techniques. For this purpose, a study of the different possible techniques to be implemented and the real applications in the AEC sector were carried out. Subsequently, different geometries were studied to obtain the most suitable one according to a multicriteria analysis. That showed the quadrangular as the most suitable origami shape. A parameterisation of the origami faade element by means of BIM was performed. The use of Grasshopper software was chosen for the application of this methodology. A generic building with an octahedral shape was modelled. The base of the infrastructure was a square that could be parameterised in all its dimensions. The location of the infrastructure was assumed to be in Madrid in order to consider the solar path. Two of the four faades were designed to be covered, with the south-facing edge being the one chosen between the shielded faades. This origami element was dynamic and protected these facades by varying its geometry according to the theoretical position of the sun. The modification of its geometry was based on the application of origami techniques and its parameterisation by using BIM. A BIM-based automated system capable of modifying the geometry of Origami faade elements based on the sun trajectory is the main result showed in Figure 9. 
Nevertheless, the process of Origami-BIM element can be considered as a significant result. The methodology followed in the project can be of interest and a useful tool for designers in order to consider the possibilities or Origami shapes and the use of software. theless, the process of Origami-BIM element can be considered as a significant result. The methodology followed in the project can be of interest and a useful tool for designers in order to consider the possibilities or Origami shapes and the use of software. The scope of this study provides a great basis for the development of future studies, with sustainability and energy efficiency being their major objectives. The synthesis of BIM and origami techniques represents a breakthrough in the development of the sector, the technical feasibility of which was demonstrated by the project outcomes. Conclusions This study proposed a methodology for the design and management of the operation of a modular and adaptive faade for highly glassed buildings. The proposed modules were deployable and were inspired by the origami technique. This methodology combined BIM with the generation of the mobility of the modules based on the environmental conditions of lighting and temperature. These conditions were set in real time by the location and the season of the year under evaluation. Faades are the first line of defence against environmental conditions. This study uses a dynamic envelope that prevents or allows the sun to shine into the building. Their use would provide significant savings in energy consumption and reductions in carbon emissions, as well as increases interior visual comfort (i.e., interior lighting levels). Therefore, the initial investment in the installation of adaptive faades would be balanced by these The scope of this study provides a great basis for the development of future studies, with sustainability and energy efficiency being their major objectives. 
The synthesis of BIM and origami techniques represents a breakthrough in the development of the sector, the technical feasibility of which was demonstrated by the project outcomes. Conclusions This study proposed a methodology for the design and management of the operation of a modular and adaptive faade for highly glassed buildings. The proposed modules were deployable and were inspired by the origami technique. This methodology combined BIM with the generation of the mobility of the modules based on the environmental conditions of lighting and temperature. These conditions were set in real time by the location and the season of the year under evaluation. Faades are the first line of defence against environmental conditions. This study uses a dynamic envelope that prevents or allows the sun to shine into the building. Their use would provide significant savings in energy consumption and reductions in carbon emissions, as well as increases interior visual comfort (i.e., interior lighting levels). Therefore, the initial investment in the installation of adaptive faades would be balanced by these energy savings. In addition, it is very important that such elements follow the origami philosophy in order to change its properties in real time. This means adjusting their position and size according to the momentary interest, allowing or blocking the light flow. Applying BIM to the design of the origami elements with conventional software had several drawbacks. When an element was modelled, its dimensions, position, or shape are usually static and invariable. However, a parametric design was needed to modify its parameters integrally. For this purpose, commercial parametric design software such as Grasshopper was used. This is an add-on to the Rhinoceros 3D CAD program that allowed the development of complex parametric designs from generator components. In addition, it also included the V-ray tool for obtaining high quality renderings. 
Therefore, BIM may be incorporated into projects that follow the principles of origami, improving their sustainability and habitability. In conclusion, the novelty of this project resides in the combination of BIM and origami techniques in a single workflow. The selection of the most suitable origami geometry is reported, as well as the origami implementation, the parametrization of the origami element, the BIM implementation of the sun trajectory, and the variation of the origami BIM element according to the sun trajectory. With these results, future lines of investigation were opened, e.g., a detailed mechanism for folding or unfolding the origami elements, construction details of each origami element, or a BIM-based operating system for the manual operation of BIM elements.
/*
* Copyright 2021 Zindex Software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import type {Animation} from "../../Core";
// Horizontal padding (px) before the first graduation of the ruler.
export const RULER_PADDING: number = 6;
// Number of minor graduations (frames) per second.
export const RULER_DIVISIONS: number = 30;
// Duration of one frame in milliseconds (non-integer: 1000 / 30).
export const FRAME = 1000 / RULER_DIVISIONS;
// Width (px) of one second on the ruler at zoom = 1.
export const MAJOR_GRADUATION_WIDTH: number = 240;
// Width (px) of one frame on the ruler at zoom = 1.
export const MINOR_GRADUATION_WIDTH: number = MAJOR_GRADUATION_WIDTH / RULER_DIVISIONS;
// Pixels per millisecond at zoom = 1.
export const UNIT: number = MAJOR_GRADUATION_WIDTH / 1000;

/**
 * Snaps a time value to the nearest frame boundary.
 *
 * @param time Time in milliseconds; values <= 0 clamp to 0.
 * @param scaleFactor Frame-grid coarsening multiplier (see getScaleFactor);
 *     a factor of 2 snaps to every second frame, and so on.
 * @returns The snapped time as a whole number of milliseconds.
 */
export function roundTime(time: number, scaleFactor: number = 1): number {
    if (time <= 0) {
        return 0;
    }
    const frame = scaleFactor * FRAME;
    // Round to the nearest multiple of `frame`, then to a whole millisecond,
    // because `frame` itself is a non-integer number of milliseconds.
    return Math.round(Math.round(time / frame) * frame);
}
/**
 * Converts a pixel position on the ruler into a time value (ms),
 * taking the current scroll offset and zoom into account.
 */
export function getTimeAtX(x: number, scroll: number, zoom: number): number {
    const pixels = x + scroll - RULER_PADDING;
    const unzoomed = pixels / zoom;
    return unzoomed / UNIT;
}
/**
 * Converts a time value (ms) into a pixel position on the ruler.
 * Inverse of getTimeAtX for the same scroll/zoom.
 */
export function getXAtTime(time: number, scroll: number, zoom: number): number {
    const zoomedPixels = time * UNIT * zoom;
    return zoomedPixels - scroll + RULER_PADDING;
}
/**
 * Converts a pixel distance into a time delta (ms).
 * Unlike getTimeAtX, scroll and padding play no role for a pure delta.
 */
export function getDeltaTimeByX(x: number, zoom: number): number {
    const unzoomed = x / zoom;
    return unzoomed / UNIT;
}
/**
 * Converts a pixel distance into a frame-snapped time delta (ms),
 * snapping relative to the given starting time offset.
 */
export function getRoundedDeltaTimeByX(x: number, offset: number, zoom: number, scaleFactor: number): number {
    const rawDelta = getDeltaTimeByX(x, zoom);
    const snapped = roundTime(offset + rawDelta, scaleFactor);
    return snapped - offset;
}
// Zoom thresholds below which the frame grid is coarsened.
const SCALES = [0.05, 0.1, 0.125, 0.25, 0.5];
const LAST_SCALE = SCALES.length - 1;

/**
 * Picks the frame-grid scale factor for a zoom level: 1 at normal zoom,
 * and progressively coarser factors (2, 4, 8, 10, 20) as the view zooms out.
 */
export function getScaleFactor(zoom: number): number {
    if (zoom <= SCALES[LAST_SCALE]) {
        // The first threshold the zoom level fits under determines the factor.
        const threshold = SCALES.find(scale => zoom <= scale);
        if (threshold !== undefined) {
            return 1 / threshold;
        }
    }
    return 1;
}
/**
 * Draws the timeline ruler tick marks and second labels onto a canvas.
 *
 * Three tick sizes are used: tall ticks with a "m:ss" label at each second
 * boundary (stretched by scaleFactor), medium ticks at half-second
 * boundaries, and short ticks at frame boundaries on the current scale grid.
 *
 * @param context Target 2D canvas context; stroke/fill styles are assumed to
 *     be preset by the caller — TODO confirm.
 * @param width Drawable width of the ruler in pixels.
 * @param height Drawable height; ticks are drawn upward from the bottom edge.
 * @param scroll Horizontal scroll offset in zoomed pixels.
 * @param zoom Zoom factor; assumes zoom > 0, otherwise the loop below would
 *     never terminate — TODO confirm callers guarantee this.
 * @param scaleFactor Frame-grid coarsening factor (see getScaleFactor).
 */
export function renderRuler(context: CanvasRenderingContext2D, width: number, height: number, scroll: number, zoom: number, scaleFactor: number): void {
    // Work in unzoomed ruler units from here on.
    scroll /= zoom;
    const t = scroll / MINOR_GRADUATION_WIDTH;
    // Index of the first (possibly partially scrolled-off) graduation.
    let graduationNo = Math.floor(t);
    // Fractional part of t, rounded to 2 decimals to absorb float noise.
    const delta = ((Math.round(t * 100) - graduationNo * 100) / 100);
    let x = RULER_PADDING > scroll ? RULER_PADDING - scroll : RULER_PADDING;
    // Shift left by the partially visible graduation so ticks stay aligned.
    x -= delta * MINOR_GRADUATION_WIDTH * zoom;
    const step = MINOR_GRADUATION_WIDTH * zoom;
    const path = new Path2D();
    while (true) {
        // The +0.5 centres a 1px stroke on the pixel grid for crisp lines.
        path.moveTo(x + 0.5, height);
        if (graduationNo % (RULER_DIVISIONS * scaleFactor) === 0) {
            // Second boundary: tall tick plus a formatted time label.
            path.lineTo(x + 0.5, height - 20);
            context.fillText(formatSeconds(graduationNo / RULER_DIVISIONS), Math.ceil(x) + 4.5, height - 15);
        } else if (graduationNo % (RULER_DIVISIONS * scaleFactor / 2) === 0) {
            // Half-second boundary: medium tick.
            path.lineTo(x + 0.5, height - 15);
        } else if (graduationNo % scaleFactor === 0) {
            // Frame boundary on the current scale grid: short tick.
            path.lineTo(x + 0.5, height - 10);
        }
        x += step;
        graduationNo++;
        if (x > width) {
            break;
        }
    }
    context.stroke(path);
}
/**
 * Computes the on-screen pixel bounds [start, stop] of a time interval,
 * clipped to the viewport width. Returns null when nothing is visible
 * (the clipped interval is empty or entirely off-screen).
 */
export function getDurationBounds(startTime: number, endTime: number, width: number, scroll: number, zoom: number): [number, number] | null {
    const left = Math.max(0, getXAtTime(startTime, scroll, zoom));
    let right = getXAtTime(endTime, scroll, zoom);
    if (right < 0) {
        right = 0;
    } else if (right - left > width) {
        right = width + left;
    }
    return right > left ? [left, right] : null;
}
/**
 * Formats a whole number of seconds as "mm:ss", or "h:mm:ss" once the
 * value reaches one hour (e.g. 65 -> "01:05", 3661 -> "1:01:01").
 */
export function formatSeconds(s: number): string {
    const pad = (n: number): string => n > 9 ? `${n}` : `0${n}`;
    const totalMinutes = (s - s % 60) / 60;
    const hours = (totalMinutes - totalMinutes % 60) / 60;
    const minutes = totalMinutes % 60;
    const seconds = s % 60;
    return hours > 0
        ? `${hours}:${pad(minutes)}:${pad(seconds)}`
        : `${pad(minutes)}:${pad(seconds)}`;
}
/**
 * Formats a time in milliseconds as "MM:SS.mmm" (e.g. 61500 -> "01:01.500").
 * The minutes field holds total minutes and is not capped at 59; hours are
 * intentionally not split out.
 *
 * @param time Time in milliseconds; rounded to the nearest millisecond first.
 * @returns The formatted timestamp string.
 */
export function formatTime(time: number): string {
    time = Math.round(time);
    let k = time % 1000;
    time = (time - k) / 1000;
    let s = time % 60;
    time = (time - s) / 60;
    return `${time > 9 ? time : '0' + time}:${s > 9 ? s : '0' + s}.${k > 99 ? k : '0' + (k > 9 ? k : '0' + k)}`;
}
|
Early Motor Signs in Autism Spectrum Disorder A growing number of literature data suggest the presence of early impairments in the motor development of children with autism spectrum disorder, which could be often recognized even before the appearance of the classical social communication deficits of autism. In this narrative review, we aimed at performing an update about the available data on the early motor function in children with autism spectrum disorder. Early motor impairment in these children can manifest itself both as a mere delay of motor development and as the presence of atypicalities of motor function, such as a higher rate and a larger inventory, of stereotyped movements both with and without objects. In the perspective of a timely diagnosis, the presence of early motor signs can be an important clue, especially in an individual considered at high risk for autism. Motor and communication (both verbal and non-verbal) skills are connected and a pathogenetic role of early motor dysfunctions in the development of autism can be hypothesized. From this, derives the importance of an early enabling intervention aimed at improving motor skills, which could also have favorable effects on other aspects of development. Introduction Autism Spectrum Disorder (ASD) is a clinical condition characterized by social communication and interaction deficits, as well as by restricted interests and repetitive behaviors, according to the criteria of the Diagnostic and Statistical Manual of Mental Disorders, Fifth Edition (DSM-5). ASD diagnosis is still clinical; apart from the core signs of autism, for some time a large number of heterogeneous signs of motor development impairment have been reported in infants and children with ASD. 
Below, we report some of the most relevant of these signs described in the literature during the past few years: delayed motor development; persistent asymmetry when lying on the stomach at 4 months of age; righting from the supine to the prone position moving all the body en bloc not in a corkscrew fashion; abnormal patterns of crawling; walking asymmetry; sequencing instead of superimposition of one movement on the other for example during gait; unusual positions of arms; poor coordination; muscle tone and reflex abnormalities; choreiform movement of extremities; impaired finger-thumb opposition; stereotyped movements of the body, limbs, and fingers, including hand flapping; unusual gait patterns, including walking on tiptoes; poor motor imitation; impairment of postural control. An approximate idea of the prevalence of motor impairment in ASD is given by Ming et al., who in 154 children found: hypotonia in 51% of cases, apraxia in 34%, walking on tiptoes in 19%, and gross motor delay in 9%. These percentages, however, are probably underestimated, due to the relatively high median age (6 years) of the series studied, while over time the early motor signs tend to reduce and, moreover, they often go unnoticed compared to what are considered the core signs of autism. Yet, despite all these data, to this day, motor function impairments are not considered as diagnostic criteria of ASD, so much so that they are included only in the associated clinical features of ASD, according to DSM-5. Results Motor impairments are very important in ASD because they are possible early clinical markers and also because of their possible pathogenetic contribution to the development of social communication deficits in children with ASD, due to the basic role played by the motor system for exploring and knowing the surrounding environment.
Early motor dysfunction can manifest itself in ASD children as a mere delay in the acquisition of skills in the motor domain or in the form of atypicalities of motor development. We will deal with both aspects. We will then discuss modern techniques to evaluate early motor dysfunction, the issue of children at high risk (HR) for ASD, the relationship between motor and social communication skills, and the effects of the treatment of motor dysfunction. Early Motor Impairment in ASD Children: Delay of Motor Development Already during the first year of life, a delay of both gross and/or fine motor abilities has been reported by several authors. As suggested by Arabameri and Sotoodeh, a delayed age of acquisition of sitting without support (mean months: 7.64), standing without support (mean months: 13.22), and walking alone (mean months: 18.31) has been found in ASD children. Reindal et al. studied the age of first walking and its relationship with ASD symptom severity in a sample of 490 children (23% females), subdivided into ASD (n = 376) and non-ASD (n = 114) groups. ASD children achieved independent walking significantly later than non-ASD children (mean: 14.7 versus 13.8 months, respectively). Age of first walking turned out to be significantly associated with ASD symptom severity, and females showed a non-significant later age of first walking. The authors concluded that in children with delayed independent walking ASD should be considered for differential diagnosis, perhaps especially in females.
Bolton et al., performed a prospective longitudinal cohort study about the offspring of 14,541 women. According to the parents' reports, children who later received a diagnosis of ASD already showed differences in fine motor skills, as well as in communication ones, from the age of 6 months. Davidovitch et al. studied in the first 24 months of life the developmental trajectory of 335 low-risk (LR) infants who later received a diagnosis of ASD. They found that by 9 months of age ASD children started to fail the communication, as well as motor items compared to typical and delayed non-ASD children. In the context of a longitudinal birth cohort study, Elberling et al., assessed infant mental health and development from birth to the age of 10 months. Mental health outcome was studied in 1585 individuals aged 5-7 years. Overall development problems and specific oral-motor development problems were found to be predictors of ASD. Pusponegoro et al., performed a cross-sectional study considering gross motor and socialization skills in 40 ASD children aged 18 months-6 years and 40 agematched typically developed (TD) controls. The mean gross motor score on the Vineland Adaptive Behavior Scales (2nd edition) was significantly lower in ASD children than in controls and the differences prevailed in skills requiring complex coordination such as ball throwing and catching, using stairs, jumping, and bicycling. Further, ASD children with gross motor deficits showed a mean socialization domain score significantly lower than those without gross motor impairments. Oien et al., found that children who passed the M-CHAT (Modified Checklist for Autism in Toddlers) screening at 18 months and instead received later a diagnosis of ASD (so called false-negative cases) however presented delays and atypical features in social communication as well as fine motor domains at 18 months. Differences seemed to prevail in girls. 
Among a general population sample of 515 infants (mean age 12.9 months), Kovaniemi et al., found that infants screened in the Of-Concern range on the ASD item cluster of the Brief Infant-Toddler Social and Emotional Assessment, administered to their parents, showed later achievement ages for gross motor skills than infants with the corresponding No Concern screen status. Nishimura et al., found that ASD diagnosis may be predicted based on the neurodevelopmental trajectories during the first 2 years of life. The authors assessed neurodevelopment in 952 infants at seven time points up to the age of 24 months. At 32 months, ASD was diagnosed in 3.1% of the children. The authors identified five neurodevelopmental classes: high normal, normal, low normal, delayed, and markedly delayed. The probability of an ASD diagnosis was highest (32.6%) in the markedly delayed class in comparison with the others: respectively 6.4% and 4.0% for delayed and low normal classes, and 0% both in the normal and high normal classes. Lebarton and Landa studied motor skills in 51 LR and 89 HR individuals aged 6 months. Note that, in general, an infant is considered at HR for ASD if he/she has a sibling with ASD and/or was born preterm presenting low birth weight, however, in the literature, subjects at HR for ASD almost always mean individuals who have a family history of ASD in an older sibling. Among the 89 HR infants reported by Lebarton and Landa,20 were later diagnosed with ASD. Results showed that motor development at age 6 months was correlated with ASD status at age 24-36 months, that is ASD was associated with lower early motor abilities. Landa et al. studied prospectively and longitudinally social, language, and motor development in 235 HR and LR children, aged 6-36 months, subdivided into: ASD identified by 14 months, ASD identified after 14 months, and no ASD. 
ASD children exhibited a developmental level similar to non-ASD children at age 6 months, but thereafter they showed atypical trajectories. Impairment from 14 to 24 months prevailed in the early-ASD compared to the later-ASD group, but was similar at 36 months. About half of ASD children showed a period of around 2 years characterized by development of quantitative aspects within normal limits according to the results of standardized tests, but during this period development decelerated, typical social engagement decreased, and autistic symptoms emerged. During the ASD preclinical period the first signs of developmental disruption are likely to be nonspecific, involving, for example, communication or motor delay. The authors highlighted the importance of a developmental screening at regular intervals to be started by the time of the first birthday, in order to improve the early detection of the first signs possibly associated with preclinical ASD or with non-ASD delays. However, this study also highlights that the administration of standardized tests may not be sufficient to detect mild motor dysfunction. Licari et al. evaluated through a prospective study the motor domain in 96 infants with early autism signs aged 9-14 months. At baseline motor difficulties were very frequent, affecting 63/96 infants (65.6%) in the gross motor domain and 29/96 infants (30.2%) in the fine motor domain. At a 6-month follow-up, 23/63 infants (36.5%) maintained gross motor difficulties, while 20/29 (69.0%) infants continued to show fine motor difficulties. Lower fine motor skills at baseline and follow-up were associated with greater severity of autism signs. The results underline the potential clinical value of motor skills' evaluation within early autism screening. Sasayama et al. studied 1067 children who had been screened for ASD at the age of 18 months. At age 6 years, 3.1% of them were diagnosed with ASD. 
Higher rates of difficulties in motor abilities as well as in social communication skills were found in ASD children at 18 months of age. However, early motor dysfunction does not appear to be exclusive to ASD, as suggested by Hirota et al., who studied prospectively charted developmental milestones' data obtained from home-based records in 720 children aged 5 years. All 720 children were evaluated to ascertain a possible diagnosis of neurodevelopmental disorder (NDD), including ASD, and 124 of them received a diagnosis of ASD, while 331 of them received a diagnosis of non-ASD NDD. Compared to children without NDD diagnosis, those with NDDs showed greater rates of potential delays in developmental domains, including also the motor domain, at as early as 12 months of life or even earlier. No significant differences were found between ASD and non-ASD NDD groups concerning the motor domain. Language regression is reported in about 25% of ASD children. Manelis et al., in a sample of 218 ASD children, identified 36 cases who showed definite language regression and compared them to 104 cases without regression. The age at which ASD cases with language regression reach key developmental milestones such as crawling, walking, and first words was significantly younger than the age of cases without regression and similar to that of TD children. Yet, despite this, children with language regression were diagnosed with more severe ASD symptoms than children without regression. The link between autism and motor delay is suggested not only by clinical data, but also by genetic data. Takahashi et al. studied in 734 children from the general population the possible association of the polygenic risk score for ASD (representing an estimate of the genetic liability to ASD) with neurodevelopmental progress. They found that genetic risks for ASD might be related to delays in the gross motor domain as well as in the receptive language domain (See Table 1 for a Summary). 
Already during the first year of life, a delay of both gross and/or fine motor abilities has been reported in ASD children. In particular, a delayed age of acquisition of sitting without support, standing without support, and walking alone has been found. Early Motor Impairment in ASD Children: Atypicalities of Motor Function For the purposes of an early diagnosis of ASD, not only a possible delay in the acquisition of motor skills should be taken into consideration, but also and perhaps above all the possible presence of atypical motor patterns, which are clearly more difficult to observe than a mere developmental delay during the clinical practice particularly with younger children. From 9 weeks gestational age to 21 weeks post-term, so called general movements (GMs) are a pattern of spontaneous movements performed without external stimulation, at first with the appearance of writhing movements (elliptical in form), then, from around 9th week post-term, with the appearance of fidgety movements (circular in form). Alterations of GMs are generally suggestive of an impairment of the central nervous system. Phagava et al. performed a retrospective study by analyzing the home videos of 20 infants later diagnosed as ASD. Compared to controls, ASD infants showed more often a poor repertoire of writhing GMs (with a lack of variable sequences, amplitude, and speed) as well as abnormal or absent fidgety movements. Einspieler et al., reviewed literature, finding that 17 out of 25 ASD individuals (68%) and 100% of 17 individuals with Rett syndrome showed abnormal GMs during the first 5 months of life. Zappella et al., through a retrospective study of home videos, recorded between birth and 6 months of life, compared the early development of eight males with transient autistic behaviors (lost after the age of 3) and that of ten males later diagnosed with ASD. 
Abnormal GMs were found significantly more frequently in infants later diagnosed with ASD than in those with only transient autistic behaviors. This was while eye contact, responsive smiling, and pre-speech vocalizations as well as concurrent motor repertoire including postures did not differentiate between the two groups. Loh et al. studied longitudinally motor behaviors coded from videotapes at 12 and 18 months in eight siblings later diagnosed with ASD, in nine non-diagnosed siblings, and in 15 controls. They found that the ASD group "arm waved" more frequently at 12 and 18 months, while the ASD and non-diagnosed group showed one posture ("hands to ears") more frequently than the controls at 18 months. Morgan et al. analyzed the videotapes of 50 ASD infants aged 18-24 months compared to 25 infants with developmental delay and 50 TD ones. They found in ASD infants a higher rate and a larger inventory of repetitive and stereotyped movements both with objects (swiping, rubbing/squeezing, rolling/knocking over, rocking/flipping, etc.) and without objects (flapping, rubbing the body, etc.). Purpura et al., analyzing retrospectively home videos, found in ten ASD infants aged 6-12 months an increased frequency as well as duration of repetitive movements of upper and lower limbs bilaterally, compared to ten TD infants and ten with developmental delay. The authors suggested that particularly hands and fingers' repetitive movements could be highly sensitive signs to consider in ASD early screening. The ability of maintaining midline head position during early infancy has been considered. Gima et al., using video recordings, studied spontaneous movements at 9-20 weeks post-term age in 14 very low birth-weight infants who later developed ASD. 
They found that the percentage of midline head position was lower in the ASD group than in the TD group, suggesting that during early infancy poorer skill in maintaining midline head position may help to identify infants who later develop ASD. Mitchell et al. carried out a review to identify the clinical markers for ASD in the first 2 years of life. They found, in addition to social communication deficits, also several atypical motor signs, which we mention in summary as follows, distinguishing them according to age. By 12 months of age, hypotonia and unusual posturing; atypical behaviors, such as hand flapping, finger flicking, shaking head and rolling eyes; delayed onset of independent sitting and walking; postural instability; head lag; impairment of fine motor skills. By 18 months of age: lower fine motor skills, perhaps also lower gross motor skills; reduced motor control; and postural instability. At 2 years, unusual postures, hypoactivity, and hypotonia; lower gross and/or fine motor skills; increased repetitive behaviors. This study highlights the heterogeneity of motor signs in these children, which at least partly depends on the age factor. Body symmetry in infants can be involved, as pointed out by Esposito et al., who found that ASD infants may exhibit significantly reduced static and dynamic symmetry while lying in the first 5 months or during unsupported gait when toddlers. Sparaci et al. studied longitudinally 41 HR infants at 10, 12, 18 and 24 months of life. They assessed changes in grasp types and functional actions performed with a spoon during a tool use task in the context of a play-like scenario. Based on outcome and vocabulary evaluation at 36 months, infants were subdivided into: 11 HR with ASD, 15 HR with language delay, and 15 HR without delay. More HR without delay infants than HR infants with ASD performed grasp types facilitating spoon use at age 24 months and functional actions at age 10 months. 
In HR infants functional action production at 10 months predicted respectively word comprehension (12 months) and production (24 and 36 months). The results of this study suggest the presence of impairments in purposeful actions in infants going on to receive ASD diagnosis and of a relationship between functional action production and communication. Also postures have been considered. Leezenbaum and Iverson studied prospectively early posture development in HR versus LR infants. They videotaped, respectively at 6, 8, 10, 12, and 14 months, 60 infants: 14 HR diagnosed with ASD (HR-ASD), 17 HR with language delay (HR-LD), 29 HR without diagnosis (HR-ND), and 25 LR. Compared to LR infants, HR-ASD ones and, to a lesser extent, HR-LD ones showed different postural trajectories characterized by slower development of more advanced postures. Further, subtle differences in posture sustainment were present between HR-ASD and HR-LD infants. Serdarevic et al. studied longitudinally 2905 children in order to look for a possible association between infants' neuromotor development and autistic traits in the general population. They examined overall motor development and muscle tone between ages 2-5 months. Parents rated their offspring autistic traits through two validated questionnaires. ASD diagnosis was confirmed in 30 children. The authors found that low muscle tone detected in infancy predicted autistic traits, while there was only a modest association between overall motor development and autistic traits. Detection of motor signs can favor an early diagnosis of autism. Sacrey et al., in a prospective study, examined parents' concerns about infants at HR for ASD at multiple time points during the first 2 years, finding that parents of HR children who were later diagnosed with ASD reported more concerns than parents of LR and HR children who did not receive ASD diagnosis. 
What interests us most about this study now is that concerns about sensory behavior and motor development predicted a subsequent ASD diagnosis as early as 6 months, while concerns about social communication skills and repetitive behaviors did not predict ASD diagnosis until after 12 months. Matheis et al., in a sample of 1226 ASD children, found that the mean age of first concern of their parents was 13.97 months. The most frequent first concern was related to speech/language. We want to emphasize here that first concerns related to motor development predicted an earlier age of first concern, while, on the contrary, first concerns related to communication and speech/language predicted later age of first concern. In line with what has just been reported are the results of Parmeggiani et al., who, in a retrospective cross-sectional study about ASD early features in 105 patients, found that motor skill disorders prevailed in children with age at onset in the first 12 months of life. Chinello et al. studied, in a general population of infants aged 12-17 months, the relationship between the persistence of primitive reflexes involving hand and mouth use and infants' motor repertoire, infants' age, and subclinical autistic traits in their parents. They found that persistence of the primitive reflexes was related to a worse motor repertoire (including interaction with objects and people), irrespective of the infants' age, and to subclinical autistic traits in their parents. The authors concluded suggesting the persistence of primitive reflexes as a marker for ASD early identification. Further, according to Setoh et al., their findings about infants' parents suggest that subclinical social communication anomalies may be related to lower motor performances in the next generation. Harris' review pointed out that early motor delays (first year of life) may predict a diagnosis of ASD, but also of other neurodevelopmental disorders including intellectual disability. 
The possible diagnosis of ASD should be considered in infants showing motor delays or other concerning motor behaviors. In this regard she suggested screening for: fine motor delays at age 6-15 months and gross motor delays at age 3-10 months; the presence of motor stereotypies (such as hand flapping at age 18-24 months and atypical limb movements while walking at age ≤24 months); motor control abnormalities, such as head lag at age 6 months, delays in bringing hands to midline at age 4-6 months, delays in protective extension reactions while sitting, and in moving freely when sitting at around 6 months. Sacrey et al. studied the motor act of reaching-to-grasp in children at HR and LR for ASD between 6 and 36 months of age. At 36 months, all children underwent a standardized diagnostic assessment, leading to a subdivision into three outcome groups: HR children with ASD diagnosis, HR children without ASD diagnosis, and LR children without ASD diagnosis. HR children with ASD showed higher, i.e., worse, total scores on the reach-to-grasp movement and higher scores on the components of orient, lift, and pronate compared to the other two groups. Considering the motor assessment in ASD children, in their overview paper Whyatt and Craig outlined the progression made from initial, broad evaluation through clinical tools such as the Movement Assessment Battery for Children (M-ABC2) to following targeted kinematic assessment. According to the authors, kinematic results showed by literature underline impaired perception-action coupling to adapt movement to task demands, leading to rigid motor profiles. Motor dysfunction may be a core feature of ASD, related to a problem with temporal control caused by impaired perception-action coupling (See Table 2 for a Summary). For the purposes of an early diagnosis of ASD, also the possible presence of atypical motor patterns should be taken into consideration. 
Alterations of general movements were found more frequently in infants later diagnosed with ASD. In ASD infants aged 18-24 months a higher rate and a larger inventory of repetitive and stereotyped movements both with objects and without objects have been found. ASD infants may show reduced static and dynamic body symmetry while lying in the first 5 months or during unsupported gait when toddlers. Low muscle tone detected in infancy may predict autistic traits. Modern Evaluation of Early Motor Function A clinical evaluation of motor function performed retrospectively or prospectively possibly using standardized tools, however careful and thorough, may not recognize subtle early motor signs. This has led to the development of techniques capable of providing quantitative measures of motor behavior and allowing possible more objective methods of assessment of subtle early motor signs. Some examples follow. Martin et al. conducted a quantitative study of head movement dynamics in 42 children aged 2.5-6.5 years, respectively, 21 with and 21 without ASD, through automated, computer-vision based head tracking. ASD children, compared to those without ASD, showed greater yaw displacement, i.e., greater head turning, and higher speed of yaw and roll, i.e., faster head turning and inclination. Note that head movement differences were specific to a social condition. Dawson et al. using computer vision analysis, assessed midline head postural control in 104 toddlers (age: 16-31 months), 22 of whom received a diagnosis of ASD, while watching movies including social and nonsocial stimuli. ASD toddlers showed a higher rate of head movement when compared to non-ASD toddlers, indicating difficulties in maintaining head midline position while engaging attentional systems. Caruso et al. studied the early motor trajectories of HR infants using the software MOVIDEA, developed to analyze videos providing objective kinematic features of infants' movements. 
They used MOVIDEA applying it to video recordings of spontaneous movements of 50 HR and 53 LR infants collected respectively at 10 days and 6, 12, 18, and 24 weeks. Considering the clinical outcome, 18 HR infants received a diagnosis of NDD, whereas 32 HR and 53 LR infants were TD. The authors found that HR infants later diagnosed with NDD presented higher general motor activity associated with lower variability and velocity, as well as higher acceleration of global movement in the space. Furthermore, these infants showed patterns of higher periodicity of limbs, particularly the upper ones, during the first 12 weeks of life. Wilson et al. utilized in a longitudinal study Opal wearable sensors to assess full day motor activity in five HR infants at 3, 6, 9, 12 months of life, thus obtaining a motion complexity measure. Motion complexity is critical to a typical motor development and its lack might indicate the presence of repetitive motor behaviors, which is a core ASD sign. The authors found that the two HR infants later diagnosed with ASD present lower motion complexity than the three that do not. This study provides interesting data, but evidently it is based on a very limited number of cases (See Table 3 for a Summary). Table 3. Modern evaluation of early motor function in ASD. A clinical evaluation of motor function performed using standardized tools, however careful and thorough, may not recognize subtle early motor signs. This has led to the development of techniques (in particular computer vision analysis) capable of providing quantitative measures of motor behavior and allowing possible more objective methods of assessment of subtle early motor signs in ASD children. Children at High Risk (HR) for ASD Atypical motor development is often described also in infants at HR for ASD, even when (and this is the most likely occurrence) they are not later diagnosed with ASD. In a prospective/longitudinal study, Nickel et al. 
analyzed postural development of 22 HR infants, videotaped at age 6, 9, 12, and 14 months. These infants showed, compared to 18 age-matched LR infants, delay in achieving more advanced postures, moving freely within recently achieved postures (e.g., while sitting), and moving from one posture to another. Achermann et al., through a three-dimensional motion capture technology, studied the way 10-month-old HR infants catch a ball rolling toward them, a task requiring adequate planning and execution. Several early motor measures were different in 39 HR infants in comparison with 19 controls. However, they were not related to autistic symptoms at 2 years, but to the following non-social, general development. Other specific manual motor behaviors have been considered in HR individuals. Begum Ali et al. studied body midline crossing at age 5, 10, and 14 months in 81 infants with HR for ASD, 31 with HR for Attention Deficit Hyperactivity Disorder (ADHD: another neurodevelopmental disorder according to DSM-5 ), 20 with HR for both ASD and ADHD, and 29 with LR for either ASD or ADHD. They found that only at 10 months, individuals at HR for ASD and/or ADHD showed fewer manual midline crossing compared to LR infants; midline crossing was not related to ASD traits, but to ADHD traits at age 2 years. The authors hypothesized that these results may be due to disrupted multisensory integration abilities and attention shifting in the first year of life. Leonard et al., examined the motor development of 20 children at HR for ASD, at age 9 and 40 months. All children underwent a series of motor, face processing, IQ (Intelligence Quotient) and diagnostic assessments at age 5-7 years during a follow-up visit, when only one subject had an ASD diagnosis. 
A greater proportion of subjects than expected showed motor problems at age 5-7 years and those reported by parents as having early poor motor skills were more likely to show lower face processing skills and higher social deficits at 5-7 years. The authors concluded that early motor problems may be a risk factor for later impairments of social communication and cognition. Garrido et al., in their meta-analysis reviewed studies about linguistic and/or motor abilities in HR for ASD children compared to LR for ASD children. Ultimately, they considered 34 eligible studies that included 2376 children at age 12 months (64% HR versus 36% LR), 3764 at age 24 months (66% HR versus 34% LR), and 3422 at age 36 months (63% HR versus 37% LR). Compared to LR children, HR infants had worse linguistic and motor (fine and gross) abilities, even though they did not show a homogeneous pattern of altered skills. These differences were detectable at the age of 12 months and seemed to persist until the age of 3 years. Differences in language abilities were greater than those in motor skills, particularly in the first year. Iverson et al., examined the gross and fine motor skills at 6 months in a large sample of 437 HR infants with heterogeneous developmental outcomes and in 188 infants with LR for ASD. Fine, but not gross, motor performance distinguished HR infants from LR infants. At 6 months fine, but not gross, motor performance predicted autism severity in the HR group at 36 months. These findings suggest the presence of early motor delays in HR infants, regardless of their developmental outcome. But relatively little is known about the nature of these delays. As pointed out by the authors, a limitation of this study (as well as of the others that have addressed these issues) is that a scale was used to assess the development of the infant which provides data on the presence or absence of motor behaviors, but not on which behaviors are present when an infant fails an item. 
According to the review of Varcin and Jeste, prospective, longitudinal studies about infants at HR for ASD have showed that ASD behavioral signs are usually not detected until the second year of life, whereas developmental signs during the first year, including motor impairment, are often subtle and outside the ASD core signs. The conclusions of these authors are almost entirely in line with those of Sacrey et al., according to which prospective studies about HR infants suggest that social communication deficits and repetitive behaviors appear during the second year of life, whereas additional features such as motor and sensory abnormalities appear already in the first year. Taffoni et al. studied longitudinally the motor planning development in 19 HR for ASD and in 14 LR children through a shape sorter task at 14, 18, 24, and 36 months of life. According to behavioral and kinematic data, all performance improvements in this type of task depended on several critical developments, including increased motor and perceptual competence. There were no differences between HR and LR cases, but a descriptive analysis of data regarding three HR children later diagnosed with ASD suggested the presence of early onset differences in motor planning skills: see children's action (such as reaching time and acceleration) and performance (such as the adjustment of the shapes). Landa et al., assuming that integration between visual input and motor output is crucial not only for improving motor abilities, but also for imitating and interpreting the actions of others, studied visual-motor coupling, or action anticipation, through the assessment of an interactive ball-rolling activity in 66 HR and 43 LR infants at age 6 months. 
Both LR and HR infants showed context appropriate looking behavior before and during the ball's trajectory toward them, but, compared to LR infants, HR ones were less likely to show context appropriate anticipatory response to the approaching ball by moving their arm/hand to intercept it. Further, in the HR group there was an atypical predictive relationship between anticipatory response at age 6 months and predilection for looking at faces compared to objects at age 14 months. The authors concluded pointing out that the skills underlying anticipatory response are necessary for the development of internal action models, which likely are important to social development (see, for example, imitation as well as production of interpretable and well-timed interpersonal actions). Is there a counterpart to motor dysfunction at the level of brain networks? Marrus et al., using resting state fcMRI, studied the functional brain networks involved in walking and gross motor development in a mixed cross-sectional and longitudinal sample of 130 HR and LR infants. At age 12 months, functional connectivity of motor and default mode networks was involved in walking, whereas at age 24 months dorsal attention and posterior cingulo-opercular networks were implicated. Examination of general gross motor function also showed an involvement of motor and default mode networks at age 12 and 24 months, whereas dorsal attention, cingulo-opercular, frontoparietal, and subcortical networks were additionally involved at age 24 months (See Table 4 for a Summary). Table 4. Children at HR for ASD. Atypical motor development is often described also in infants at HR for ASD, even when they are not later diagnosed with ASD. Prospective studies about HR infants suggest that social communication deficits and repetitive behaviors appear during the second year of life, whereas additional features such as motor abnormalities appear already in the first year. 
The Relationship between Motor and Social Communication Skills From literature data growing evidence emerges that motor and communication (both verbal and non-verbal) skills are connected. Developmental changes in motor skills modify the way children interact with people and objects (e.g., by showing) and they may affect language development. According to Leonard et al., the skill of exploring the environment, manipulating and sharing objects with others, stimulates the initiation of joint attention and modifies the types of parent's vocalizations and expressions received by the infant. Bradshow et al. studied the relationships between motor and social communication skills in 199 infants aged 12 months: 86 HR for ASD and 113 LR for ASD (TD). Infants were subdivided into: walkers, standers, or pre-walkers. HR walkers showed higher social communication skills, but similar cognitive skills, in comparison with HR pre-walkers. On the contrary, regardless of walking status, social communication and cognitive abilities were largely comparable for LR infants. Based on these results, the authors concluded that independent walking may foster the development of social communication skills in HR infants. Symbolic play, gestures, and language were all significantly better developed in HR walkers than in HR standers and/or pre-walkers, but it remains to be understood how the ability to walk can contribute to the development of these skills. Bruyneel et al. studied the links between motor, joint attention and language skills in LR and HR children. In both groups, fine and gross motor skills at age 10 months affected language (both comprehension and expression) at age 36 months directly and indirectly through joint attention at age 14 months. Problems in motor and joint attention skills prevailed in HR children than in LR ones. 
Therefore, early motor skills' assessment in HR children can be indicative of language problems later, particularly when also difficulties with joint attention occur. In a meta-analysis West, collecting data from 1953 ASD infants aged 3-42 months, found that infant motor skills differed significantly in ASD compared to TD infants and this discrepancy augmented as age increased. Collecting data from 890 ASD infants aged 6-43 months, the author found that within ASD, motor skills and communication are related. West suggested that efforts to monitor HR for ASD infants may be boosted by including motor skills' assessments. Motor deficits may be more easily detected compared to core ASD signs. Manwaring et al. studied the possible connections among gesture, fine motor, and language skills in 110 ASD children and in a control group of 87 non-ASD children (with developmental delays or with TD), aged 12-48 months. The results of this study support the hypothesis of an underlying construct of gesture use including fine motor skills and predicting concurrent receptive and expressive language in ASD young children and in non-ASD controls, as well as later receptive language in ASD children. This further supports the importance of motor and nonverbal communication strategies in early language learning. Choi et al., performed a prospective, longitudinal study about early developmental trajectories of fine motor skills in relation to expressive language outcomes considering 71 HR infants without ASD, 30 HR ones later diagnosed with ASD, and 69 LR ones without ASD. Fine motor abilities were assessed at age 6, 12, 18, and 24 months while expressive language outcomes were assessed at 36 months. HR infants later diagnosed with ASD exhibited significantly slower growth in fine motor skills from 6 to 24 months, compared to TD infants. Also, fine motor skills at age 6 months predicted expressive language at age 36 months. 
The authors concluded that poor fine motor skills may be addressed early in life to improve children's language outcomes. According to Iverson, HR infants vary widely in motor as well as communication development and this variation seems to produce cascading effects on development. Advances in motor skills support advances in communication (including language) development. Some HR infants seem to be indistinguishable from LR peers, while others show early but transient development delays; the most relevant delays are detectable in HR-ASD infants. However, it remains to be seen whether differences in early motor and communication development detected in HR-ASD infants are indicative of general delays or are specific to ASD. According to Iverson, one of the main developmental tasks of infancy is represented by exploration. The acquisition of new and more complex gross and fine motor abilities allows infants to obtain more information about the social and physical worlds. But if these advances are slowed, this potential for exploration and learning opportunities decreases. Motor development delays may provide very important diagnostic information as well as excellent opportunities to develop intervention strategies addressing simultaneously motor and communication skills. Assuming that in TD infants walk onset is associated with increased language growth, West et al. studied whether this association may be disrupted in HR infants, a population with important heterogeneity in motor and language development. They analyzed receptive and expressive language across the transition to walking in 91 HR infants aged 8-18 months subdivided into three groups (no diagnosis, language delay, and ASD) and in 25 LR infants aged 9-15 months. Only infants later diagnosed with ASD did not show increased language growth after walk onset. The authors concluded that walk onset may play a diverse role in language development in TD and in ASD infants. 
Probably, walking onset affords all infants greater autonomy in making experiences with social partners and objects but this autonomy may lead ASD infants to diverse experiences than their TD peers. MacDonald et al. studied motor skills in 159 children, respectively, with ASD (n = 110), with pervasive developmental disorder not otherwise specified (n = 26), and non-ASD (n = 23) aged 14-33 months. They found that both fine and gross motor skills predicted autism severity: children with poorer motor skills had greater deficits of social communication skills. Lemcke et al., collected prospectively data from 76,441 mothers' interviews about development and behavior of their children at 6 and 18 months. By the end of follow-up, 720 individuals with ASD and 231 individuals with intellectual disability (ID) were identified. At age 6 months, only few predictors in the area of social communication and motor development were found to prevail in the ASD and ID groups, while at age 18 months social, language, and motor skills were definitely delayed for both groups. However, signs that can distinguish ASD from ID were unclear. A result that at least apparently contradicts those previously reported comes from the study of Ben-Sasson and Gill who evaluated the development of 76 toddlers at 13 and 30 months, while their parents were given the First Year Inventory (FYI) (a standardized questionnaire for ASD screening) at 12 months. At 30 months, about 23.7% of the children received a clinical diagnosis such as, for example, ASD or developmental delay. The authors found that motor skill decrease was associated with language skill increase, while higher FYI sensory-regulatory risk was associated with gross motor skill decrease. The authors hypothesized that infants with developmental problems may divert energy from an area to a greater degree due to their developmental deficits. 
Lebarton and Iverson studied progresses in locomotion related to progresses in communication development in HR infants who are not later diagnosed with ASD at 36 months. Infants were assessed monthly between 5 and 14 months of age. The authors found an increased presence of gross motor skill delay from 5 to 10 months. Further, they found positive relations between sitting and gesture and babble onset, as well as between prone development and gesture onset. Therefore, they demonstrated the presence of links between gross motor and communication skills also in HR infants without ASD diagnosis. In the Lebarton and Landa's aforementioned work considering 51 LR and 89 HR individuals, motor development at 6 months predicted expressive language at age 30 and 36 months. Wu et al., compared the relationship of receptive and expressive language skills with motor functioning in 38 ASD toddlers aged 24 to 36 months and their age-matched TD peers. They found significant positive correlations between language skills and motor functioning in the ASD and TD individuals. The ASD toddlers with language delay showed worse multidimensional motor functioning than the ASD toddlers with typical language development and the TD individuals. Moreover, the lower motor functioning in ASD toddlers could predict the risks of expressive and receptive language delay. The authors concluded suggesting the importance of motor-based treatments targeting language skills in ASD young children. Tanner and Dounavi conducted a systematic review about the earliest (before 18 months) ASD symptoms considering only prospective studies. Early fine and gross motor delays showed consistent correlations with expressive and receptive language development by 24 months of age, suggesting once again a cascading effect of early motor skills on language (See Table 5 for a Summary). 
Developmental changes in motor skills modify the way children interact with people and objects (e.g., by showing) and they may affect language development. Efforts to monitor HR for ASD infants may be boosted by including motor skills' assessments. Motor deficits may be more easily detected than core ASD signs. One of the main developmental tasks of infancy is represented by exploration. The acquisition of new and more complex gross and fine motor abilities allows infants to obtain more information about the social and physical worlds. If these advances are slowed, this potential for exploration and learning opportunities decreases. Treatment of Motor Impairment in ASD As suggested by Lebarton and Landa, early motor interventions may reduce the negative impact of motor problems on early social communication skills. West believes the current interventions for infants and toddlers that focus primarily on increasing social communication skills may be enhanced by promoting and integrating motor behaviors. In their review, Busti Ceccarelli et al. studied the effects of interventions concerning fundamental motor skills in ASD children. They found data suggesting potentially significant advances in the motor domain after these interventions. Unfortunately, only a subgroup of the considered studies examined the possible effects in the social communication domain after the advances in the motor skills, showing not univocal but promising results. The authors concluded suggesting the inclusion of motor skills training within the intervention programs for ASD children. Even more recently, Elliott et al., pointed out that a fundamental motor ability intervention might produce improvements in ASD individuals not only at motor level, but even on social, listening, turn-taking, and transition skills. 
And in fact, there are research data suggesting that early motor exploratory skills are associated with expressive vocabulary at age 1, 2, and 3.5 years, with cognitive skills in toddlerhood and childhood, and also with later academic skills. For infants with clinically relevant early motor delays, intervention should focus on fundamental motor skills developing in the first year of life. Appropriate, parent-delivered interventions for motor skills may have positive effects also on other domains' skills such as face processing. According to Tanner and Dounavi, the role of early fine and gross motor abilities for expressive and receptive language development has been largely documented, therefore the authors underline the importance, already in the pre-diagnostic phase, of an intervention that follows an interdisciplinary approach, including also physical therapy, and that favors the development of motor skills. Treatment of infants with early motor deficits may be very important because achievements in these areas can modify infants' earliest experiences. But, as suggested by Leezenbaum and Iverson, instead of focusing on motor or social communication skills separately, it is probably more useful to broadly improve the infant's ability for exploratory experiences, emphasizing the reciprocal influence between infant and caregiver. This theory is sustained by research data about Early Start Denver Model, which follows a holistic approach in the treatment of very young ASD children. However, the literature data on the effectiveness of an intervention aimed at increasing motor skills in children with ASD are still very scarce today and therefore much more research is needed to better understand the effects of this type of intervention on the various domains of the development of these children (See Table 6 for a Summary). Table 6. Treatment of motor impairment in ASD. 
Early motor interventions may reduce the negative impact of motor problems on early social communication skills. Instead of focusing on motor or social communication skills separately, it is probably more useful to broadly improve the infant's ability for exploratory experiences, emphasizing the reciprocal influence between infant and caregiver. This theory is sustained by research data about Early Start Denver Model, which follows a holistic approach in the treatment of very young ASD children. Discussion First, we would like to mention the limitations of our narrative review that are fundamentally related to the presence of a possible bias in the selection of papers included, due to a partially inevitable subjective evaluation by the authors. However, a narrative review is methodologically indicated when the purpose of authors is giving a broad perspective about a topic (like the one covered in this paper) which is not so focused that it can be treated in a systematic review. An important element that emerges from the literature we reviewed is that the early detection of motor signs in ASD infants may contribute to making a timely autism diagnosis. For this purpose, attention should be paid within one year of life to possible (usually slight and not specific) motor signs, which not infrequently occur even before the appearance of social communication abnormalities in ASD infants. This is even more the case when dealing with a child who is predisposed to developing autism, that is a HR child. In fact, a large number of studies highlight the relevance of the developmental monitoring of HR infants, particularly those showing early motor delay who are the most likely to develop an ASD. Hence, the importance of including items dedicated to motor signs in the tests/questionnaires for early autism screening. 
In this regard, it would be important to know not only whether certain typical motor behaviors are present or absent, but also, if they are absent, which behaviors are observable. It should also be emphasized that a mere early delay in motor development seems to be a fairly non-specific sign, as it can be found not only in infants who will then develop ASD but also in those who will develop other neurodevelopmental disorders or even in those who will make up for the gap with their peers and will later have a normal development. What appears most characteristic of ASD, on the other hand, seems to be the presence of some early atypicalities of motor development, such as a higher rate and a larger inventory of stereotyped movements both with and without objects. However, it should be underlined that these atypicalities are often much more difficult to detect than a mere motor developmental delay during a normal clinical evaluation. In this sense, the administration of the currently used standardized tests may not be sufficient to detect a mild motor dysfunction. Motor signs, and in particular the early ones, in individuals with ASD should therefore be more valued within the diagnostic criteria of DSM-5, where so far they have been confined to the rank of associated symptoms of ASD. In our opinion, these signs might be included in ASD DSM-5 criteria within the "Restricted, repetitive patterns of behavior, interests, or activities", near to the sensory abnormalities. Early motor dysfunction could be not only a clinical marker that suggests a diagnosis, because, in the opinion of some authors, it may also play a pathogenetic role in ASD. How can this happen? Early vulnerabilities in motor skills may produce cascading effects on later outcomes, importantly, also in domains other than motor one. 
During the first year of life, TD infants achieve a range of new motor skills that improve considerably their interactions with people and objects creating more opportunities for exploration of the surrounding environment. These motor skills, allowing through manipulation and exploration the inclusion of objects into the interactions with others, may play also a role in the development of joint attention. Therefore, the early detection of motor signs in HR individuals, even when social communication deficits are not evident, represents a relevant warning signal that leads one to suspect the imminent appearance of an ASD clinical picture. In this perspective, it is probably not by chance that the physical activity seems to be effective not only on motor skills but also on social-communication skills and behavior (see reduction of maladaptive and stereotypical behaviors) in ASD children and adolescents, improving their quality of life. Yet, many aspects about early motor dysfunction in ASD still need to be clarified. For example, if it is true that a delay in motor development can favor the appearance of social communication deficits, why for example in individuals with infantile cerebral palsy, in which there is by definition an early, persistent and often severe motor deficit impairing the exploration of the surrounding environment, does autism occur only in a minority of cases ? Similar considerations could be made also with regard to various neuromuscular pathologies that cause a delay in the acquisition of autonomous walking or even a failure to acquire it, without this leading to the development of an autistic-like behavior. What could be behind the motor impairment found in children with autism? A clear answer to this question does not currently exist and we are still at the hypothesis level. 
However, at the origin of motor dysfunction in ASD infants there might be an early impairment of long-range brain connectivity (see the findings shown by brain functional magnetic resonance) causing a failure in multisensory integration that negatively affects motor development, rather than a defined focal lesion of the central nervous system. In fact, the neurological examination of these individuals almost never shows focal signs, nor does brain magnetic resonance imaging, which, when performed, in most cases does not show focal lesions. An impairment of multisensory integration has also been directly or indirectly implicated in the development of various other clinical features of ASD. Conclusions Clinical experience and literature data suggest the presence of early heterogeneous motor dysfunctions in ASD patients that may even precede the onset of the core signs of autism. In the perspective of a timely diagnosis, the presence of early motor signs can be an important clue, especially in an individual considered at HR for autism. However, until now, motor signs have been considered only as associated clinical features of ASD, according to DSM-5. A pathogenetic role of early motor dysfunctions in the development of autism can be hypothesized. From this derives the importance of an early enabling intervention aimed at improving motor skills, which could also have favorable effects on other domains of development.
The Effect Of Attitude, Subjective Norm, Perceived Behaviour Control On Intention To Reduce Food Waste And Food Waste Behaviour : Food waste behavior is currently an interesting topic for researchers and environmentalists. This study aims to investigate the effect of attitude, subjective norm, and perceived behavioral control on the intention to reduce food waste and on food waste behavior. A questionnaire survey was administered to 200 household respondents in West Sumatra. The convenience sampling technique was used to collect data from respondents. Research instruments from previous studies were adapted to measure the variables in this research. The Structural Equation Modelling technique was used to analyse the data. The results show that attitude has a positive and significant effect on household intention to reduce food waste. In addition, intention to reduce food waste also has a significant effect on food waste behavior. In this study, subjective norm and perceived behavioral control do not have a significant effect on household intention to reduce food waste.
Evaluating the Asset Transfer Model in Facilitating Sustainable Livelihoods This study examines whether a dairy development intervention designed as an asset transfer and training project catalyzes social capital, and sustainably improves the livelihoods of the participants. Social network analysis is used to quantify the social capital in the intervention and comparison group. The intervention group demonstrates statistically different levels of social capital confirming that the intervention resulted in an increase in social capital. We also examine whether the infusion of three sources of capital, physical (asset transfer), human (training) and social capital, generated sufficient increases in net income to bring participants above the benchmark of a living income. The levels of net income three years after the project were sixty-five percent higher than at the close of the project, providing strong evidence of improved livelihoods and sustainability. |
package com.sumologic.kinesis;
import org.apache.log4j.Logger;
import com.sumologic.kinesis.KinesisConnectorRecordProcessorFactory;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker;
import com.amazonaws.services.kinesis.connectors.KinesisConnectorConfiguration;
import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
/**
 * Base executor that wires an Amazon Kinesis Client Library {@link Worker} to a connector
 * pipeline. Subclasses supply the record-processor factory via
 * {@link #getKinesisConnectorRecordProcessorFactory()}. Callers must invoke one of the
 * {@code initialize} overloads before {@link #run()}.
 *
 * @param <T> the type of records consumed from the Amazon Kinesis stream
 * @param <U> the type of records emitted by the connector pipeline
 */
public abstract class KinesisConnectorExecutorBase<T, U> implements Runnable {
    private static final Logger LOG = Logger.getLogger(KinesisConnectorExecutorBase.class.getName());

    // Amazon Kinesis Client Library worker to process records; assigned by initialize().
    protected Worker worker;

    /**
     * Initialize the Amazon Kinesis Client Library configuration and worker, discarding all
     * metrics via a {@link NullMetricsFactory}.
     *
     * @param kinesisConnectorConfiguration Amazon Kinesis connector configuration
     */
    protected void initialize(KinesisConnectorConfiguration kinesisConnectorConfiguration) {
        initialize(kinesisConnectorConfiguration, new NullMetricsFactory());
    }

    /**
     * Initialize the Amazon Kinesis Client Library configuration and worker with a metrics
     * factory.
     *
     * @param kinesisConnectorConfiguration Amazon Kinesis connector configuration
     * @param metricFactory used to emit metrics in the Amazon Kinesis Client Library; may be
     *        {@code null}, in which case the Worker is created without an explicit metrics
     *        factory
     */
    protected void
            initialize(KinesisConnectorConfiguration kinesisConnectorConfiguration, IMetricsFactory metricFactory) {
        KinesisClientLibConfiguration kinesisClientLibConfiguration =
                new KinesisClientLibConfiguration(kinesisConnectorConfiguration.APP_NAME,
                        kinesisConnectorConfiguration.KINESIS_INPUT_STREAM,
                        kinesisConnectorConfiguration.AWS_CREDENTIALS_PROVIDER,
                        kinesisConnectorConfiguration.WORKER_ID).withKinesisEndpoint(kinesisConnectorConfiguration.KINESIS_ENDPOINT)
                        .withFailoverTimeMillis(kinesisConnectorConfiguration.FAILOVER_TIME)
                        .withMaxRecords(kinesisConnectorConfiguration.MAX_RECORDS)
                        .withInitialPositionInStream(kinesisConnectorConfiguration.INITIAL_POSITION_IN_STREAM)
                        .withIdleTimeBetweenReadsInMillis(kinesisConnectorConfiguration.IDLE_TIME_BETWEEN_READS)
                        .withCallProcessRecordsEvenForEmptyRecordList(KinesisConnectorConfiguration.DEFAULT_CALL_PROCESS_RECORDS_EVEN_FOR_EMPTY_LIST)
                        .withCleanupLeasesUponShardCompletion(kinesisConnectorConfiguration.CLEANUP_TERMINATED_SHARDS_BEFORE_EXPIRY)
                        .withParentShardPollIntervalMillis(kinesisConnectorConfiguration.PARENT_SHARD_POLL_INTERVAL)
                        .withShardSyncIntervalMillis(kinesisConnectorConfiguration.SHARD_SYNC_INTERVAL)
                        .withTaskBackoffTimeMillis(kinesisConnectorConfiguration.BACKOFF_INTERVAL)
                        .withMetricsBufferTimeMillis(kinesisConnectorConfiguration.CLOUDWATCH_BUFFER_TIME)
                        .withMetricsMaxQueueSize(kinesisConnectorConfiguration.CLOUDWATCH_MAX_QUEUE_SIZE)
                        .withUserAgent(kinesisConnectorConfiguration.APP_NAME + ","
                                + kinesisConnectorConfiguration.CONNECTOR_DESTINATION + ","
                                + KinesisConnectorConfiguration.KINESIS_CONNECTOR_USER_AGENT)
                        .withRegionName(kinesisConnectorConfiguration.REGION_NAME);

        // The buffer-time limit only takes effect when the KCL invokes processRecords on empty
        // batches, so a false setting here is deliberately overridden above; warn the operator.
        if (!kinesisConnectorConfiguration.CALL_PROCESS_RECORDS_EVEN_FOR_EMPTY_LIST) {
            LOG.warn("The false value of callProcessRecordsEvenForEmptyList will be ignored. It must be set to true for the bufferTimeMillisecondsLimit to work correctly.");
        }

        if (kinesisConnectorConfiguration.IDLE_TIME_BETWEEN_READS > kinesisConnectorConfiguration.BUFFER_MILLISECONDS_LIMIT) {
            LOG.warn("idleTimeBetweenReads is greater than bufferTimeMillisecondsLimit. For best results, ensure that bufferTimeMillisecondsLimit is more than or equal to idleTimeBetweenReads ");
        }

        // If a metrics factory was specified, use it.
        if (metricFactory != null) {
            worker =
                    new Worker(getKinesisConnectorRecordProcessorFactory(),
                            kinesisClientLibConfiguration,
                            metricFactory);
        } else {
            worker = new Worker(getKinesisConnectorRecordProcessorFactory(), kinesisClientLibConfiguration);
        }
        LOG.info(getClass().getSimpleName() + " worker created");
    }

    /**
     * Runs the Amazon Kinesis Client Library worker. Blocks until the worker stops.
     *
     * @throws IllegalStateException if {@code initialize} was not called first
     */
    @Override
    public void run() {
        if (worker == null) {
            // Guard clause: initialize() wires up the worker; running without it is a
            // programming error, so signal it with the idiomatic unchecked exception.
            throw new IllegalStateException("Initialize must be called before run.");
        }
        // Start Amazon Kinesis Client Library worker to process records
        LOG.info("Starting worker in " + getClass().getSimpleName());
        try {
            worker.run();
        } catch (Throwable t) {
            // Pass an explicit message so log4j records the stack trace via the
            // (Object, Throwable) overload instead of logging only the throwable's toString().
            LOG.error("Worker " + getClass().getSimpleName() + " threw an exception.", t);
            throw t;
        } finally {
            LOG.error("Worker " + getClass().getSimpleName() + " is not running.");
        }
    }

    /**
     * This method returns a {@link KinesisConnectorRecordProcessorFactory} that contains the
     * appropriate {@link IKinesisConnectorPipeline} for the Amazon Kinesis Enabled Application
     *
     * @return a {@link KinesisConnectorRecordProcessorFactory} that contains the appropriate
     *         {@link IKinesisConnectorPipeline} for the Amazon Kinesis Enabled Application
     */
    public abstract KinesisConnectorRecordProcessorFactory<T, U> getKinesisConnectorRecordProcessorFactory();
}
|
<gh_stars>10-100
/* tslint:disable:no-relative-imports */
// IMPORTANT: If this fails due to the app not being able to flush it's
// event queue, this test will hang forever.
// Can we force the app to flush it's queue without killing the entire
// process (which would kill the test runner)?
import { describe } from 'riteway'
import { Frost } from '../../src/Frost'
import { delay, runtimeId, createDatabase } from '../helpers/utils'
describe('gracefully stopping the application', async assert => {
const db = await createDatabase(`test-integration-frost-api-poet-${runtimeId()}`)
const server = await Frost({
FROST_PORT: '30080',
FROST_HOST: 'localhost',
FROST_URL: 'http://localhost:30080',
MONGODB_DATABASE: db.settings.tempDbName,
MONGODB_USER: db.settings.tempDbUser,
MONGODB_PASSWORD: <PASSWORD>,
MONGODB_URL: 'mongodb://localhost:27017/frost', // force calc of url in configuration
})
// alow time for everything to start.
await delay(5)
const actual = await server.stop()
assert({
given: 'a running Frost',
should: `exit when stop() is called`,
actual,
expected: true,
})
await db.teardown()
})
|
The evolution of robotics research This article surveys traditional research topics in industrial robotics and mobile robotics and then expands on new trends in robotics research that focus more on the interaction between human and robot. The new trends in robotics research have been termed service robotics because of their general goal of getting robots closer to human social needs, and this article surveys research on service robotics such as medical robotics, rehabilitation robotics, underwater robotics, field robotics, construction robotics and humanoid robotics. The aim of this article is to provide an overview of the evolution of research topics in robotics from classical motion control for industrial robots to modern intelligent control techniques and social learning paradigms, among other aspects.
Comedy Central's "South Park" kicked off its 17th season on Wednesday night, picking the shadowy NSA and overly excitable Alec Baldwin as its first lampoon-worthy targets of the year. In the episode, Cartman seeks to expose the NSA's surveillance program and stumbles upon "Shitter," a next-generation social media tool that plugs one's thoughts directly to the Internet. What better spokesperson for such a product than the uncontrolled rage-tweeter, Alec Baldwin (voiced by SNL alumnus Bill Hader)?
"Hello, I'm Alec Baldwin, and I love social media. But sometimes, I accidentally tweet things that are mean or homophobic," begins Baldwin's commercial. "I don't think that way -- I just type that way. That's when I realized it wasn't me that was homophobic. It was my thumbs, and they needed to be gotten rid of."
Baldwin then cuts off his thumbs. "So then the problem was: I don't have thumbs! But I know that everyone in America still wants to hear what I have to say." |
Improving block cipher design by rearranging internal operations This paper discusses the impact of a simple strategy in block cipher design: rearranging the internal cipher components. We report on a test case in which we observed a significant upgrade on a cipher's security. We applied this approach in practice and report on an updated design of the IDEA block cipher, in which we swapped all exclusive-or operations for multiplications. The consequences of these modifications are far reaching: there are no more weak multiplicative subkeys (because multiplications are not keyed anymore) and overall diffusion improves sharply in the encryption framework. The unkeyed multiplication is novel in itself since it did not exist in IDEA as a primitive operation and it alone guarantees stronger diffusion than the exclusive-or operation. Moreover, our analysis so far indicate that the new cipher resists better than IDEA and AES against old and new attacks such as the recent biclique technique and the combined Biryukov-Demirci meet-in-the-middle attack. Experiments on an 8-bit microcontroller indicate the new design has about the same performance as IDEA. A theoretical analysis also suggests the new design is more resistant to power analysis than IDEA. |
Research question decomposition: a way to organise research output Food research is performed in multidisciplinary projects, generating a wide variety of data. It appears that often this output is unavailable for reuse in new projects. As a consequence, experimental work and analysis of results are repeated unnecessarily. This not only leads to inefficient use of resources, but even worse hinders the learning cycle that is essential to research activities. For example, sensory panel experiments usually measure sensory attributes other than strictly necessary within a specific project. However, accessing and re-interpreting the raw data is not common. The aim of this project is to develop an interactive method and tool for sharing and reusing experimental data, models and methods. The ultimate goal is to have a fully transparent knowledge chain, from fundamental to applied research and finally to application in food industry. A number of workshops and meetings with researchers have resulted in a global specification of the envisaged research management system. It appeared that a commercial LIMS (Laboratory Information Management System) or project management system would not comply with these requirements. These systems typically require fixed, static working procedures and focus either on samples or on projects as their primary entities. In the context of scientific food research a much more flexible approach is required. The crucial breakthrough was the observation that the key concept for organising research data and models is the research question, rather than project or sample. At the start of a project a general research question is formulated to define the goal of the project. This research question is then decomposed into more detailed research questions, based on new hypotheses and assumptions.
This process is repeated until research questions are at such a concrete level that either experiments, or models can be defined and executed to answer the respective research question. The answers of all sub-questions will then be combined to answer the higher level question, and so on, ideally until a satisfactory answer is found at the highest level. By associating the experiments and models with research questions they will be tractable, even after the project is closed. In other words, data and models have been enriched with context information. We have implemented this approach in RMS, Research Management System. First of all, it allows researchers to access research output from any location, using a standard web browser. Second, it puts data and models in a context, by describing experimental conditions, sample |
The roof at the Millennium Stadium for Friday's RBS 6 Nations match between Wales and England is to be left open.
Both teams must agree for it to be closed and while Wales stated that is their preference, the Rugby Football Union has said that England have taken a different view.
The weather forecast for Cardiff on Friday night is cold but dry, convincing visiting head coach Stuart Lancaster to have it kept open.
It is understood that England will also have one eye on the looming World Cup, where they will play all their matches in stadiums without roofs.
Meanwhile, Rob Howley believes that England cranking up the volume on loudspeakers during training ahead of Friday night's showdown typifies the 'meticulous' preparation required at Test level.
England experienced a horror show on their last Cardiff visit two years ago, suffering a record 30-3 defeat as their RBS 6 Nations title hopes and Grand Slam dreams were reduced to ruins.
Stuart Lancaster's men have been preparing to revisit the Welsh capital by training with hymns playing on loudspeakers, leaving no stone unturned in readying themselves for what awaits from a capacity 74,500 crowd.
'At a lot of stadiums in the world it's sometimes difficult in terms of the lineout calls and the communication between half-backs,' Wales assistant coach Howley said.
'They (England) experienced that in 2013, and preparation for any international side is very meticulous. They have looked back and learnt from that experience.
'Likewise, we had the experience at Twickenham a few years ago when they had music blaring on one side of the pitch, and we found it difficult in the warm-up prior to the game.
While Wales field 11 survivors from two years ago in their starting XV - scrum-half Rhys Webb, prop Samson Lee, lock Jake Ball and flanker Dan Lydiate are the exceptions - just five England players remain in Lancaster's line-up, and three others are making their Six Nations debuts.
Numerous key England personnel are absent through injury, but Howley added: 'It's a good side.
'When you can call on four (British and Irish) Lions on the bench, England are probably the only side in world rugby that can call on that experience when they have injuries. It's going to be a tough game.
'With any player going into their first Six Nations game, it's their ability to get up to speed with the intensity.
'It's frenetic and about making decisions under pressure. That's a challenge for us on Friday night as well.
'We experienced a difficult time last year at Twickenham when we were well beaten by a very good England side. We expect that side will once again turn up at the Millennium Stadium.
'Home advantage has been key and will be key on Friday. We all talk about the cauldron, but that stadium is only as good as the players' performance.
'We just need to concentrate on what we need to do. International rugby is about small margins and we need to get them right on Friday.
'We need to start well and if we do that, the crowd will have an influence.
Wales were beaten comfortably at Twickenham in last season's Six Nations, with only full-back Leigh Halfpenny's goalkicking keeping them in a game that they eventually lost 29-18.
'Last year, I don't think we were mentally right for England,' Howley said.
'I think a lot of our players were tired. I think there was a six-month hangover from a (Lions) tour in the summer.
'I think maybe what you saw in the New Zealand game in the autumn and then the South Africa game, that will be the Wales side that turns up on Friday night.
Howley, meanwhile, has paid tribute to Wales skipper Sam Warburton, who wins his 50th cap on Friday and continues to close in on Ryan Jones' record for most appearances as Wales captain.
'Sam is an outstanding captain, he is probably one of the best openside flankers in world rugby and his ability in the contact area is second to none,' Howley added.
'Turnovers are hugely influential in the international game, and he will certainly have an influence in the game on Friday night. |
<gh_stars>100-1000
/////////////////////////////////////////////////////////////////////
// = NMatrix
//
// A linear algebra library for scientific computation in Ruby.
// NMatrix is part of SciRuby.
//
// NMatrix was originally inspired by and derived from NArray, by
// <NAME>: http://narray.rubyforge.org
//
// == Copyright Information
//
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
// NMatrix is Copyright (c) 2012 - 2014, <NAME> and the Ruby Science Foundation
//
// Please see LICENSE.txt for additional copyright notices.
//
// == Contributing
//
// By contributing source code to SciRuby, you agree to be bound by
// our Contributor Agreement:
//
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
//
// == lapacke_templates.h
//
// Templated functions for calling LAPACKE functions directly.
//
#ifndef LAPACKE_TEMPLATES_H
#define LAPACKE_TEMPLATES_H
namespace nm { namespace math { namespace lapacke {
//getrf
// LU factorization of a general m-by-n matrix with partial pivoting; pivot
// row indices are written to ipiv. The unspecialized template only raises,
// since LAPACKE has no routines for NMatrix's non-BLAS dtypes; the
// specializations dispatch to LAPACKE_{s,d,c,z}getrf.
template <typename DType>
inline int getrf(const enum CBLAS_ORDER order, const int m, const int n, DType* a, const int lda, int* ipiv) {
//We don't want to call the internal implementation since the CLAPACK interface is slightly different than the LAPACKE one.
rb_raise(rb_eNotImpError, "lapacke_getrf not implemented for non_BLAS dtypes. Try clapack_getrf instead.");
return 0;
}
template <>
inline int getrf(const enum CBLAS_ORDER order, const int m, const int n, float* a, const int lda, int* ipiv) {
return LAPACKE_sgetrf(order, m, n, a, lda, ipiv);
}
template <>
inline int getrf(const enum CBLAS_ORDER order, const int m, const int n, double* a, const int lda, int* ipiv) {
return LAPACKE_dgetrf(order, m, n, a, lda, ipiv);
}
template <>
inline int getrf(const enum CBLAS_ORDER order, const int m, const int n, Complex64* a, const int lda, int* ipiv) {
return LAPACKE_cgetrf(order, m, n, a, lda, ipiv);
}
template <>
inline int getrf(const enum CBLAS_ORDER order, const int m, const int n, Complex128* a, const int lda, int* ipiv) {
return LAPACKE_zgetrf(order, m, n, a, lda, ipiv);
}
// Type-erased entry point: casts the void* buffer to DType* and forwards to
// the specialization selected by the DType template argument.
template <typename DType>
inline int lapacke_getrf(const enum CBLAS_ORDER order, const int m, const int n, void* a, const int lda, int* ipiv) {
return getrf<DType>(order, m, n, static_cast<DType*>(a), lda, ipiv);
}
//geqrf
// QR factorization of an m-by-n matrix; the scalar factors of the elementary
// reflectors are written to tau. The unspecialized template only raises; the
// specializations dispatch to LAPACKE_{s,d,c,z}geqrf.
template <typename DType>
inline int geqrf(const enum CBLAS_ORDER order, const int m, const int n, DType* a, const int lda, DType* tau) {
rb_raise(rb_eNotImpError, "lapacke_geqrf not implemented for non_BLAS dtypes.");
return 0;
}
template <>
inline int geqrf(const enum CBLAS_ORDER order, const int m, const int n, float* a, const int lda, float* tau) {
return LAPACKE_sgeqrf(order, m, n, a, lda, tau);
}
// Note: spacing of the specialization marker normalized to `template <>` for
// consistency with every other specialization in this header.
template <>
inline int geqrf(const enum CBLAS_ORDER order, const int m, const int n, double* a, const int lda, double* tau) {
return LAPACKE_dgeqrf(order, m, n, a, lda, tau);
}
template <>
inline int geqrf(const enum CBLAS_ORDER order, const int m, const int n, Complex64* a, const int lda, Complex64* tau) {
return LAPACKE_cgeqrf(order, m, n, a, lda, tau);
}
template <>
inline int geqrf(const enum CBLAS_ORDER order, const int m, const int n, Complex128* a, const int lda, Complex128* tau) {
return LAPACKE_zgeqrf(order, m, n, a, lda, tau);
}
// Type-erased entry point: casts the void* buffers to DType* and forwards.
template <typename DType>
inline int lapacke_geqrf(const enum CBLAS_ORDER order, const int m, const int n, void* a, const int lda, void* tau) {
return geqrf<DType>(order, m, n, static_cast<DType*>(a), lda, static_cast<DType*>(tau));
}
//ormqr
// Multiplies matrix C by the real orthogonal matrix Q produced by geqrf
// (side/trans select Q*C, C*Q, or their transposes). Only float/double
// specializations exist; the complex counterpart is unmqr below.
template <typename DType>
inline int ormqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, DType* a, const int lda, DType* tau, DType* c, const int ldc) {
rb_raise(rb_eNotImpError, "lapacke_ormqr not implemented for non_BLAS dtypes.");
return 0;
}
template <>
inline int ormqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, float* a, const int lda, float* tau, float* c, const int ldc) {
return LAPACKE_sormqr(order, side, trans, m, n, k, a, lda, tau, c, ldc);
}
template <>
inline int ormqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, double* a, const int lda, double* tau, double* c, const int ldc) {
return LAPACKE_dormqr(order, side, trans, m, n, k, a, lda, tau, c, ldc);
}
// Type-erased entry point: casts the void* buffers to DType* and forwards.
template <typename DType>
inline int lapacke_ormqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, void* a, const int lda, void* tau, void* c, const int ldc) {
return ormqr<DType>(order, side, trans, m, n, k, static_cast<DType*>(a), lda, static_cast<DType*>(tau), static_cast<DType*>(c), ldc);
}
//unmqr
// Multiplies matrix C by the complex unitary matrix Q produced by geqrf.
// Complex counterpart of ormqr above; the unspecialized template raises for
// every non-complex dtype.
template <typename DType>
inline int unmqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, DType* a, const int lda, DType* tau, DType* c, const int ldc) {
rb_raise(rb_eNotImpError, "lapacke_unmqr not implemented for non complex dtypes.");
return 0;
}
template <>
inline int unmqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, Complex64* a, const int lda, Complex64* tau, Complex64* c, const int ldc) {
return LAPACKE_cunmqr(order, side, trans, m, n, k, a, lda, tau, c, ldc);
}
template <>
inline int unmqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, Complex128* a, const int lda, Complex128* tau, Complex128* c, const int ldc) {
return LAPACKE_zunmqr(order, side, trans, m, n, k, a, lda, tau, c, ldc);
}
// Type-erased entry point: casts the void* buffers to DType* and forwards.
template <typename DType>
inline int lapacke_unmqr(const enum CBLAS_ORDER order, char side, char trans, const int m, const int n, const int k, void* a, const int lda, void* tau, void* c, const int ldc) {
return unmqr<DType>(order, side, trans, m, n, k, static_cast<DType*>(a), lda, static_cast<DType*>(tau), static_cast<DType*>(c), ldc);
}
//getri
// Computes the inverse of a matrix from the LU factorization produced by
// getrf: a holds the LU factors on entry and the inverse on exit; ipiv is
// the pivot array from getrf. Dispatches to LAPACKE_{s,d,c,z}getri.
template <typename DType>
inline int getri(const enum CBLAS_ORDER order, const int n, DType* a, const int lda, const int* ipiv) {
rb_raise(rb_eNotImpError, "getri not yet implemented for non-BLAS dtypes");
return 0;
}
template <>
inline int getri(const enum CBLAS_ORDER order, const int n, float* a, const int lda, const int* ipiv) {
return LAPACKE_sgetri(order, n, a, lda, ipiv);
}
template <>
inline int getri(const enum CBLAS_ORDER order, const int n, double* a, const int lda, const int* ipiv) {
return LAPACKE_dgetri(order, n, a, lda, ipiv);
}
template <>
inline int getri(const enum CBLAS_ORDER order, const int n, Complex64* a, const int lda, const int* ipiv) {
return LAPACKE_cgetri(order, n, a, lda, ipiv);
}
template <>
inline int getri(const enum CBLAS_ORDER order, const int n, Complex128* a, const int lda, const int* ipiv) {
return LAPACKE_zgetri(order, n, a, lda, ipiv);
}
// Type-erased entry point: casts the void* buffer to DType* and forwards.
template <typename DType>
inline int lapacke_getri(const enum CBLAS_ORDER order, const int n, void* a, const int lda, const int* ipiv) {
return getri<DType>(order, n, static_cast<DType*>(a), lda, ipiv);
}
//getrs
// Solves A*X = B (or a transposed variant selected by Trans) for NRHS
// right-hand sides, using the LU factors and pivots produced by getrf.
// B is overwritten with the solution. Dispatches to LAPACKE_{s,d,c,z}getrs.
template <typename DType>
inline int getrs(const enum CBLAS_ORDER Order, char Trans, const int N, const int NRHS, const DType* A,
const int lda, const int* ipiv, DType* B, const int ldb)
{
rb_raise(rb_eNotImpError, "lapacke_getrs not implemented for non_BLAS dtypes. Try clapack_getrs instead.");
return 0;
}
template <>
inline int getrs(const enum CBLAS_ORDER Order, char Trans, const int N, const int NRHS, const float* A,
const int lda, const int* ipiv, float* B, const int ldb)
{
return LAPACKE_sgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);
}
template <>
inline int getrs(const enum CBLAS_ORDER Order, char Trans, const int N, const int NRHS, const double* A,
const int lda, const int* ipiv, double* B, const int ldb)
{
return LAPACKE_dgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);
}
template <>
inline int getrs(const enum CBLAS_ORDER Order, char Trans, const int N, const int NRHS, const Complex64* A,
const int lda, const int* ipiv, Complex64* B, const int ldb)
{
return LAPACKE_cgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);
}
template <>
inline int getrs(const enum CBLAS_ORDER Order, char Trans, const int N, const int NRHS, const Complex128* A,
const int lda, const int* ipiv, Complex128* B, const int ldb)
{
return LAPACKE_zgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);
}
// Type-erased entry point: casts the void* buffers to DType* and forwards.
template <typename DType>
inline int lapacke_getrs(const enum CBLAS_ORDER order, char trans, const int n, const int nrhs,
const void* a, const int lda, const int* ipiv, void* b, const int ldb) {
return getrs<DType>(order, trans, n, nrhs, static_cast<const DType*>(a), lda, ipiv, static_cast<DType*>(b), ldb);
}
//potrf
// Cholesky factorization of a symmetric (Hermitian) positive-definite
// matrix; uplo selects whether the upper or lower triangle of A is used and
// overwritten with the factor. Dispatches to LAPACKE_{s,d,c,z}potrf.
template <typename DType>
inline int potrf(const enum CBLAS_ORDER order, char uplo, const int N, DType* A, const int lda) {
rb_raise(rb_eNotImpError, "not implemented for non-BLAS dtypes");
return 0;
}
template <>
inline int potrf(const enum CBLAS_ORDER order, char uplo, const int N, float* A, const int lda) {
return LAPACKE_spotrf(order, uplo, N, A, lda);
}
template <>
inline int potrf(const enum CBLAS_ORDER order, char uplo, const int N, double* A, const int lda) {
return LAPACKE_dpotrf(order, uplo, N, A, lda);
}
template <>
inline int potrf(const enum CBLAS_ORDER order, char uplo, const int N, Complex64* A, const int lda) {
return LAPACKE_cpotrf(order, uplo, N, A, lda);
}
template <>
inline int potrf(const enum CBLAS_ORDER order, char uplo, const int N, Complex128* A, const int lda) {
return LAPACKE_zpotrf(order, uplo, N, A, lda);
}
// Type-erased entry point: casts the void* buffer to DType* and forwards.
template <typename DType>
inline int lapacke_potrf(const enum CBLAS_ORDER order, char uplo, const int n, void* a, const int lda) {
return potrf<DType>(order, uplo, n, static_cast<DType*>(a), lda);
}
//potrs
// Solves A*X = B for NRHS right-hand sides using the Cholesky factor of A
// computed by potrf (uplo must match the potrf call). B is overwritten with
// the solution. Dispatches to LAPACKE_{s,d,c,z}potrs.
template <typename DType>
inline int potrs(const enum CBLAS_ORDER Order, char Uplo, const int N, const int NRHS, const DType* A,
const int lda, DType* B, const int ldb)
{
rb_raise(rb_eNotImpError, "not implemented for non-BLAS dtypes");
return 0;
}
template <>
inline int potrs<float> (const enum CBLAS_ORDER Order, char Uplo, const int N, const int NRHS, const float* A,
const int lda, float* B, const int ldb)
{
return LAPACKE_spotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);
}
template <>
inline int potrs<double>(const enum CBLAS_ORDER Order, char Uplo, const int N, const int NRHS, const double* A,
const int lda, double* B, const int ldb)
{
return LAPACKE_dpotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);
}
template <>
inline int potrs<Complex64>(const enum CBLAS_ORDER Order, char Uplo, const int N, const int NRHS, const Complex64* A,
const int lda, Complex64* B, const int ldb)
{
return LAPACKE_cpotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);
}
template <>
inline int potrs<Complex128>(const enum CBLAS_ORDER Order, char Uplo, const int N, const int NRHS, const Complex128* A,
const int lda, Complex128* B, const int ldb)
{
return LAPACKE_zpotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);
}
// Type-erased entry point: casts the void* buffers to DType* and forwards.
template <typename DType>
inline int lapacke_potrs(const enum CBLAS_ORDER order, char uplo, const int n, const int nrhs,
const void* a, const int lda, void* b, const int ldb) {
return potrs<DType>(order, uplo, n, nrhs, static_cast<const DType*>(a), lda, static_cast<DType*>(b), ldb);
}
//potri
// Computes the inverse of a symmetric (Hermitian) positive-definite matrix
// from the Cholesky factor produced by potrf; a is overwritten with the
// selected triangle of the inverse. Dispatches to LAPACKE_{s,d,c,z}potri.
template <typename DType>
inline int potri(const enum CBLAS_ORDER order, char uplo, const int n, DType* a, const int lda) {
rb_raise(rb_eNotImpError, "potri not yet implemented for non-BLAS dtypes");
return 0;
}
template <>
inline int potri(const enum CBLAS_ORDER order, char uplo, const int n, float* a, const int lda) {
return LAPACKE_spotri(order, uplo, n, a, lda);
}
template <>
inline int potri(const enum CBLAS_ORDER order, char uplo, const int n, double* a, const int lda) {
return LAPACKE_dpotri(order, uplo, n, a, lda);
}
template <>
inline int potri(const enum CBLAS_ORDER order, char uplo, const int n, Complex64* a, const int lda) {
return LAPACKE_cpotri(order, uplo, n, a, lda);
}
template <>
inline int potri(const enum CBLAS_ORDER order, char uplo, const int n, Complex128* a, const int lda) {
return LAPACKE_zpotri(order, uplo, n, a, lda);
}
// Type-erased entry point: casts the void* buffer to DType* and forwards.
template <typename DType>
inline int lapacke_potri(const enum CBLAS_ORDER order, char uplo, const int n, void* a, const int lda) {
return potri<DType>(order, uplo, n, static_cast<DType*>(a), lda);
}
//gesvd
// Singular value decomposition. DType is the matrix element type and CType
// the corresponding real type: singular values (s) and the superb
// convergence-diagnostic array are always real, even for complex matrices.
// Dispatches to LAPACKE_{s,d,c,z}gesvd.
template <typename DType, typename CType>
inline int gesvd(int matrix_layout, char jobu, char jobvt, int m, int n, DType* a, int lda, CType* s, DType* u, int ldu, DType* vt, int ldvt, CType* superb) {
rb_raise(rb_eNotImpError, "gesvd not yet implemented for non-BLAS dtypes");
return 0;
}
template <>
inline int gesvd<float, float>(int matrix_layout, char jobu, char jobvt, int m, int n, float* a, int lda, float* s, float* u, int ldu, float* vt, int ldvt, float* superb) {
return LAPACKE_sgesvd(matrix_layout, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, superb);
}
template <>
inline int gesvd<double, double>(int matrix_layout, char jobu, char jobvt, int m, int n, double* a, int lda, double* s, double* u, int ldu, double* vt, int ldvt, double* superb) {
return LAPACKE_dgesvd(matrix_layout, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, superb);
}
template <>
inline int gesvd<nm::Complex64, float>(int matrix_layout, char jobu, char jobvt, int m, int n, nm::Complex64* a, int lda, float* s, nm::Complex64* u, int ldu, nm::Complex64* vt, int ldvt, float* superb) {
return LAPACKE_cgesvd(matrix_layout, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, superb);
}
template <>
inline int gesvd<nm::Complex128, double>(int matrix_layout, char jobu, char jobvt, int m, int n, nm::Complex128* a, int lda, double* s, nm::Complex128* u, int ldu, nm::Complex128* vt, int ldvt, double* superb) {
return LAPACKE_zgesvd(matrix_layout, jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, superb);
}
// Type-erased entry point: casts the void* buffers to their element types
// (DType for the matrices, CType for s/superb) and forwards.
template <typename DType, typename CType>
inline int lapacke_gesvd(int matrix_layout, char jobu, char jobvt, int m, int n, void* a, int lda, void* s, void* u, int ldu, void* vt, int ldvt, void* superb) {
return gesvd<DType,CType>(matrix_layout, jobu, jobvt, m, n, static_cast<DType*>(a), lda, static_cast<CType*>(s), static_cast<DType*>(u), ldu, static_cast<DType*>(vt), ldvt, static_cast<CType*>(superb));
}
//gesdd
// Divide-and-conquer singular value decomposition. Same DType/CType
// convention as gesvd above, but with a single jobz flag and no superb
// argument. Dispatches to LAPACKE_{s,d,c,z}gesdd.
template <typename DType, typename CType>
inline int gesdd(int matrix_layout, char jobz, int m, int n, DType* a, int lda, CType* s, DType* u, int ldu, DType* vt, int ldvt) {
rb_raise(rb_eNotImpError, "gesdd not yet implemented for non-BLAS dtypes");
return 0;
}
template <>
inline int gesdd<float, float>(int matrix_layout, char jobz, int m, int n, float* a, int lda, float* s, float* u, int ldu, float* vt, int ldvt) {
return LAPACKE_sgesdd(matrix_layout, jobz, m, n, a, lda, s, u, ldu, vt, ldvt);
}
template <>
inline int gesdd<double, double>(int matrix_layout, char jobz, int m, int n, double* a, int lda, double* s, double* u, int ldu, double* vt, int ldvt) {
return LAPACKE_dgesdd(matrix_layout, jobz, m, n, a, lda, s, u, ldu, vt, ldvt);
}
template <>
inline int gesdd<nm::Complex64, float>(int matrix_layout, char jobz, int m, int n, nm::Complex64* a, int lda, float* s, nm::Complex64* u, int ldu, nm::Complex64* vt, int ldvt) {
return LAPACKE_cgesdd(matrix_layout, jobz, m, n, a, lda, s, u, ldu, vt, ldvt);
}
template <>
inline int gesdd<nm::Complex128, double>(int matrix_layout, char jobz, int m, int n, nm::Complex128* a, int lda, double* s, nm::Complex128* u, int ldu, nm::Complex128* vt, int ldvt) {
return LAPACKE_zgesdd(matrix_layout, jobz, m, n, a, lda, s, u, ldu, vt, ldvt);
}
// Type-erased entry point: casts the void* buffers to their element types
// and forwards.
template <typename DType, typename CType>
inline int lapacke_gesdd(int matrix_layout, char jobz, int m, int n, void* a, int lda, void* s, void* u, int ldu, void* vt, int ldvt) {
return gesdd<DType,CType>(matrix_layout, jobz, m, n, static_cast<DType*>(a), lda, static_cast<CType*>(s), static_cast<DType*>(u), ldu, static_cast<DType*>(vt), ldvt);
}
//geev
// Eigenvalues and (optionally) left/right eigenvectors of a general square
// matrix. Dispatches to LAPACKE_{s,d,c,z}geev.
//
// This one is a little tricky. The signature is different for the complex
// versions than for the real ones. This is because real matrices can have
// complex eigenvalues. For the complex types, the eigenvalues are just
// returned in an argument that's a complex array, but for real types the
// real parts of the eigenvalues are returned in one (array) argument, and
// the imaginary parts in a separate argument.
// The solution is that the template takes a wi argument, but it is just
// ignored in the specializations for complex types.
template <typename DType>
inline int geev(int matrix_layout, char jobvl, char jobvr, int n, DType* a, int lda, DType* w, DType* wi, DType* vl, int ldvl, DType* vr, int ldvr) {
rb_raise(rb_eNotImpError, "not yet implemented for non-BLAS dtypes");
return -1;
}
template <>
inline int geev(int matrix_layout, char jobvl, char jobvr, int n, float* a, int lda, float* w, float* wi, float* vl, int ldvl, float* vr, int ldvr) {
return LAPACKE_sgeev(matrix_layout, jobvl, jobvr, n, a, lda, w, wi, vl, ldvl, vr, ldvr);
}
template <>
inline int geev(int matrix_layout, char jobvl, char jobvr, int n, double* a, int lda, double* w, double* wi, double* vl, int ldvl, double* vr, int ldvr) {
return LAPACKE_dgeev(matrix_layout, jobvl, jobvr, n, a, lda, w, wi, vl, ldvl, vr, ldvr);
}
// wi is intentionally unused here: complex eigenvalues fit entirely in w.
template <>
inline int geev(int matrix_layout, char jobvl, char jobvr, int n, Complex64* a, int lda, Complex64* w, Complex64* wi, Complex64* vl, int ldvl, Complex64* vr, int ldvr) {
return LAPACKE_cgeev(matrix_layout, jobvl, jobvr, n, a, lda, w, vl, ldvl, vr, ldvr);
}
// wi is intentionally unused here: complex eigenvalues fit entirely in w.
template <>
inline int geev(int matrix_layout, char jobvl, char jobvr, int n, Complex128* a, int lda, Complex128* w, Complex128* wi, Complex128* vl, int ldvl, Complex128* vr, int ldvr) {
return LAPACKE_zgeev(matrix_layout, jobvl, jobvr, n, a, lda, w, vl, ldvl, vr, ldvr);
}
// Type-erased entry point: casts the void* buffers to DType* and forwards.
template <typename DType>
inline int lapacke_geev(int matrix_layout, char jobvl, char jobvr, int n, void* a, int lda, void* w, void* wi, void* vl, int ldvl, void* vr, int ldvr) {
return geev<DType>(matrix_layout, jobvl, jobvr, n, static_cast<DType*>(a), lda, static_cast<DType*>(w), static_cast<DType*>(wi), static_cast<DType*>(vl), ldvl, static_cast<DType*>(vr), ldvr);
}
}}}
#endif
|
Preoperative and early postoperative seizures in patients with glioblastoma—two sides of the same coin? Abstract Background Symptomatic epilepsy is a common symptom of glioblastoma, which may occur in different stages of disease. There are discrepant reports on the association between early seizures and glioblastoma survival, and even less is known about the background of these seizures. We aimed at analyzing the risk factors and clinical impact of perioperative seizures in glioblastoma. Methods All consecutive cases with de-novo glioblastoma treated at our institution between 01/2006 and 12/2018 were eligible for this study. Perioperative seizures were stratified into seizures at onset (SAO) and early postoperative seizures (EPS, ≤21 days after surgery). Associations between patients' characteristics and overall survival (OS) with SAO and EPS were addressed. Results In the final cohort (n = 867), SAO and EPS occurred in 236 (27.2%) and 67 (7.7%) patients, respectively. SAO were independently predicted by younger age (P =.009), higher KPS score (P =.002), tumor location (parietal lobe, P =.001), GFAP expression (≥35%, P =.045), and serum chloride at admission (>102 mmol/L, P =.004). In turn, EPS were independently associated with tumor location (frontal or temporal lobe, P =.013) and pathologic laboratory values at admission (hemoglobin < 12 g/dL, CRP > 1.0 mg/dL, and GGT > 55 U/L). Finally, SAO were associated with gross-total resection (P =.006) and longer OS (P =.030), whereas EPS were related to incomplete resection (P =.005) and poorer OS (P =.009). Conclusions In glioblastoma patients, SAO and EPS seem to have quite different triggers and contrary impact on treatment success and OS. The clinical characteristics of SAO and EPS patients might contribute to the observed survival differences. Preoperative and early postoperative seizures in patients with glioblastoma—two sides of the same coin? Symptomatic seizures are common in primary and secondary brain tumors.
1 In case of glioblastoma, 1 of 4 individuals shows seizures at onset (SAO) of disease as first clinical symptom. 2 In addition, glioblastoma patients frequently develop secondary epilepsy due to rapid tumor progression. 3 Several previous reports have addressed the association between SAO occurrence and glioblastoma survival, both with positive 2,4-8 and negative results. The majority of recent studies has specifically focused on the role of antiepileptic drugs (AED) on the survival effect of SAO. 6,12, However, a recent large pooled analysis of prospective clinical trials 25 has failed to show any survival benefit from the use of AED in glioblastoma. In this context, the knowledge of SAO predictors might be essential for a better understanding of these early glioblastoma-associated seizures and possible links with patients survival. Unfortunately, evidence on SAO predictors in glioblastoma is sparse, since the majority of SAO-related studies is based on either heterogenous (high and low grade) glioma cohorts or small glioblastoma populations. 6,14,26,27 Even less is known on the rate, risk factors and clinical impact of early postoperative seizures (EPS), which are not related to tumor progression and therefore have unclear pathophysiologic background. Accordingly, this study analyzed the risk factors for SAO and the relationship of SAO with overall survival (OS) in a large consecutive glioblastoma cohort. Moreover, we addressed the predictors and the clinical impact of EPS after glioblastoma surgery, as well as the similarities and differences between SAO and EPS. Patient Population and Clinical Management All consecutive cases with newly diagnosed glioblastoma treated between January 2006 and December 2018 in our institution were eligible for this study. The exclusion criteria were the following: (a) pediatric cases (<18 years old), (b) previous history of epilepsy, and (c) extracranial location. 
The retrospective study was conducted in accordance with the STROBE guidelines and was approved by the Institutional Ethics Committee, University of Duisburg-Essen (15-6504-BO). All cases were histologically confirmed via stereotactic biopsy or tumor resection. Early postoperative MRI within 72 h after surgery was performed after tumor resection. Standard chemoradiation with concomitant and adjuvant temozolomide 28 was initiated after surgery. Patients with poor perioperative neurological condition and/or without willingness for further treatment, were referred to best supportive care. According to the current guidelines, 29 AED treatment was usually initiated after the first seizure event. Prophylactic use of AED was not routinely performed in the cohort, except for selected cases. Data Management The following patients characteristics were collected from the electronic medical records: demographic (age and sex) and anthropometric parameters (body height, weight, and body mass index), medical history (arterial hypertension, diabetes mellitus, history of cancer, and hypothyroidism), KPS score at admission, tumor location, extent of resection (EOR), immunohistochemical and moleculargenetic parameters (expression of glial fibrillary acidic protein , p53 and Ki-67 proliferation index, the isocitrate dehydrogenase 1 gene mutation, and O 6methylguanine DNA methyltransferase promoter methylation status). In addition, 27 routine laboratory measurements at admission and 3 surrogate markers were assessed (see the Supplementary Table S1 with the full list of over 100 variables correlated with the study endpoints). Finally, overall survival or date of the last outpatient follow-up was assessed. The evaluation of histological specimens was based on the original reports of 2 clinical neuropathologists, as described previously. 
30 All histological and molecular findings were reviewed in accordance with the 2016 Classification of the Central Nervous System Tumors of the World Health Organization. 31 Tumor location was assessed upon the review of the preoperative MRI imaging. In addition, postoperative MRI scans were analyzed with regard to EOR. The cases without contrast-enhancing residual after tumor resection were considered gross-total resection (GTR), the Importance of the Study To date, there is a large discrepancy regarding the rate, risk factors, and predictive value of perioperative epileptic seizures in patients with glioblastoma. In this large consecutive glioblastoma cohort, we evaluated over 100 patient/tumor-specific variables with regard to the association with seizures at onset (SAO) and early postoperative seizures (EPS). SAO were independently associated with younger age, better preoperative clinical performance, higher GFAP expression, and higher serum chloride levels. In addition, SAO were related to more radical extent of resection and longer overall survival. In contrast, EPS were strongly associated with presence of systemic disorders like anemia, infection, and liver dysfunction as well as incomplete tumor resection and poorer overall survival. Our results encourage further analysis of the effect of perioperative seizures on glioblastoma survival. remaining cases were regarded as tumor debulking. All available preoperative and postoperative MRI scans were reviewed by the first author (Y.A.) blinded at this time for any clinical information. The diagnosis of epilepsy was based on the occurrence of clinical symptoms suspicious for seizures (involuntary movements, abnormal sensory signs, or an altered mental status). Additionally, patients underwent an electroencephalogram in case of questionable/nonconclusive clinical symptoms. 
All patients with symptomatic epilepsy were consulted by in-house epileptologists, for diagnosis confirmation, assessment of seizure semiology, and medical treatment. Epileptic seizures leading to the radiographic diagnosis of glioblastoma were regarded as SAO. Postoperative epileptic seizures occurring up to 3 weeks after surgery and prior to the begin of chemoradiation were regarded as EPS. When available, semiology (secondary generalized, simple or complex focal, or status epilepticus) and timing (in relation to the surgery day) of the seizures were documented. The hospital records were also reviewed for AED treatment. Study Endpoints and Statistical Analysis The following primary endpoints were addressed in this study: (a) independent predictors of SAO and (b) EPS and (c) association between the SAO/EPS and OS. The secondary endpoint of the study was the evaluation of the patients' characteristics related to the seizure semiology. Statistical analyses were performed with the help of PRISM (version 5.0, GraphPad Software Inc.) and SPSS (version 25, SPSS Inc., IBM). Patients' baseline characteristics were expressed as mean ± standard deviation (SD) or percentage of patients, as appropriate. For OS data, median values with interquartile range were reported. Differences with a P ≤.05 were regarded as statistically significant. First, all associations between the potential risk factors and the study endpoints were tested using univariate analysis. For univariate correlations, differences between continuous variables were analyzed using the Student' t-test for normally distributed data and the Mann-Whitney U test for non-normal distributed data; associations between categorical variables were analyzed using the chi-square or Fisher's exact tests, as appropriate. Laboratory measurements were evaluated as continuous variables and in dichotomized manner, according to the common reference values for upper and lower ranges. 
For laboratory tests showing significant associations with the endpoints only as continuous variables, an additional dichotomization was performed upon the cutoffs defined on the receiver operating characteristic (ROC) curve. For immunohistochemical tumor characteristics, the dichotomization was applied using the cutoffs reported previously in the literature and/or the results from the ROC curves. The significant correlations from univariate analyses were then evaluated in a multivariate analysis. Binary logistic regression analysis was used for the identification of independent predictors of SAO/EPS, as well as for their relation to EOR. Laboratory markers were first tested in a separate multivariate assessment prior to the inclusion in the final regression analysis. For correlation between SAO/ EPS and OS, a Cox proportional-hazards model was applied by adding the common outcome confounders (age, KPS, EOR, molecular markers, and adjuvant treatment). Missing data were replaced using multiple imputation. Data Availability Statement Any data not published within the article will be shared in anonymized manner by request from any qualified investigator. Patient Population After the exclusion of noneligible cases (age < 18 years, n = 7; history of epilepsy, n = 6; and spinal glioblastoma, n = 1), 867 individuals were included in the final analysis. The baseline characteristics of the cohort are presented in Table 1. Perioperative Seizures: Occurrence and Management The rates of SAO and EPS in the cohort were 27.2% (n = 236) and 7.7% (n = 67), respectively. Due to initially minor radiographic findings, a watch-and-wait strategy was applied in 14 SAO patients, resulting in late surgery between 2 and 11 months after seizure onset (mean interval: 5.71 months). In the remaining cases, all patients (with or without SAO) were operated within 2 weeks after the radiographic diagnosis. EPS were documented on the mean postoperative day 4.6 (±5.0). 
Twenty-three glioblastoma patients developed >1 separate seizure event during the perioperative course. Of them, 10 patients showed SAO and EPS. AED treatment was initiated after the occurrence of first seizure(s) in all cases except for 13 individuals with SAO. In addition, 13 patients received prophylactic AED treatment without preceding seizures. There was a wide range of AED used in the cohort, with levetiracetam and valproic acid as the most common drugs. Predictors of Perioperative Seizures SAO and EPS showed partially contrary correlations with the baseline characteristics (see Table 2 for the univariate analyses of the results reaching the significance level for at least one study endpoint; for the full list of all associations see the Supplementary Table S1). [Table 2 abbreviations: CRP, C-reactive protein; EOR, extent of resection; GFAP, glial fibrillary acidic protein immunohistochemistry staining percentage; GGT, gamma-glutamyl-transferase; GTR, gross total resection; MPV, mean platelet volume; OR, odds ratio; PLT, blood platelets; SD, standard deviation; WBC, white blood cells. Differences with a P ≤.05 were regarded as statistically significant.] In particular, SAO were more common in individuals with younger age (P =.001) and higher preoperative KPS score (P <.0001). The tumors located in the parietal lobe (odds ratio [OR]: 1.96, P =.003; see Figure 1 for the distribution of SAO/EPS rates in different brain areas), those with higher GFAP expression (≥35%, OR: 1.88, P =.05) and more radical EOR (tumor resection vs biopsy and GTR vs debulking) were also associated with SAO. In turn, EPS were related to comorbidities (arterial hypertension and history of cancer) and tumors located in the frontal or temporal lobe (OR: 2.07, P =.017). Moreover, there was an inverse association
Finally, certain admission laboratory variables were also associated with the SAO and EPS. Of note, pathologic laboratory values were more characteristic for EPS than for SAO. Impact of Perioperative Seizures on OS Correlation between perioperative seizures and patients' survival showed contrary effects of SAO and EPS. In particular, OS was significantly longer in individuals with SAO than without (12.4 vs 8.0 months, P <.0001, Figure 2A). In contrast, patients with EPS had poorer outcome, as compared to the counterparts without EPS (6.4 vs 9.3 months, P =.033, Figure 2B). Of note, glioblastoma individuals with SAO without surgery delay showed a trend to longer OS than the above-mentioned 14 cases with delayed surgery after SAO (13.0 vs 7.7 months, P =.1073). The multivariate analysis for independent OS predictors (adjusted for patients' age, KPS, EOR, MGMT/IDH1 status, and postoperative chemoradiation, see Supplementary Table S4) Discussion The risk factors for perioperative seizures in glioblastoma remain poorly understood. Moreover, the clinical impact of SAO and EPS is still a matter of debate. In this large consecutive series of glioblastoma patients, we have identified different risk patterns for the occurrence of SAO and EPS. In addition, there was a contrary effect of SAO and EPS on OS. Epileptogenesis of SAO The studies on the predictors of SAO are mostly based on heterogenous glioma cohorts and presume higher risk of SAO (and of symptomatic seizures in general) in low-grade glioma, partially due to the survival differences compared with high-grade glioma. 1,32,33 Recent reports pointed to the crucial role of IDH1 mutation in tumor epileptogenesis. 
14,26,34,35 Other molecular-genetic and immunohistochemical tumor markers like 1p19qcodeletion, MGMT promoter methylation, BDNF, ADK, BRAF V600E mutations, MMP-9, expressions of miR-128, nuclear protein Ki-67, p53, RINT1, and VLGR, EGFR amplification, and PI3K-mTOR pathway were also addressed, and partially linked with the risk of symptomatic epilepsy in glioma patients. 1 Figure 1. Incidence of SAO and EPS depending on tumor location As to the studies based on glioblastoma cohorts, younger age, 14,34 certain tumor locations, 13,14 expression of p53 34 and glutamine synthetase, 10,15 higher preoperative KPS score, 14 smaller volume of tumor, intratumoral necrosis and peri-tumoral edema, 10,11 and statin medication (inversely) 11 were reported to be associated with SAO. At the same time, other studies did not identify any relationship between the patients' age, 13 tumor location, 10 and size 13 with the occurrence of preoperative seizures. The major limitation of all these studies refers to mostly smaller cohort size and missing evaluation of the independent predictive value of each of the reported risk factors. Our large glioblastoma-based cohort confirmed the essential role of such predictors like younger age, higher KPS score and tumor location in the genesis of SAO. In addition, we identified 2 other independent risk factors for SAO-higher expression of GFAP in tumor tissue and high-normal to increased levels of serum chloride (>102 mmol/L). In glial cells, GFAP is involved in cytoskeleton architecture, maintaining the mechanical strength, the associations with surrounding neurons and the bloodbrain barrier. 30 The relationship between GFAP expression and seizure activity in astrocytes has been addressed in experimental epilepsy studies beyond oncological research, 38,39 and has been described in selected cases of glioma patients. 
40 Whether higher GFAP expression leads to increased epileptogenicity or vice versa remains uncertain, since increase of GFAP expression secondary to epileptic seizures was demonstrated experimentally. 41 Notably, higher GFAP expression in low-grade glioma was previously reported. 30,42 This circumstance is in line with a potential epileptogenic role of GFAP in glial tumors, since higher prevalence of epilepsy in low- versus high-grade glioma has already been mentioned before. By virtue of all these findings, further research of the role of GFAP in seizure activity of glial tumors is mandatory. Moreover, we analyzed a wide range of admission laboratory values and found independent associations between higher serum chloride levels and the occurrence of SAO in glioblastoma. The interpretation of admission blood tests with regard to SAO epileptogenesis is generally problematic. The altered laboratory values might rather be secondary to seizure event(s), than provide the insights into the systemic processes, which promote the seizures. This would particularly explain higher levels of calcium and sodium in serum of individuals with SAO, because seizures can result in hypercalcemia and hypernatremia. 43 As to higher chloride levels in SAO patients, this finding might have certain causal implications. The impact of chloride ions on seizure activity in neuronal cells is widely acknowledged and related to gamma-Aminobutyric acid (GABA) receptor activity. 44,45 [Table abbreviations: aOR, adjusted odds ratio; CRP, C-reactive protein; IHC, immunohistochemistry; GFAP, glial fibrillary acidic protein staining percentage; GGT, gamma-glutamyl-transferase. Differences with a P ≤.05 were regarded as statistically significant.] Experimental research with glial tumor cells has demonstrated that elevated intracellular chloride concentrations cause hyperpolarization of GABAergic neurons and lead to reduced network.
1,46 In consequence, cumulative reduction of the inhibitory postsynaptic potential due to reduced GABAergic synaptic density is supposed to foster. 1,46,47 Therefore, our findings strengthen the current hypotheses on the epileptogenesis of gliomas. EPS: A Strong Negative Predictor In contrast to delayed seizures due to postoperative tumor progression, less is known about the incidence and the genesis of EPS. This is the first study addressing the risk factors for and clinical consequences of EPS after glioblastoma surgery. Unlike SAO, EPS were more related to systemic dysfunctions present at the time of admission like anemia, systemic infection, and altered liver homeostasis. In addition, previous medical history (arterial hypertension and history of cancer) was also associated with EPS risk, however only in univariate analysis. It has been reported that various systemic diseases like endocrine, electrolyte and autoimmune disorders, organ dysfunction and failure, cancer, and paraneoplastic disorders facilitate seizure activity. 48 Therefore, the risk of EPS after glioblastoma surgery might already be estimated preoperatively allowing timely selection of the patients requiring special postoperative care. Not less interesting is the association of EPS with EOR and OS. The link between incomplete tumor resection and postoperative seizure risk has already been reported for glial 13 and metastatic brain 49 tumors. In summary, poorer outcome of glioblastoma individuals with EPS might be related to incomplete tumor resection and presence of above-mentioned systemic disorders. Symptomatic Seizures in Glioblastoma: Is There Any Link With Outcome? A large number of studies has addressed the association between early symptomatic seizures and glioblastoma survival reporting on longer OS in patients with SAO. 2, Noteworthy, there are also several publications not confirming the predictive impact of SAO on patients survival. 
Moreover, many recent studies focused not on the SAO event as outcome confounder, but on the potential effect of AED on glioblastoma survival. Antitumoral activity of different AED was analyzed in numerous clinical and experimental studies. 1,12,23,33,50 Unfortunately, the results are strongly conflicting, both with 18-21 and without 6,12, any association between AED use and survival. A recent large pooled analysis of prospective clinical trials 25 has failed to show any survival benefit from the use of AED in newly diagnosed glioblastoma. Our results might shed some light on the backgrounds of discrepant results of the previous studies on the role of AED. Occurrence of EPS also necessitates early initiation of AED treatment. In turn, EPS are associated with poorer survival after glioblastoma surgery, as we demonstrate in the present study. Therefore, future studies on AED effect on glioblastoma survival should also take the timing and causal background of perioperative seizures into account. We could show independent association between SAO and OS. The vast majority of the studies, which failed to show a benefit of SAO on glioblastoma survival were based on relatively small cohorts and their results were therefore statistically underpowered. A recent metaanalysis 2 also showed extended OS in individuals with SAO. In virtue of the findings of the present and previous studies, the following major conclusions regarding the SAO and OS can be made: (a) occurrence of SAO is associated with more favorable outcome of glioblastoma; (b) this effect might be related to earlier diagnosis of glioblastoma, since these patients present with higher KPS score and smaller tumor burden, and the SAO individuals with surgery delay seem to "lose" the survival benefit; (c) clinical characteristics of SAO patients like younger age and more radical EOR might also contribute to better treatment results. Study Limitations Retrospective design presents the major limitation of this study. 
Therefore, there is certain portion of missing data due to incomplete documentation in the hospital records. Regarding the molecular features of glioblastoma, missing data is mainly related to later implementation of molecular markers into the diagnostic set-up of glioblastoma, for example, IDH1 mutation status. These variables are insofar important because molecular tumor characteristics might present the causal link explaining the association between the seizure risk and survival in glioblastoma patients. Therefore, further research on the role of already established and novel molecular tumor markers on epileptogenesis in glioblastoma are mandatory. Nevertheless, we present a study based on a large consecutive series and adjust the study results for relevant endpoint confounders and information bias using multivariate analysis and multiple imputation. Conclusions In glioblastoma patients, SAO are independently associated with younger age, better preoperative clinical performance, certain tumor characteristics (location in the parietal lobe and higher GFAP expression), and serum ion alterations (higher chloride levels). In addition, SAO are related to more radical EOR and favorable OS. In contrast, EPS are strongly associated with presence of systemic disorders (anemia, infection, and liver dysfunction), incomplete tumor resection and poorer OS. Our results encourage further analysis of the effect of perioperative seizures on glioblastoma survival. |
/// Execute a function in a child process.
///
/// After a fork the following is done:
/// - In the parent:
/// - Returns immediately.
/// - In the child:
/// - Setup a logger that logs to a file.
/// - Setup a panic hook that logs an error on panic.
/// - Detach the stdin/stdout/stderr file descriptors.
pub fn child<F>(f: F) -> Result<()>
where
F: FnOnce() -> Result<()>,
{
if let Fork::Child = fork()? {
if let Err(err) = execute(f) {
log::error!("{:#}", err);
process::exit(1);
}
process::exit(0);
}
Ok(())
} |
// Action creators for the game's history (replay) mode.

/** Action: enable history mode. */
export const enableHistoryMode = { type: 'GAME::HISTORY::ENABLE' };

/** Action: disable history mode. */
export const disableHistoryMode = { type: 'GAME::HISTORY::DISABLE' };

/**
 * Action creator: request the turn identified by `turnKey`.
 * NOTE(review): behavior when `turnKey` is omitted is decided by the
 * reducer/saga handling this action — confirm there.
 */
export const fetchTurn = (turnKey?: string) => {
  return { type: 'GAME::HISTORY::TURN::FETCH', turnKey };
};
|
package panda.mvc.adaptor.multipart;
import panda.net.http.HttpHeader;
/**
 * <p>
 * This class provides support for accessing the headers for a file or form item that was received
 * within a <code>multipart/form-data</code> POST request.
 * </p>
 * <p>
 * All header storage and lookup behavior is inherited from {@link panda.net.http.HttpHeader};
 * this subclass only introduces a distinct type for multipart item headers.
 * </p>
 */
public class FileItemHeaders extends HttpHeader {
	/** Serialization version; the class adds no state of its own. */
	private static final long serialVersionUID = 1L;
}
|
Visualization of Big High Dimensional Data in a Three Dimensional Space This paper studies feasibility and scalable computing processes for visualizing big high dimensional data in a 3 dimensional space by using dimension reduction techniques. More specifically, we propose an unsupervised approach to compute a measure that is called visualizability in a 3 dimensional space for a high dimensional data. This measure of visualizability is computed based on the comparison of the clustering structures of the data before and after dimension reduction. The computation of visualizability requires finding an optimal clustering structure for the given data sets. Therefore, we further implement a scalable approach based on K-Means algorithm for finding an optimal clustering structure for the given big data. Then we can reduce the volume of a given big data for dimension reduction and visualization by sampling the big data based on the discovered clustering structure of the data. |
<reponame>LuxCore/dkitrish<gh_stars>1-10
package ru.job4j.banking;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import ru.job4j.banking.core.Account;
import ru.job4j.banking.core.Bank;
import ru.job4j.banking.core.NoSuchUserAccountException;
import ru.job4j.banking.core.NoSuchUserException;
import ru.job4j.banking.core.User;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
/**
* Tests Bank methods.
*/
public class BankTest {
    /**
     * JUnit rule allowing a test to declare an exception it expects to be
     * thrown (by default no exception is expected).
     *
     * <p>NOTE(review): none of the tests visible in this part of the file
     * use this rule; confirm it is needed elsewhere or remove it.</p>
     */
    @Rule
    public ExpectedException expectedException = ExpectedException.none();
/**
* Tests addition of user to list of users and theirs accounts.
*/
@Test
public void test1UserAdditionToUsersAccounts() {
Bank bank = new Bank();
User user = new User("Batman", "BM555777");
boolean actual = bank.addUser(user);
assertThat(true, is(actual));
}
/**
* Tests addition of user to list of users and theirs accounts.
*/
@Test
public void test2UserAdditionToUsersAccounts() {
Bank bank = new Bank();
User user1 = new User("Batman", "BM555777");
bank.addUser(user1);
User user2 = new User("Batman", "BM555777");
boolean actual = bank.addUser(user2);
assertThat(false, is(actual));
}
/**
* Tests deletion of user from list of users and theirs accounts.
*/
@Test
public void test1UserDeletionFromUsersAccounts() {
Bank bank = new Bank();
User user1 = new User("Batman", "BM555777");
bank.addUser(user1);
User user2 = new User("<NAME>", "XC-XXX");
bank.addUser(user2);
List<Account> actual = bank.deleteUser(user1);
assertThat(new ArrayList<>(), is(actual));
}
/**
* Tests deletion of user from list of users and theirs accounts.
* It is nothing to delete if no argument user in map.
*/
@Test
public void test2UserDeletionFromUsersAccounts() {
Bank bank = new Bank();
User user1 = new User("Batman", "BM555777");
bank.addUser(user1);
User user2 = new User("<NAME>", "XC-XXX");
bank.addUser(user2);
List<Account> actual = bank.deleteUser(user2);
List<Account> expected = new LinkedList<>();
assertThat(expected, is(actual));
}
/**
* Test of addition of account to users list of accounts.
*/
@Test
public void test1AccountAdditionToUserAccounts() {
Bank bank = new Bank();
User user1 = new User("Batman", "BM555777");
bank.addUser(user1);
Account account = new Account("Batman requisites", 69_666.69);
boolean actual = bank.addUserAccount("BM555777", account);
assertThat(true, is(actual));
}
/**
* Test of deletion of account from users list of accounts.
*/
@Test
public void test1AccountDeletionFromUserAccounts() {
Bank bank = new Bank();
User user1 = new User("Batman", "BM555777");
bank.addUser(user1);
Account account1 = new Account("Batman requisites 2", 69_666.69);
bank.addUserAccount("BM555777", account1);
Account account2 = new Account("Batman requisites 2", 77_777.69);
bank.addUserAccount("BM555777", account2);
Account actual = bank.deleteUserAccount("BM555777", account1);
Account expected = new Account("Batman requisites 2", 69_666.69);
assertThat(expected, is(actual));
}
/**
* Test of deletion of account from users list of accounts.
*/
@Test
public void test2AccountDeletionFromUserAccounts() {
Bank bank = new Bank();
User user1 = new User("Batman", "BM555777");
bank.addUser(user1);
Account account1 = new Account("Batman requisites 1", 11_111.11);
bank.addUserAccount("BM555777", account1);
Account account2 = new Account("Batman requisites 2", 11_222.22);
bank.addUserAccount("BM555777", account2);
Account account3 = new Account("Batman requisites 3", 33_333.33);
bank.addUserAccount("BM555777", account3);
Account account4 = new Account("Batman requisites 4", 44_444.44);
bank.addUserAccount("BM555777", account4);
List<Account> actual = new ArrayList<>(); //bank.getUserAccounts("BM555777");
actual.add(bank.deleteUserAccount("BM555777", account1));
actual.add(bank.deleteUserAccount("BM555777", account3));
List<Account> expected = new ArrayList<>();
expected.add(new Account("Batman requisites 1", 11_111.11));
expected.add(new Account("Batman requisites 3", 33_333.33));
assertThat(expected, is(actual));
}
/**
* Test of deletion of account from users list of accounts.
* <p>After deletion of last account list of account must not be equals to
* null, it must be empty.
*/
@Test
public void test3AccountDeletionFromUserAccounts() {
Bank bank = new Bank();
String batmanPassport = "BM555777";
User user1 = new User("Batman", batmanPassport);
bank.addUser(user1);
Account account1 = new Account("Batman requisites of last account", 69_666.69);
bank.addUserAccount(batmanPassport, account1);
bank.deleteUserAccount(batmanPassport, account1);
Optional<List<Account>> actual = bank.getUserAccounts(batmanPassport);
List<Account> expected = new ArrayList<>();
assertThat(expected, is(actual.get()));
}
/**
 * Verifies that all accounts registered for a user are returned by
 * {@code getUserAccounts} in registration order.
 *
 * <p>NOTE(review): the lookup with the unknown passport "666" is expected
 * to throw {@code NoSuchUserException}; only the catch block performs the
 * real lookup. Splitting those two checks into separate tests would make
 * failures easier to diagnose; behavior is kept as-is here.
 */
@Test
public void testGetAllUserAccounts() {
    Bank bank = new Bank();
    final String cgPassport = "CG696969";
    User user1 = new User("<NAME>", cgPassport);
    bank.addUser(user1);
    Account account1 = new Account("LUX reqisites 1", 1_500_000.69);
    bank.addUserAccount(cgPassport, account1);
    Account account2 = new Account("LUX reqisites 2", 6_900_000.69);
    bank.addUserAccount(cgPassport, account2);
    Account account3 = new Account("LUX reqisites 3", 3_200_000.69);
    bank.addUserAccount(cgPassport, account3);
    Account account4 = new Account("LUX reqisites 4", 4_400_000.69);
    bank.addUserAccount(cgPassport, account4);
    List<Account> expected = new LinkedList<>();
    expected.add(account1);
    expected.add(account2);
    expected.add(account3);
    expected.add(account4);
    Optional<List<Account>> actual;
    try {
        actual = bank.getUserAccounts("666");
    } catch (NoSuchUserException e) {
        actual = bank.getUserAccounts(cgPassport);
    }
    // assertThat takes the actual value first, then the matcher.
    assertThat(actual.get(), is(expected));
}
/**
 * Verifies that a transfer between two users succeeds when the source
 * account holds enough money: {@code transferMoney} must return true.
 */
@Test
public void test1TransferMoney() {
    Bank bank = new Bank();
    final String cgPassport = "CG696969";
    User user1 = new User("<NAME>", cgPassport);
    bank.addUser(user1);
    Account cgAccount1 = new Account("LUX reqisites 1", 1_500_000.69);
    bank.addUserAccount(cgPassport, cgAccount1);
    Account cgAccount2 = new Account("LUX reqisites 2", 6_900_000.69);
    bank.addUserAccount(cgPassport, cgAccount2);
    Account cgAccount3 = new Account("LUX reqisites 3", 3_200_000.69);
    bank.addUserAccount(cgPassport, cgAccount3);
    Account cgAccount4 = new Account("LUX reqisites 4", 4_400_000.69);
    bank.addUserAccount(cgPassport, cgAccount4);
    final String asPassport = "AS696969";
    User user2 = new User("<NAME>", asPassport);
    bank.addUser(user2);
    Account asAccount1 = new Account("DELUX reqisites 1", 69.69);
    bank.addUserAccount(asPassport, asAccount1);
    Account asAccount2 = new Account("DELUX reqisites 2", 0.69);
    bank.addUserAccount(asPassport, asAccount2);
    boolean actual = bank.transferMoney(cgPassport, "LUX reqisites 2",
            asPassport, "DELUX reqisites 1", 5_000_000.0);
    // assertThat takes the actual value first, then the matcher.
    assertThat(actual, is(true));
}
/**
 * After a transfer of 5&nbsp;000&nbsp;000.0 between two clients, the
 * source account balance must shrink by exactly the transferred amount.
 */
@Test
public void test2TransferMoney() {
    Bank bank = new Bank();
    final String senderPassport = "CG696969";
    bank.addUser(new User("<NAME>", senderPassport));
    // Register the sender's accounts; keep a handle on the one debited.
    Account sourceAccount = new Account("LUX reqisites 2", 6_900_000.69);
    bank.addUserAccount(senderPassport, new Account("LUX reqisites 1", 1_500_000.69));
    bank.addUserAccount(senderPassport, sourceAccount);
    bank.addUserAccount(senderPassport, new Account("LUX reqisites 3", 3_200_000.69));
    bank.addUserAccount(senderPassport, new Account("LUX reqisites 4", 4_400_000.69));
    final String receiverPassport = "AS696969";
    bank.addUser(new User("<NAME>", receiverPassport));
    bank.addUserAccount(receiverPassport, new Account("DELUX reqisites 1", 69.69));
    bank.addUserAccount(receiverPassport, new Account("DELUX reqisites 2", 0.69));
    bank.transferMoney(senderPassport, "LUX reqisites 2",
            receiverPassport, "DELUX reqisites 1", 5_000_000.0);
    // 6_900_000.69 - 5_000_000.0 = 1_900_000.69
    assertEquals(1_900_000.69, sourceAccount.getValue(), 0.001);
}
/**
 * Test of transfer of money from one account to another of the same user.
 * We verify the resulting balance on the receiving account.
 */
@Test
public void test3TransferMoney() {
Bank bank = new Bank();
final String cgPassport = "CG696969";
User user1 = new User("<NAME>", cgPassport);
bank.addUser(user1);
Account cgAccount1 = new Account("LUX reqisites 1", 1_500_000.69);
bank.addUserAccount(cgPassport, cgAccount1);
Account cgAccount2 = new Account("LUX reqisites 2", 6_900_000.69);
bank.addUserAccount(cgPassport, cgAccount2);
// Move money between two accounts of the same passport holder.
bank.transferMoney(cgPassport, "LUX reqisites 2",
cgPassport, "LUX reqisites 1", 500_000.69);
// 1_500_000.69 + 500_000.69 = 2_000_001.38
double expected = 2_000_001.38;
double actual = cgAccount1.getValue();
assertEquals(expected, actual, 0.001);
}
/**
 * Test of transfer of money from one account to another of the same user.
 * We verify the remaining balance on the sending account.
 */
@Test
public void test4TransferMoney() {
Bank bank = new Bank();
final String cgPassport = "CG696969";
User user1 = new User("<NAME>", cgPassport);
bank.addUser(user1);
Account cgAccount1 = new Account("LUX reqisites 1", 1_500_000.69);
bank.addUserAccount(cgPassport, cgAccount1);
Account cgAccount2 = new Account("LUX reqisites 2", 6_900_000.69);
bank.addUserAccount(cgPassport, cgAccount2);
// Move money between two accounts of the same passport holder.
bank.transferMoney(cgPassport, "LUX reqisites 2",
cgPassport, "LUX reqisites 1", 500_000.69);
// 6_900_000.69 - 500_000.69 = 6_400_000.0
double expected = 6_400_000.0;
double actual = cgAccount2.getValue();
assertEquals(expected, actual, 0.001);
}
/**
 * Test of transfer of money.
 * Transferring a zero amount must be rejected (returns false).
 */
@Test
public void test5TransferMoney() {
    Bank bank = new Bank();
    final String cgPassport = "CG696969";
    User user1 = new User("<NAME>", cgPassport);
    bank.addUser(user1);
    Account cgAccount1 = new Account("LUX reqisites 1", 1_500_000.69);
    bank.addUserAccount(cgPassport, cgAccount1);
    final String asPassport = "AS696969";
    User user2 = new User("<NAME>", asPassport);
    bank.addUser(user2);
    Account asAccount1 = new Account("DELUX reqisites 1", 69.69);
    bank.addUserAccount(asPassport, asAccount1);
    boolean actual = bank.transferMoney(cgPassport, "LUX reqisites 1",
            asPassport, "DELUX reqisites 1", 0.0);
    // assertThat takes the actual value first, then the matcher.
    assertThat(actual, is(false));
}
/**
 * Test of transfer of money.
 * Transferring an amount larger than the source balance must be
 * rejected (returns false).
 */
@Test
public void test6TransferMoney() {
    Bank bank = new Bank();
    final String cgPassport = "CG696969";
    User user1 = new User("<NAME>", cgPassport);
    bank.addUser(user1);
    Account cgAccount1 = new Account("LUX reqisites 1", 1_500_000.69);
    bank.addUserAccount(cgPassport, cgAccount1);
    final String asPassport = "AS696969";
    User user2 = new User("<NAME>", asPassport);
    bank.addUser(user2);
    Account asAccount1 = new Account("DELUX reqisites 1", 69.69);
    bank.addUserAccount(asPassport, asAccount1);
    // 2_000_000.0 exceeds the 1_500_000.69 balance of the source account.
    boolean actual = bank.transferMoney(cgPassport, "LUX reqisites 1",
            asPassport, "DELUX reqisites 1", 2_000_000.0);
    // assertThat takes the actual value first, then the matcher.
    assertThat(actual, is(false));
}
/**
 * Verifies that a user can be found by passport number.
 */
@Test
public void test1GetUser() {
    Bank bank = new Bank();
    final String cgPassport = "CG696969";
    User user1 = new User("<NAME>", cgPassport);
    bank.addUser(user1);
    Optional<User> actual = bank.getUser("CG696969");
    User expected = new User("<NAME>", cgPassport);
    // assertThat takes the actual value first, then the matcher.
    assertThat(actual.get(), is(expected));
}
/**
 * Verify that looking up a user by an unknown passport number throws
 * the expected exception with the expected message.
 *
 * @throws NoSuchUserException when no user found.
 */
@Test
public void test2GetUser() throws NoSuchUserException {
String exceptionPassport = "CGXXX";
// The ExpectedException rule asserts both the exception type and its
// message text (the message itself is produced in Russian by Bank).
expectedException.expect(NoSuchUserException.class);
expectedException.expectMessage("Пользователь с номером паспорта "
+ exceptionPassport + " не найден!");
Bank bank = new Bank();
final String cgPassport = "CG696969";
User user1 = new User("<NAME>", cgPassport);
bank.addUser(user1);
// Must throw: no user with this passport was registered.
bank.getUser(exceptionPassport);
}
/**
 * Verifies that an existing account of a user can be looked up by its
 * requisites.
 */
@Test
public void test1GetUserAccount() {
    Bank bank = new Bank();
    final String cgPassport = "CG696969";
    User user = new User("<NAME>", cgPassport);
    bank.addUser(user);
    Account cgAccount = new Account("LUX reqisites 1", 1_500_000.69);
    bank.addUserAccount(cgPassport, cgAccount);
    Optional<Account> actual = bank.getUserAccount(user, "LUX reqisites 1");
    // The expected instance carries no balance, so Account equality is
    // presumably based on requisites only — TODO confirm Account.equals.
    Account expected = new Account("LUX reqisites 1");
    // assertThat takes the actual value first, then the matcher.
    assertThat(actual.get(), is(expected));
}
/**
 * Verify that looking up a non-existing account of an existing user
 * throws the expected exception with the expected message.
 *
 * @throws NoSuchUserAccountException when no user account found.
 */
@Test
public void test2GetUserAccount() throws NoSuchUserAccountException {
// The ExpectedException rule asserts both the exception type and its
// message text (the message itself is produced in Russian by Bank).
expectedException.expect(NoSuchUserAccountException.class);
expectedException.expectMessage("Счёт с реквизитами 'LUX reqisites 2' "
+ "у пользователя <NAME> не найден!");
Bank bank = new Bank();
final String cgPassport = "CG696969";
User user = new User("<NAME>", cgPassport);
bank.addUser(user);
Account cgAccount = new Account("LUX reqisites 1");
bank.addUserAccount(cgPassport, cgAccount);
// Must throw: the user only owns 'LUX reqisites 1'.
bank.getUserAccount(user, "LUX reqisites 2");
}
}
|
package main
import (
"flag"
log "github.com/sirupsen/logrus"
)
// main parses the -mode flag and dispatches to the server or client
// entry point using the loaded configuration.
func main() {
	log.SetLevel(log.DebugLevel)

	// NOTE(review): the usage string also advertises a "get" mode, but no
	// handler for it exists below — presumably unimplemented; confirm
	// before wiring it up.
	mode := flag.String("mode", "client", "Mode (server|get|*client)")
	flag.Parse()

	config := LoadConfig()

	switch *mode {
	case "server":
		StartServer(config)
	case "client":
		StartClient(config)
	default:
		// Previously any unrecognised mode (including "get") exited
		// silently with status 0; fail loudly instead.
		log.Fatalf("unsupported mode %q (expected server|client)", *mode)
	}
}
|
Efficient algorithms to solve a class of resource allocation problems in large wireless networks We focus on efficient algorithms for resource allocation problems in large wireless networks. We first investigate the link scheduling problem and identify the properties that make it possible to compute solutions efficiently. We then show that the node on-off scheduling problem shares these features and is amenable to the same type of solution method. Numerical results confirm the efficiency of our technique for large scale problems. We also extend the technique to the case where the objective function is nonlinear showing that our technique blends smoothly with a sequential linear programming approach. Numerical results for a cross-layer design with a nonlinear fairness utility show that it is possible to compute optimal solutions for large wireless networks in reasonable CPU time. |
Chronic inflammation promotes retinoblastoma protein hyperphosphorylation and E2F1 activation. Chronic inflammation contributes to tumorigenesis. The retinoblastoma protein (pRb), in its hyperphosphorylated form, releases E2 promoter binding factor-1 (E2F1), which drives cell proliferation. Here, we show that pRb is hyperphosphorylated in both mouse and human colitis. In turn, pRb hyperphosphorylation is associated with release of E2F1 from pRb, resulting in the activation of E2F1 target molecules involved in proliferation and apoptosis. These observations provide insight into the in vivo mechanisms associated with chronic colon inflammation and increased colon cancer risk. |
/**
 * This class is automatically generated by mig. DO NOT EDIT THIS FILE.
 * This class implements a Java interface to the 'Bigmsg_frame_partMsg'
 * message type.
 */
public class Bigmsg_frame_partMsg extends net.tinyos.message.Message {
/** The default size of this message type in bytes. */
public static final int DEFAULT_MESSAGE_SIZE = 66;
/** The Active Message type associated with this message. */
public static final int AM_TYPE = 110;
/** Create a new Bigmsg_frame_partMsg of size 66. */
public Bigmsg_frame_partMsg() {
super(DEFAULT_MESSAGE_SIZE);
amTypeSet(AM_TYPE);
}
/** Create a new Bigmsg_frame_partMsg of the given data_length. */
public Bigmsg_frame_partMsg(int data_length) {
super(data_length);
amTypeSet(AM_TYPE);
}
/**
 * Create a new Bigmsg_frame_partMsg with the given data_length
 * and base offset.
 */
public Bigmsg_frame_partMsg(int data_length, int base_offset) {
super(data_length, base_offset);
amTypeSet(AM_TYPE);
}
/**
 * Create a new Bigmsg_frame_partMsg using the given byte array
 * as backing store.
 */
public Bigmsg_frame_partMsg(byte[] data) {
super(data);
amTypeSet(AM_TYPE);
}
/**
 * Create a new Bigmsg_frame_partMsg using the given byte array
 * as backing store, with the given base offset.
 */
public Bigmsg_frame_partMsg(byte[] data, int base_offset) {
super(data, base_offset);
amTypeSet(AM_TYPE);
}
/**
 * Create a new Bigmsg_frame_partMsg using the given byte array
 * as backing store, with the given base offset and data length.
 */
public Bigmsg_frame_partMsg(byte[] data, int base_offset, int data_length) {
super(data, base_offset, data_length);
amTypeSet(AM_TYPE);
}
/**
 * Create a new Bigmsg_frame_partMsg embedded in the given message
 * at the given base offset.
 */
public Bigmsg_frame_partMsg(net.tinyos.message.Message msg, int base_offset) {
super(msg, base_offset, DEFAULT_MESSAGE_SIZE);
amTypeSet(AM_TYPE);
}
/**
 * Create a new Bigmsg_frame_partMsg embedded in the given message
 * at the given base offset and length.
 */
public Bigmsg_frame_partMsg(net.tinyos.message.Message msg, int base_offset, int data_length) {
super(msg, base_offset, data_length);
amTypeSet(AM_TYPE);
}
/**
 * Return a String representation of this message. Includes the
 * message type name and the non-indexed field values.
 */
public String toString() {
String s = "Message <Bigmsg_frame_partMsg> \n";
try {
s += " [part_id=0x"+Long.toHexString(get_part_id())+"]\n";
} catch (ArrayIndexOutOfBoundsException aioobe) { /* Skip field */ }
try {
s += " [buf=";
for (int i = 0; i < 64; i++) {
s += "0x"+Long.toHexString(getElement_buf(i) & 0xff)+" ";
}
s += "]\n";
} catch (ArrayIndexOutOfBoundsException aioobe) { /* Skip field */ }
return s;
}
// Message-type-specific access methods appear below.
/////////////////////////////////////////////////////////
// Accessor methods for field: part_id
// Field type: int, unsigned
// Offset (bits): 0
// Size (bits): 16
/////////////////////////////////////////////////////////
/**
 * Return whether the field 'part_id' is signed (false).
 */
public static boolean isSigned_part_id() {
return false;
}
/**
 * Return whether the field 'part_id' is an array (false).
 */
public static boolean isArray_part_id() {
return false;
}
/**
 * Return the offset (in bytes) of the field 'part_id'
 */
public static int offset_part_id() {
return (0 / 8);
}
/**
 * Return the offset (in bits) of the field 'part_id'
 */
public static int offsetBits_part_id() {
return 0;
}
/**
 * Return the value (as a int) of the field 'part_id'
 */
public int get_part_id() {
return (int)getUIntBEElement(offsetBits_part_id(), 16);
}
/**
 * Set the value of the field 'part_id'
 */
public void set_part_id(int value) {
setUIntBEElement(offsetBits_part_id(), 16, value);
}
/**
 * Return the size, in bytes, of the field 'part_id'
 */
public static int size_part_id() {
return (16 / 8);
}
/**
 * Return the size, in bits, of the field 'part_id'
 */
public static int sizeBits_part_id() {
return 16;
}
/////////////////////////////////////////////////////////
// Accessor methods for field: buf
// Field type: short[], unsigned
// Offset (bits): 16
// Size of each element (bits): 8
/////////////////////////////////////////////////////////
/**
 * Return whether the field 'buf' is signed (false).
 */
public static boolean isSigned_buf() {
return false;
}
/**
 * Return whether the field 'buf' is an array (true).
 */
public static boolean isArray_buf() {
return true;
}
/**
 * Return the offset (in bytes) of the field 'buf'
 */
public static int offset_buf(int index1) {
int offset = 16;
if (index1 < 0 || index1 >= 64) throw new ArrayIndexOutOfBoundsException();
offset += 0 + index1 * 8;
return (offset / 8);
}
/**
 * Return the offset (in bits) of the field 'buf'
 */
public static int offsetBits_buf(int index1) {
int offset = 16;
if (index1 < 0 || index1 >= 64) throw new ArrayIndexOutOfBoundsException();
offset += 0 + index1 * 8;
return offset;
}
/**
 * Return the entire array 'buf' as a short[]
 */
public short[] get_buf() {
short[] tmp = new short[64];
for (int index0 = 0; index0 < numElements_buf(0); index0++) {
tmp[index0] = getElement_buf(index0);
}
return tmp;
}
/**
 * Set the contents of the array 'buf' from the given short[]
 */
public void set_buf(short[] value) {
for (int index0 = 0; index0 < value.length; index0++) {
setElement_buf(index0, value[index0]);
}
}
/**
 * Return an element (as a short) of the array 'buf'
 */
public short getElement_buf(int index1) {
return (short)getUIntBEElement(offsetBits_buf(index1), 8);
}
/**
 * Set an element of the array 'buf'
 */
public void setElement_buf(int index1, short value) {
setUIntBEElement(offsetBits_buf(index1), 8, value);
}
/**
 * Return the total size, in bytes, of the array 'buf'
 */
public static int totalSize_buf() {
return (512 / 8);
}
/**
 * Return the total size, in bits, of the array 'buf'
 */
public static int totalSizeBits_buf() {
return 512;
}
/**
 * Return the size, in bytes, of each element of the array 'buf'
 */
public static int elementSize_buf() {
return (8 / 8);
}
/**
 * Return the size, in bits, of each element of the array 'buf'
 */
public static int elementSizeBits_buf() {
return 8;
}
/**
 * Return the number of dimensions in the array 'buf'
 */
public static int numDimensions_buf() {
return 1;
}
/**
 * Return the number of elements in the array 'buf'
 */
public static int numElements_buf() {
return 64;
}
/**
 * Return the number of elements in the array 'buf'
 * for the given dimension.
 */
public static int numElements_buf(int dimension) {
int array_dims[] = { 64, };
if (dimension < 0 || dimension >= 1) throw new ArrayIndexOutOfBoundsException();
if (array_dims[dimension] == 0) throw new IllegalArgumentException("Array dimension "+dimension+" has unknown size");
return array_dims[dimension];
}
/**
 * Fill in the array 'buf' with a String
 */
public void setString_buf(String s) {
int len = s.length();
int i;
for (i = 0; i < len; i++) {
setElement_buf(i, (short)s.charAt(i));
}
setElement_buf(i, (short)0); //null terminate
}
/**
 * Read the array 'buf' as a String
 */
public String getString_buf() {
char carr[] = new char[Math.min(net.tinyos.message.Message.MAX_CONVERTED_STRING_LENGTH,64)];
int i;
for (i = 0; i < carr.length; i++) {
if ((char)getElement_buf(i) == (char)0) break;
carr[i] = (char)getElement_buf(i);
}
return new String(carr,0,i);
}
}
|
Case Report: Complications in ECT due to Fmr1-Premutation? After a short summary on Fragile-X-Syndrome and fmr1-premutation, we present a case report on a patient pre diagnosed with fmr1-premutation undergoing a series of 14 ECTs because of a treatment resistant major depressive episode. In the course of the series, several unexpected adverse incidents occurred. According to a literature research we did on PubMed, we concluded that these adverse incidents may be related to fmr1-premutation and associated abnormities in cerebral and other physical features. *Correspondence: Anna Julia Lenz, University clinic Bonn, department for psychiatry and psychotherapy, Venusberg-Campus 1 53127 Bonn, Germany, +4922828731923, Fax: +4922828716097. Received: 22 June 2021; Accepted: 29 July 2021 Introduction Fragile-X-syndrome is one of the main causes for inherited intellectual disability. Through x-linked recessive inheritance, FXS is caused by an unstable mutation on the fmr1-gene on Xq27.3 leading to more than 200 CGG-repeats in this area. As a result, the production of fmr1's gene product declines or is abandoned completely. Affected individuals suffer from mental retardation; they are often affected by psychiatric disorders, especially autism or adhd, and different medical conditions, e.g. epilepsy or obesity. Individuals with 55 up to 200 CGG-repeats are classified as premutations carriers. In this range, mutation leads to an increased production of fmrp. Premutation carriers are at a higher risk regarding psychiatric disorders as non-carriers; 50% suffer from depression or anxiety, some show also mild cognitive deficits such as attention deficits or a delayed processing speed. Furthermore, premutations carriers are likely to develop hypertension, thyroid disorders or several autoimmune diseases (e.g. rheumatoid arthritis, Raynaud's syndrome. 
Additionally, the premutation is associated with primary ovarian failure in female carriers and tremor-ataxia syndrome, especially in male patients, each of which again raises the risk of other pathologies (e.g. osteoporosis, epilepsy). As the mutation is unstable, there is the possibility of passing on the premutation as well as the full mutation to descendants, while the risk of anticipation depends on the number of maternal CGG-repeats. The diagnosis of FXS or fmr1-premutation is verified by polymerase chain reaction. To specify the result of full mutation further in order to facilitate a prediction concerning the proband's clinical prognosis, Southern blot might be used to identify the methylation status limiting fmrp's possibly remaining production further.
Additionally to being premutations carrier, the patient suffered from Crohns disease, primary thrombophilia, hypothyroidism, a hepatic haemangioma and obesity (BMI 32). She was a married teacher and had a 2.5 months old son born after she had received an egg donation in order to avoid passing on the mutation. EEG recorded increased beta activity, probably due to medication with lorazepam. Heart rate and blood pressure were within normal range (87bpm, 120/70mmHg); ECG revealed no pathologies. As psychiatric medication, she initially took fluoxetine (40mg), risperidone (2mg), quetiapine (600mg) and lorazepam (4.5mg); additionally, cortisone (30mg) was applied and thyroid hormone (12.5g) substituted. In the course of the treatment, medication was switched from risperidone to aripiprazole (20mg); the dose of quetiapine was reduced to 25mg due to increased liver values, the dose of fluoxetine to 30mg because of excessive serum levels. The dose of lorazepam was reduced to 3.25mg before the first ECT session. According to guidelines, the patient received a series of 14 ECTs with two sessions per week; anaesthesia was performed with propofol (dosage adjusted to body weight). During the first 9 sessions, unilateral stimulation was performed (max. 100%). After a slight reduction of lorazepam (-0.25mg, leaving in total 3mg/d) after the fifth session, she showed a hyperactive hemodynamic response after the sixth stimulation in the postictal episode with a heart rate up to 160 bpm and a systolic blood pressure up to 220mmHg. Furthermore, she presented a spontaneous second convulsive seizure in the postictal episode ceasing spontaneously after approximately ten seconds. Subsequently, the dose of lorazepam was increased again to avoid further unscheduled seizures. Additionally, a long-term surveillance of blood pressure was run; as it revealed mild hypertension (average: 132/83mmHg), low-dose Ramipril (2.5mg) was given to prevent further cardiovascular complications. 
Under the medication with Ramipril, blood pressure returned to the normal range. The next three sessions of ECT were performed without any incidents. As the depression did not respond sufficiently to unilateral stimulation, a switch to bilateral stimulation was performed in session 10 (20%). The patient developed a severe hyperactive hemodynamic response in the postictal period (heart rate up to 180 bpm, 240/160mmHg; approximately 200% compared to baseline values) which declined only after she was given urapidil intravenously (dose unknown). Thereafter, the decision to go back to unilateral stimulation was made. She received three further sessions of unilateral ECT as well as a single maintenance session four weeks after having completed the series without any additional adverse incidents. The score in HDS receded to 13 points. Discussion To our best knowledge, this is the first published case of an fmr1-premutation carrier receiving ECT as a treatment of severe depression. Regarding the observed side effects during the series, the question of underlying reasons for the patient's vegetative sensitivity to ECT arose, as the increase in blood pressure and heart rate seems to be rather pronounced compared to the average cardiovascular reaction to ECT. It is known that a hyperactive cardiovascular reaction may occur in the postictal phase; the average postictal rise seems to be about 150% of the baseline values. The severity of the sympathetic reaction may depend on individual biological factors and might be more pronounced in patients with essential hypertension due to a diminished elasticity of the vessels' walls. Besides the mild essential hypertension diagnosed and treated in the course of the series and possibly having influence on the cardiovascular reaction to stimulation, we wondered if there might be another, premutation-associated contributory factor complicating the course.
We found case reports addressing the possible influence of untreated obstructive sleep apnoea syndrome on general anaesthesia during ECT, including cardiovascular complications similar to our patient's reaction. As the risk of OSAS might be elevated in premutation carriers and as the patient had other risk factors (overweight, medication with lorazepam, snoring), suspicion of OSAS was raised; evaluation through cardiorespiratory polygraphy was run but came back negative. Another factor relevant to the regulation of the cardiovascular reaction is the autonomic nervous system, including both the parasympathetic and the sympathetic part. There is evidence that fmr1-premutation carriers' vagal tonus is diminished compared to the normal population. This might be an explanation for the patient's rather accentuated physical reaction to ECT; corresponding to the diminished vagal tonus, the sympathetic nervous system's influence on blood pressure and heart rate may be stronger than in non-fmr1-affected individuals. Thus, it might take longer to regulate blood pressure and heart rate down again after the sympathetic reaction to the stimulus. Furthermore, we hypothesize that the patient's central nervous system might be more sensitive to electric stimulation than a non-premutation carrier's. As some fmr1-premutation carriers are generally at a higher risk for seizures, we attribute the additional seizure after the sixth session of ECT to the previous minimal reduction of lorazepam, seemingly leading, in this case, to a significantly lower seizure threshold. As a result, we conclude that it may be useful to be more sensitive regarding possibly elevated cardiovascular risks in fmr1-premutation carriers undergoing ECT. Subsequently, routine long-term surveillance of blood pressure and heart rate might be advisable, as well as an up-titration of low-dose antihypertensive medication pre-interventionally, maybe even in patients with baseline values within the normal range.
Furthermore, one should consider a possibly increased probability of ECT-induced unscheduled seizures before reducing antiepileptic drugs during the course of ECT. |
Integrating Forest Carbon Sequestration Into a Cap-and-Trade Program to Reduce Net CO2 Emissions Problem: Most research on planning to mitigate climate change has focused on reducing CO2 emissions from coal-fired power plants or the transportation sector. The contribution of forests to lowering net CO2 emissions has largely been overlooked. U.S. forests already offset about one eighth of the nation's annual CO2 emissions and have the potential to offset more, all at a relatively low cost. It will not be easy to integrate forest carbon sequestration into a cap-and-trade program to reduce net CO2 emissions, however. Purpose: I explore what forest land use planning, forestry management practices, and land preservation strategies would be required to integrate forest carbon sequestration into a cap-and-trade program, and explain the role planning and planners can play in promoting forest carbon sequestration. Methods: The Regional Greenhouse Gas Initiative is a 10-state cap-and-trade program to reduce greenhouse gas emissions from coal-fired power plants in the northeastern United States. It provides a case study of how forest carbon sequestration can be included in a cap-and-trade program. Meanwhile, California has devised certifiable carbon credits from forestland. I analyze both approaches and generalize from them. Results and conclusions: To promote forest carbon sequestration through a cap-and-trade program will require ensuring the permanence of CO2 reductions, minimizing leakage from forestland conversion, and obtaining prices for carbon offsets that are high enough to induce forestland owners to participate in the program and offer them for sale. The capital needed to purchase and monitor permanent forest conservation easements as well as to provide a stream of annual income for timberland owners may require a national system of carbon credits.
Ideally, the easements would be set up in advance through investments by government or nonprofits, so that landowners will be ready to sell credits when they are demanded. Takeaway for practice: A cap-and-trade system could be a cost-effective way to lower net CO2 emissions if it included certifiable, tradable credits from forestland preservation and management, and if the price of carbon credits were high enough to induce forest landowners to offer credits. To promote forest carbon sequestration, planners in rural areas should work with the local, state, and federal governments and nonprofit land trusts to zone forestland at low densities, to preserve forest land through acquiring conservation easements, and to fashion forest management plans that ensure long cycles of timber harvesting. Planners in metropolitan areas should promote tree planting and tree retention ordinances to protect, expand, and manage urban forests to absorb greenhouse gases. Research support: None.
// Count retrieves the value of the counter.
func Count(ctx context.Context) (int, error) {
total := 0
q := datastore.NewQuery(shardKind)
for t := q.Run(ctx); ; {
var s simpleCounterShard
_, err := t.Next(&s)
if err == datastore.Done {
break
}
if err != nil {
return total, err
}
total += s.Count
}
return total, nil
} |
<gh_stars>0
import type { Rule, Shortcut } from "@unocss/core";
import { Theme } from "../types";
import { rem, spacing } from "../utils/Handlers";
// Dynamic spacing / sizing rules.
//
// Each entry pairs a class-name pattern with a handler from
// utils/Handlers: `rem` emits a single rem-based declaration for the
// given CSS property, while `spacing` handles the two-capture
// directional form (e.g. `m-<dir>-<size>`). NOTE(review): handler
// semantics inferred from the capture counts — confirm in Handlers.
export const spacingRules: Array<Rule<Theme>> = [
// Margin & padding: plain and directional variants.
[/^m-(.*)$/, rem("margin")],
[/^m-(.*)-(.*)$/, spacing("margin")],
[/^p-(.*)$/, rem("padding")],
[/^p-(.*)-(.*)$/, spacing("padding")],
// Flex/grid gaps, overall and per axis.
[/^gap-(.*)$/, rem("gap")],
[/^gap-x-(.*)$/, rem("column-gap")],
[/^gap-y-(.*)$/, rem("row-gap")],
// Offsets for positioned elements.
[/^top-(.*)$/, rem("top")],
[/^bottom-(.*)$/, rem("bottom")],
[/^right-(.*)$/, rem("right")],
[/^left-(.*)$/, rem("left")],
// Width / height with min/max variants.
[/^w-(.*)$/, rem("width")],
[/^min-w-(.*)$/, rem("min-width")],
[/^max-w-(.*)$/, rem("max-width")],
[/^h-(.*)$/, rem("height")],
[/^min-h-(.*)$/, rem("min-height")],
[/^max-h-(.*)$/, rem("max-height")],
];
// Static shortcuts giving the bare utility name a default ("md") size.
export const spacingShortcuts: Array<Shortcut<Theme>> = [
["gap", "gap-md"], // `gap` expands to `gap-md`
["p", "p-md"], // `p` expands to `p-md`
];
|
// ChengChaoFeng.c
// pal 1997.05.11
#include <ansi.h>
inherit NPC;
// Initialise this NPC (Cheng Chaofeng, altar master of the Azure Dragon
// Altar of the Ming Cult): identity, attributes, skills and equipment.
void create()
{
// Chinese display name plus ASCII ids players can type to address him.
set_name("程嘲风", ({ "<NAME>", "cheng", "chaofeng", }));
// Long description shown when the NPC is looked at.
set("long",
"他是一位身宽体胖的老者,身穿一件白布长袍。\n"
"他正笑嘻嘻地看着你,好象对你颇有好感。\n"
);
// Colored title; HIG/HIC/NOR are ANSI color macros from <ansi.h>.
set("title",HIG "明教" HIC "青龙坛" NOR "坛主");
set("level",5);
set("gender", "男性");
set("attitude", "friendly");
set("age", 51);
set("shen_type", 1);
// Base attributes: strength / intelligence / constitution / dexterity.
set("str", 20);
set("int", 20);
set("con", 20);
set("dex", 20);
// Combat pools; qi/jing/neili/jiali follow the mudlib's usual
// health/energy conventions — confirm against the driver's combat code.
set("max_qi", 450);
set("max_jing", 300);
set("neili", 600);
set("max_neili", 600);
set("jiali", 50);
set("combat_exp", 50000);
set("score", 100);
// Skill levels: generic categories and the special styles backing them.
set_skill("force", 70);
set_skill("hunyuan-yiqi", 70);
set_skill("dodge", 70);
set_skill("shaolin-shenfa", 70);
set_skill("finger", 68);
set_skill("nianhua-zhi", 68);
set_skill("parry", 70);
set_skill("sword", 80);
set_skill("damo-jian", 80);
set_skill("buddhism", 70);
set_skill("literate", 70);
// Route each generic skill through the corresponding special style.
map_skill("force", "hunyuan-yiqi");
map_skill("dodge", "shaolin-shenfa");
map_skill("finger", "nianhua-zhi");
map_skill("parry", "damo-jian");
map_skill("sword", "damo-jian");
// Default prepared attack style.
prepare_skill("finger", "nianhua-zhi");
// Sect membership: Ming Cult, 4th generation, Azure Dragon altar master.
create_family("明教", 4, "青龙坛坛主");
setup();
// Clone the white robe and wear it.
carry_object("/d/mingjiao/obj/baipao")->wear();
}
|
Possible nonspecific immunopotentiation by 2,4-dinitrochlorobenzene sensitization in patients with Hodgkin's disease. Various immunological parameters were evaluated in untreated Hodgkin's patients before and after sensitization with dinitrochlorobenzene (DNCB). The ratio (r) of these parameters after/before DNCB sensitization for patients and second/first samples in the controls were calculated. There were significantly more patients in the r greater than 1.1 group for PHA and Con A responses and for peripheral blood T cell percentages. These data suggest that DNCB sensitization may have a nonspecific immunopotentiation effect. |
On weakening the Deduction Theorem and strengthening Modus Ponens This paper studies, with techniques of Abstract Algebraic Logic, the effects of putting a bound on the cardinality of the set of side formulas in the Deduction Theorem, viewed as a Gentzenstyle rule, and of adding additional assumptions inside the formulas present in Modus Ponens, viewed as a Hilbertstyle rule. As a result, a denumerable collection of new Gentzen systems and two new sentential logics have been isolated. These logics are weaker than the positive implicative logic. We have determined their algebraic models and the relationships between them, and have classified them according to several standard criteria of Abstract Algebraic Logic. One of the logics is protoalgebraic but neither equivalential nor weakly algebraizable, a rare situation where very few natural examples were hitherto known. In passing we have found new, alternative presentations of positive implicative logic, both in Hilbert style and in Gentzen style, and have characterized it in terms of the restricted Deduction Theorem: it is the weakest logic satisfying Modus Ponens and the Deduction Theorem restricted to at most 2 side formulas. The algebraic part of the work has lead to the class of quasiHilbert algebras, a quasivariety of implicative algebras introduced by Pla and Verd in 1980, which is larger than the variety of Hilbert algebras. Its algebraic properties reflect those of the corresponding logics and Gentzen systems. (© 2004 WILEYVCH Verlag GmbH & Co. KGaA, Weinheim) |
/**
* Converts MeResendCodeRequestDTO to ResendCodeRequestDTO with user details from context.
*
* @param meResendCodeRequestDTO meResendCodeRequestDTO.
* @return resendCodeRequestDTO.
*/
private ResendCodeRequestDTO convertToResendCodeRequest(MeResendCodeRequestDTO meResendCodeRequestDTO) {
ResendCodeRequestDTO resendCodeRequestDTO = new ResendCodeRequestDTO();
if (meResendCodeRequestDTO != null) {
resendCodeRequestDTO.setProperties(meResendCodeRequestDTO.getProperties());
}
resendCodeRequestDTO.setUser(getUser());
return resendCodeRequestDTO;
} |
Developers create software programs for computers.
1 What Is the Job of a Product Developer?
2 What Do CIS Majors Do?
Developers work in computer manufacturing companies or for software publishers. They are responsible for the design, testing and maintenance of software programs for computer operating systems or applications, such as word processing or database management systems. Developers may create software programs customized for a specific organization, or software that is suitable for a wide variety of consumers or business users.
The specific role of developers varies from company to company. They may be part of a team that includes analysts, programmers and project managers, or they may take on all the roles required to develop software programs. Jupitermedia Corp. notes that the key responsibilities of a developer are to understand the problem that the software is supposed to solve, design a solution, and develop and test it before releasing it to customers.
Before they begin detailed design, developers work with users to obtain a full understanding of the software’s requirements. They analyze users’ needs and recommend new software programs or upgrades to existing programs. In larger teams, developers may collaborate with business or systems analysts who carry out the detailed investigation into software requirements.
Developers translate the functional requirements of the software into a specification for detailed design. They may provide instructions that enable computer programmers to create the code for the software or they may write the code themselves. If they are instructing programmers, developers must have a detailed understanding of code so that they can evaluate the work of other team members.
Software testing is a critical part of the development process. Developers test programs to ensure that they meet the requirements of the specification and that they are free of errors, known as bugs. Developers test the programs by entering data and trying out all program functions. They may also ask users to try test versions of programs to ensure that they are easy to use.
Developers prepare detailed documentation for software programs. Documentation provides a description of the functions and operation of the software that team members can refer to if they need to modify or upgrade the program. Documentation also provides the basis for operating instructions, guides for users, training programs and marketing guides.
Software development is a complex process that is broken into a number of stages. Developers collaborate with other members of the team to ensure that programs are completed on time and within budget. They establish schedules and monitor progress against key dates. Developers may also monitor costs against project budgets and prepare reports for team leaders.
Software developers earned a median annual salary of $102,370 in 2016, according to the U.S. Bureau of Labor Statistics. On the low end, software developers earned a 25th percentile salary of $78,570, meaning 75 percent earned more than this amount. The 75th percentile salary is $129,310, meaning 25 percent earn more. In 2016, 1,256,300 people were employed in the U.S. as software developers.
Linton, Ian. "Role of a Developer." Work - Chron.com, http://work.chron.com/role-developer-16221.html. Accessed 20 April 2019. |
pH electrodes based on iridium oxide films for marine monitoring The pH is an important parameter that affects the growth and development of marine organisms, environmental changes, and industrial and agricultural production processes. Nowadays, important trends in pH detection and analysis are higher stability, adaptation to extreme environmental conditions, miniaturization, portability, and digital intelligence. Several studies have focused on the application of the iridium oxide film (IROF) pH electrodes in water quality monitoring and physiological analysis. The central aim of this work was to review the preparation techniques of the IROF pH electrodes and to expand their application in the field of marine monitoring. The studied methods include electrochemical deposition, electrochemical growth, sputtering deposition, heat treatment, and novel preparation methods. The IROF pH electrodes prepared via these methods are more sensitive, have a wider pH measurement ranges, and can be miniaturized further than traditional glass and pH photometer. Hence, in environmental analysis, combining IROF pH electrodes with wireless technology for the physiological and biochemical analysis of marine organisms, seawater, and sediment pore water is an important development tendency. © 2020 Elsevier B.V. All rights reserved. Introduction The pH is an essential parameter in ecological environments and biological organisms, and different pH values can influence the growth rate of cells, metabolic rates, cell division, and differentiation. Some researchers have shown that the carbon cycles of marine coral ecosystems and even of global ecosystems are affected by fluctuating pH values, and pH fluctuations caused by wastewater discharge into upstream rivers will also affect downstream ecosystems. Therefore, stable and continuous pH monitoring is essential in agroecology, environmental monitoring/detection, industrial production, and other fields. 
In particular, due to the continuous increase in global carbon dioxide emissions, a large amount of CO 2 is absorbed by the ocean, making ocean acidification a global environmental problem. Based on previous studies, ocean acidification will further release heavy metals deposited in the ocean sediment, reduce the calcification degree of shellfish, and dissolve the foundation of coral reefs. The exact impacts of ocean acidification on the marine biological chain are currently not clear, requiring the use of instruments that can stably monitor the pH of the ocean. Traditional pH glass membrane electrodes can maintain stable and accurate pH measurements in commonly used pH ranges and are still widely used. However, these electrodes have some limitations, such as large probe volumes, difficulties in miniaturization, easily damaged hydrated glass films, and high impedance and long response times under alkaline conditions. Over the past decades, scholars have found that metal oxides such as PtO 2, IrO 2, RuO 2, OsO 2, Ta 2 O 5, Ti 2 O, PdO, and SnO 2 can be used to prepare pH sensors. Compared with traditional pH glass membrane electrodes, these metal oxide pH electrodes exhibit super-Nernst phenomena and have simple and cheaper fabrication processes. Moreover, using these metal oxides as pH sensing materials makes it easier to miniaturize a variety of sensor shapes. Among these metal oxide electrode materials, iridium and its oxides have attracted the most attention. Electrodes based on iridium and its metal oxides are relatively stable and insoluble in measured solutions, but also have short response times, wide measurement ranges, high temperature (up to 250 C) and pressure limitations, and tolerance to corrosive environments. Hence, some researchers combined these advantages of IROFs to further develop the application of iridium and its metal oxide film pH electrode in specific fields. 
This paper will focus on the summarization of its preparation methods, research in the field of marine analysis and monitoring, and its potential application in the future. Preparation of iridium oxide films electrodes Iridium oxide films have been used in a variety of electrochromic materials, electrocatalysts, nerve stimulants, supercapacitors, and pH-sensing materials. When used for pH sensing, these materials are mainly divided into hydrated and unhydrated membranes. The thickness and particle size of IROFs fabricated by different methods are diverse. Usually, pH electrodes prepared using electrochemical growth and electrochemical deposition are hydrated IROF electrodes. The preparation methods of the anhydrous films mainly include heat-treatment and heatsputtering processes [19,; these methods are described in detail in Fig. 1. Electrochemical deposition Electrodeposition is used to deposit IROFs on various substrates, which are generally conductive materials such as lead, platinum, stainless steel, and graphite. Some non-conductive substrates, such as glass and plastics, have also been used. Electrochemical deposition uses a complex deposition solution, and the role of each component should be considered to ensure uniformity and stability of the coating. In previous works, standard electrodeposition solutions have contained IrCl 4, iridium (IV) oxalate, and some other weak salt ligand ions. During electrodeposition, it is necessary to adjust the pH value of the electrodeposition solution, control the potential sweep range during deposition, and select the appropriate deposition time. Common electrochemical deposition methods are listed in Table 1, which mainly include continuous current deposition, pulse voltage deposition, potential scanning, and cyclic voltammetry (CV). 
The requirements of the deposition solution for IROFs deposited by the above methods are different, and therefore, the allocation of deposition solution components has become critical for electrochemical deposition. Zea et al. attempted to directly print a novel platinum nanoparticle ink to promote the adhesion of a deposited sensing material. Then, a solid-state pH electrode was functionalized with anodic electrodeposited iridium oxide films on a rough nanostructured platinum-printed layer. Because it is compatible with any electrode design in the micrometer range, the design shown in Fig. 2A provides a novel intelligent wearable monitoring technology. Fig. 2B demonstrates a new IROF pH sensor based on a flexible polyimide substrate fabricated by Huang et al. The main procedure included the deposition of a 7-nm thick layer of Cr on a polyimide substrate, followed by a 0.1-mm thick layer of Au. Subsequently, the electrodes were exposed to an SU-8 sacrificial layer, and the IROF was formed by a sol-gel process. Fig. 2C shows the IROF pH electrode at the micron level. Such IROF pH electrodes on flexible substrates are suitable for practical applications on curved surfaces and will enable numerous new applications. Electrochemical growth Previously, some scholars have applied a layer of iridium salt, followed by cyclic voltammetry deposition, in a method called cyclic voltammetric growth. A distinction can be made between these methods by noting that any deposition or current circulation after applying a coating applied is called an electrodeposition method. One in which the iridium wire is not coated and is directly embedded in the electrochemical cycle is an electrochemical growth method. The IROF electrodes made by electrochemical cyclic voltammetry are commonly formed in acidic (sulfuric acid) or alkaline (sodium hydroxide) electrolyte solutions. 
When pure iridium wire is electrochemically activated in an electrolyte solution, the hydrated IROFs will grow on the surface of the iridium wire. Cyclic voltammetry is a simple method for preparing IROFs because it is fast and only the cyclic potential needs to be controlled and the number of cyclic scans needs to be optimized. However, the electrodes created by cyclic voltammetry have poor reproducibility and potential drift. The IROF-based pH electrodes prepared by cyclic voltammetry usually exhibit super-Nernst phenomenon, and electrode sensitivity ranges from 60 to 80 mV/pH unit. Sputtering deposition methods Sputtering deposition, sputtering iridium salt on a substrate in oxygen or nitrogen atmosphere, called sputtering iridium oxide films (SIROFs), is also a popular method for preparing IROFs. Single crystal silicon, Al 2 O 3, stainless steel, and other metallic materials are commonly used as substrates for sputtering. The sensitivity E-pH of an electrode films manufactured by a sputtering method is generally similar to the Nernst standard value and has high repeatability. Compared with electrodes prepared by cyclic voltammetry, the stability of the electrodes potential and the interference characteristics of antioxidant reduction ions are often affected by other physical parameters (e.g., O 2, Ar partial pressure, temperature, humidity, deposition rate, substrate temperature, electric field, etc.). The sputtering deposition method requires relatively harsh experimental conditions and expensive equipment, leading to fewer reports using this method. Heat-treatment methods Compared with the other methods, a remarkable advantage of the thermal oxidation method is that IROF electrodes fabricated in this manner have long-term stability and less potential drift. The thermal oxidation method requires matrix materials capable of tolerating high temperatures, since this technique requires temperatures of 300-400 C, with some requiring temperatures higher than 800 C. 
The thermal oxidation method mainly involves IrCl 3 thermal decomposition, molten salt oxidation, and direct ignition of a mixture containing iridium or a sol-gel which is coated on a thermostable substrate. Then, the IROFs are formed by high-temperature oxidation. The molten salt oxidation method involves coating nitrate or carbonate on a high-temperature resistant matrix, followed by quenching the oxide solution one or more times to form an IROF on the surface. The direct burning method involves soaking iridium wire in a stable alkali solution, which quenches and oxidizes the iridium surface at high temperatures to produce the IROFs. The main distinction between this method and cyclic voltammetry under alkaline conditions is the oxidation mode. Since thermal oxidation requires high temperatures, polymeric materials and photoimpedance materials cannot be used as sacrificial oxide layers when coating a solution on the electrode matrix. At the same time, the length and frequency of burning, the oxidation temperature, and the cooling method can affect the thickness of the film and the morphology of surface particles. Furthermore, oxidation temperature and thermal oxidation time also affect the pH response time. Since most IROFs obtained by thermal oxidation are anhydrous, it is necessary to complete the hydration reaction on the surface before determining the pH of the solution. Therefore, a slightly longer response time may be required. Although the electrodes prepared by thermal oxidation have excellent stabilities, cracks in the oxide film on the surface naturally occur due to the high temperatures, which reduces the service life of the electrodes. Thermal oxidation and sputtering methods require high-temperature oxidation, but these also require more complex and expensive systems, significantly limiting the applications of these methods. 
New preparation method Apart from the traditional preparation methods of IROFs pH electrodes using iridium wires or other substrates, there are also relatively novel methods where nano iridium oxide is directly pressed and oxidized in an oxygen atmosphere. This method uses prepared nano iridium oxide doped with a polymethyl methacrylate (PMMA) matrix as an active sensing material for conductors and hydrogen ions. Nano iridium oxide particles are dispersed in a PMMA suspension, rapidly precipitated in water, and subsequently, the nano iridium oxide composite electrode is formed using compression molding. Direct oxidation under an oxygen atmosphere oxidizes very few areas on the surface of the electrode, while the use of large-scale oxidation results in uneven and incomplete surface oxidation. Ndobo-Epoy et al. covered an iridium filament surface with a poly-p-xylene insulating layer, while Ga used a focused ion beam to open an iridium filament vertex which was then oxidized in an oxygen atmosphere for 12 h to prepare a nanoscale IROF pH electrode. Application of iridium oxide film pH electrodes In the above, several methods of making IROFs are introduced. Each of these methods has its own advantages and disadvantages. The specific situation depends on the existing conditions and study requirements. The most important aspect is to introduce the differences between these new IROF pH electrodes and electrodes generated by traditional methods and the trend of innovative application in marine environment analysis. The relationship between IROF electrodes and its application in marine monitoring is shown in Fig. 3. Marine organisms Based on the miniaturization of IROF pH electrodes, pH values can be measured at specific sites for cell and physiological reactions to further understand the surface proton transfer of some crustaceous marine organisms in acidified oceans. 
Sensors used to measure biochemical pH values in cells should use selected appropriate matrix materials for the objects they detect and monitor. They also need to be miniaturized, typically trending to the mm-pm level, to avoid effects on the growth and biochemical reactions of biological cells. Carbon fibers are the most common matrix materials in biochemistry and physiology and simultaneously meet such high requirements. There have been several excellent reviews and books summarizing the direction and application for biochemical and physiological analyses. For example, Cork et al. inserted iridium oxide microelectrodes into the subdiaphragmatic vagus nerve of anesthetized rats, and the microelectrodes were sufficiently pH-sensitive to quickly detect changes in pH values associated with the intestinal hormone cholecystokinin (CCK) and gastric distention. This work demonstrated the vital role of pH in the growth and reproduction of organisms. At the same time, the IROF pH electrode provides a new support for exploring the physiological pH fluctuation of marine organisms. Generally, the primary purpose of a pH sensor is to determine the pH of marine environments to explain and evaluate how the pH affects the survival of organisms. Based on previous studies, pH fluctuations caused by ocean acidification adversely affect the survival of calcified polychaete species, mussel larvae, and bivalves. For example, Lane et al. revealed that when the pH of the metamorphic larvae of myxophora decreased, the ability of the larvae to calcify the pipeline was reduced or even lost entirely. Previously, Wipf et al. prepared a pH microelectrode by depositing aqueous oxidized iridium onto a carbon fiber microelectrode which showed two linear regions of potential response between pH 2-6 and pH 6-12. The prepared electrode could monitor the proton reaction of the interface by applying a scanning electron microscope as a probe to obtain the dynamic pH change at the surface. 
Iridium oxide microelectrodes have sufficient pH sensitivity to readily detect pH changes in their environments. Zhao et al. synthesized N-(6-aminopyridin-2-yl) ferrocene to develop a two-channel electrochemical ratiometric biosensor for local pH determination in a live rat brain, which will potentially provide a new research perspective and application scope for the IROF pH electrode. Thus, we believe that the miniaturization of IROF pH electrodes can further reveal the proton transfer in somatic cells of the metamorphic larvae of myxoplasma spp. during pipeline calcification. In addition, IROF pH electrodes can also be used to determine the pH of biological samples and tissue culture media and for realtime monitoring of pH changes in culture media to more objectively and accurately assess the biological condition of cultures. Tabata et al. successfully monitored proton release during an amplification reaction in real-time, using a miniature pH electrode, and demonstrated the quantitative detection of nucleic acids. Vanhoudt et al. reduced a mmscale to the pm-scale by anodic polarization and protected the iridium pH electrode by cyclic voltammetry in 0.5 M sulfuric acid by placing silk in a capillary glass tube. Similarly, IROF pH electrodes can also be employed to measure pH values in biological samples and tissue culture media. During ocean acidification simulations, the acidification rate of some biological cells and other indicators should be measured. In the meantime, Ges et al. generated microfluidic chips (20 mm x 400 mm) with two IROF electrodes; the acidification rate of cultured cells was obtained by a reproducible voltage difference between two IROF electrodes. These multidimensional analytical methods of marine organisms using IROF pH electrodes can be used to explore the mechanism of these pH stresses to develop more targeted protective measures. 
There is no doubt that pH may not be the only factor affecting biological growth, but it is necessary to monitor the parameters of these potential effects through adequately designed experiments. In the future, more and more researchers will pay attention to the properties of IROFs as a pH-sensitive material when the traditional pH glass electrode cannot be used. Seawater The pH value is an index that must be measured during the monitoring of ecological environments and water quality because it directly determines the state of some environmental pollutants or sediments in water. A comparison of commonly used instruments for pH monitoring of marine water quality is shown in Table 2. The IROF pH electrode is not affected by turbidity and can be applied for pH monitoring in estuarine areas with high turbidity. Based on the advantages of IROF pH electrodes, it is reasonable to apply them in marine water quality monitoring. Nadappuram et al. fabricated nanoscale dual-function pH scanning ionic conductivity microscopy (SICM) probes using carbon electrodes coated with iridium oxide. By measuring the distance of the tip of the electrode, a high-resolution three-dimensional pH diagram was generated. Salimi et al. did not confine their study to simply determining the pH value by the IROF electrode. Borondoped diamond (BDD) electrodes, obtained by electrochemical deposition, were used to modify IROFs for the detection of ultratrace amounts of environmental arsenic and mercury, which greatly broadened the applications of the IROFs electrode. Zhang et al. applied the IROF pH electrodes prepared by an electrogrowth method to determine the pH value of a marine environment, and the results were more stable than those obtained by a traditional glass electrode. Using the characteristics of IROF pH electrodes to fabricate a multi-parameter integrated environmental monitoring sensor is also an important future direction for. 
A multi-parameter real-time continuous water quality monitoring system has been reported by Defe et al., which can further reduce the cost and maintain highprecision measurements. Wu et al. simultaneously measured the pH value, oxidation potential E h, and H 2 S concentration in a deep-sea environment using a self-made multi-parameter integrated sensor ; the schematic figure of the pH electrode is shown in Fig. 4. The two most important parts, the data exchange port and the integrated electrode, are presented. Marine environmental quality data is heading towards intelligent terminals and wireless transmission. The application of IROF pH electrodes in marine environmental monitoring includes their applications in ocean buoys, which are integral parts of marine environmental water quality detection. The convenient detection of pH can be obtained through self-designed electrode shapes or assembly in electronic equipment modules, connected to a signal digital conversion system (DCS), data transmission of IROF electrode potential data, and signal processing with intelligent terminals. Currently, remote data transmission is being implemented in some offshore areas, but terminal monitoring sensors are expensive. Combining IROF pH electrodes with ocean buoys can effectively reduce costs and achieve long-term monitoring of more extensive areas in seas and rivers. With the development of informationization, integrated intelligent monitoring systems based on pH and other parameters have also been developed. Ding and Ma constructed a wireless sensor network system using embedded computing, micro-electromechanical systems (MEMS), distributed information processing, and wireless communication. The system could digitize, network, and could monitor seawater quality in real-time. It is also an important trend for environmental analysis to connect monitoring and analysis equipment with smart phone terminals and read data through common smart phones. Sun et al. 
developed a pH indicator system based on a smartphone platform, which connected to a self-made pH sensor through a headphone interface. After connection, it could be used to test and analyze sputum samples from cystic fibrosis (CF) patients. By applying these technologies, real-time data transmission can be displayed on intelligent terminals, which increases the convenience of pH monitoring in these fields. Sediment pore water The pH value of sediment and its pore water will affect the growth and development of benthic organisms and the distribution of sediment bacteria. Current methods for determining the pH of pore water in sediments mainly include the application of glass electrodes and Raman spectrometry. The application of pH glass electrodes requires a higher pore water content in sediments, and the measurement of multiple samples at intervals requires complex cleaning. Traditional pH microelectrodes consist of a glass film, and an Ag/AgCl electrode, and a saturated calomel electrode is combined to form a measurement system. Archer et al. used a polymethyl methacrylate (PMMA) glass microelectrode to measure the pH profile of deep-sea sediments to obtain the dissolution rates of seabed sediments. However, the pH glass film was damaged during tests, and the microelectrode was therefore not suitable for in situ measurements. The principle of determining the pH of pore water in sediments using Raman spectroscopy is based on the conjugated acid-base pair of H 2 S and HS, whose concentration ratio is a function of pH. The Raman spectra of sulfide-containing solutions with different pH values showed regular changes in the characteristic Raman overlap peaks and H 2 S. There is a particular coupling relationship between Raman spectroscopic parameters of sulfides and solution pH for the HS peak. The main disadvantage of using Raman spectroscopy to determine pH is that the measurement range is too narrow, with values between 6.11 and 8.32. 
Materials based on IROF pH electrodes have more comprehensive measurement ranges and can directly measure the specific thickness of sediment in situ. Pore water data of different sediment layers, which are closer to the sample parameters, can be obtained. Xu et al. prepared iridium and tungsten oxide pH microelectrodes to measure the vertical profiles of sediment pH in the Xiamen West Sea and Jiulong River Estuary. The pH reached a minimum at the interface of aerobic and anaerobic depths (45 mm) and tended to remain stable at 20 mm. In addition, the effect of S 2on the microelectrode sensor was eliminated by Nafion reagent leaching. Conclusions and future perspectives In light of the importance of pH measurements, the use of iridium oxide films as pH-sensing material has emerged as a popular research topic. Numerous studies have shown that the IROF pH electrode can determine the required pH parameters easily and in-situ real-time, and future analysis and determination will not be limited to the laboratory, in contrast to the use of traditional pH glass electrodes. In this paper, we introduce the role of IROF pH electrodes in water analysis and expand their application in the comprehensive analysis of marine environments, including marine organisms and sediment pore water. Nowadays, it is an important trend in environmental analysis to measure environmental parameters in a diversified, digital, and intelligent way. The solid-state IROF pH electrode is combined with remote wireless technology to realize the multi-parameter recording of the open sea. With electrode miniaturization, the impact of pH on marine organisms and the impact of ocean acidification on the environment and organisms can be further analyzed and evaluated. Declaration of Competing Interest The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper. |
On a day that saw Texas play 10–13 BYU and No. 6 Tennessee, the story of Saturday’s two matchups proved a story line the team has displayed this whole season: the Longhorns have yet to get over the hump.
The dichotomy of the day’s two games perfectly shows what the Longhorns have struggled with. They have easily handled unranked opponents, yet had problems with stronger ones.
A 14-0 game against BYU kept the trend of beating unranked teams alive. Pitcher Shea O’Leary, who has slowly become an ace on this pitching staff, threw yet another shutout, giving up just four hits. O’Leary has now given up three earned runs in 48.2 innings of work, good for the fourth-best ERA in the country.
Texas scored the second-most runs of this year in large part due to the play of leadoff hitter Reagan Hathaway. Hathaway hit the second pitch of the game over the wall in dead center, then singled in the second inning to knock in two RBI on the day as the Longhorns scored in every inning.
Yet, game two was an entirely different story. It was a rough start on the mound for pitcher Miranda Elish against a Tennessee lineup with a lot of firepower. Elish lasted just 2.2 innings as she gave up five runs on five hits. Brooke Bolinger came into the game in relief and had problems with her command, walking five hitters and giving up three runs in her two innings of work.
It took a late rally and some major power from the Texas offense to bring this game close. Washington and Elish hit back-to-back home runs in the fourth. Then, Hathaway and catcher Mary Iakopo each hit one out in the seventh inning, giving the Longhorns their five runs. Yet it wasn’t enough, as the potent Tennessee lineup collected more timely hits with runners on base while Texas stranded seven runners on base.
The Longhorns will have a chance for redemption in Knoxville on Saturday, where they again take on BYU and the Volunteers. |
I thought I had said about all I needed to say about the dangers and the possible silver lining in the victimary politics of our era, but a recent conversation with Trevor Merrill led me to draw a less optimistic conclusion.
I suggested in Chronicle 461, after complaining about recently emerged (let’s call them) “neo-victimary” practices such as stigmatizing “micro-aggression,” that during the current administration, despite some often rather frightening institutional tendencies, personal relations between blacks and whites, and no doubt between members of “majority” and “minority” groups in general, have improved. But we must remain aware that “personal” relations, when not entirely casual, are subject to the pressure of victimary institutional regulations. This is no doubt most problematic in the sexual domain, as we have seen recently in the publicity given to the claimed prevalence of “sexual assault” in universities and in the military. Since 2011, in campus complaints of sexual assault or rape, the (inevitably male) accused is not given due process but dealt with according to “the preponderance of the evidence.” Here the public and private, institutional and personal sides of the question face off against each other in the most critical sense. On the one hand, sexual relations are the most intimate of all; a young unmarried couple’s decision to engage in sexual activity is certainly not determined by their respective institutional roles. Yet, under pressure from the US Education Department, universities are increasingly regulating these relations independently of the civilian judicial system and without providing the normal safeguards this system grants the accused.
Characteristically, the victimary institutional configuration is presented ideologically as the universal truth of what to the “victim” in the private context might otherwise appear as the encounter of two equals. The notion of micro-aggression, similarly, is not limited to formal settings but provides a means to turn virtually all encounters between members of different groups into potential acts of victimization. If the victim does not notice the micro-aggression, or more likely, shrugs it off, he/she is accused of complicity with the oppressor.
In opposition, or at least in tension with this official discourse is the “natural” human tendency to consider any human relationship as equal absent any overt signs of subordination. The optimistic position taken in Chronicle 461 was that despite official efforts, this latter attitude would prevail, not simply in opposition to these efforts, but because their overall effect, however much specific instances might be oppressive, would be to sensitize the majority to potential offenses to the minority, and conversely, to empower the victimary group in such a manner that its members, whether benefiting or not directly from affirmative action, would increasingly feel themselves the equals of the formerly dominant majority. This is a dynamic that must play itself out before we can judge its effect. Outside the institutional environment (and the jargon it creates), human interactions are in principle self-regulating and may improve, but given the ever-greater inroads of the “Nanny State,” the danger of expanding legal responsibility for micro-aggressions of various kinds cannot be ignored.
“Triggering” is a significant new means to anticipate and prevent micro-aggressions that provides a useful point of reflection; as Jonah Goldberg puts it, it displays a “peculiar madness.” The point of triggering is less to prevent offense to the average victimary member, who is unlikely to be offended by the violence or racism of a literary passage, than to the particularly sensitive individual who will. Its function, in other words, is therapeutic rather than interdictive. Behaviors to be “triggered” are not those “macro-aggressions” condemned as openly offensive. A professor using a racial slur against a student will probably lose his job; referencing such a slur in Mark Twain risks troubling the sensibilities of a few students, to whom the “triggering” is addressed.
So here we have a situation where most students “lighten up,” but where triggering is necessary for those who cannot. But once triggering is considered obligatory, those who had previously lightened up are asked by the system to darken back down and become offended, if not “viscerally” for themselves, then as proxies for the potential sufferers, as the equivalent of those whose dubious sufferings from “secondary” (and now “tertiary”) smoke have led to banning smoking nearly everywhere. The imposition of rules purporting to protect victims earns a dividend in new sources of potential resentment and righteous indignation against offenders. So the question posed by triggering in formal situations as well as by micro-aggression generally is how the process of “lightening up” will fare in competition with the ever-growing occasions for the validated expression of victimary resentment.
The trigger phenomenon is above all a demonstration of the desire of non-traditional victimary groups to assert their right to victimary consideration: those, for example, who are offended by scenes of violence, or by the foul language that the general culture constantly extends to new venues (e.g., the omnipresent expression WTF). And anyone has a right to denounce micro-aggressions to demonstrate that we are all indeed deeply offended by even the most peripheral forms of racism and sexism.
The dynamic of the victimary system is just the opposite of Burke’s preference for the tried and true: there is no aspect of “normal” social relations that victimary politics cannot reinterpret as an example of oppression. In effect, what we have been calling “firstness” is for victimary thought oppressive by definition. Clearly the society has evolved to the point where the normative leadership of the traditional majority can no longer provide a model for minorities to work toward. The statements of extreme victimary thinkers, such as the quote from Ta-Nehisi Coates in Chronicle 461, imply that between groups such as whites and blacks, all relations are oppressive. But indeed, so is every interaction between any persons, no two of whom can possibly occupy exactly the same level in every relevant hierarchy. The only possible exception would be one wholly informed by a vigilance that makes “normal” interaction impossible, as in those satiric sketches of a couple who undertake to engage in sexual activity each flanked by an attorney.
We must not forget that the attack on the normal relations between ascriptive social groups takes place within a context in which other, less symbolic and more structural resentments exist. The most fundamental such resentment has led to the extraordinary success of Thomas Piketty’s recent Capital, which, the book’s intrinsic merits aside, reflects its comforting of the “99%” by characterizing “capitalism” as providing disproportionate rewards for wealth over labor, inheritance over work, being over doing, contrary to the unspoken principle of market exchange that success in the economy is a reward for productive labor. To the extent that this analysis, whatever its value as economics, strikes a chord in the human relations of our era, we are tempted to view the potentially unlimited battle against micro-aggressions as what Girard would call an act of sacrificial substitution for the growing inequality of our increasingly winner-take-all global society.
“Inequality” here refers not exclusively or even primarily to income, but to recognition, the ultimate value for humans, at least for those who have met their basic biological needs. What generates disaffection in a world characterized by ever-growing numbers of billionaires is the increasingly salient demonstration (e.g., in the political influence of George Soros, the Koch brothers, Tom Steyer…) that some individuals possess a significance to the society thousands, millions of times greater than our own.
Kevin Williamson, one of the sharper conservative columnists working today, makes the point in a recent column about the Santa Barbara shootings that such actions are a form of “theater” on the stage of public recognition. He might have alluded to the classic case of Herostratus, who supposedly burnt down the temple of Diana at Ephesus so he would always be remembered; a successful effort, as you see (see Chronicle 177). Herostratus doesn’t seem to have killed anyone; in his day, burning an empty temple got you more publicity than mass murder, since gods were considered less replaceable than humans. Today, in the age of global media, killing a few people in an appropriately dramatic fashion moves you from number 4,000,000,000 on the world celebrity list to the top 10, and even after the publicity dies down, you remain in the top few thousand rather than sinking back down to the lower billions. For some who see their lives as devoid of other satisfactions, this is worth the usual outcome of suicide, (rarely) execution, or life in prison.
These pathological cases may well be no more frequent now than before; they just get more publicity in the increasingly voracious internet-era “24-hour news cycle.” This reflects the fact, which is my real point, that as the means of publicity increase, the famous become ever more famous, and to a far lesser extent, more people become at least temporarily famous (“Everybody will be famous for fifteen minutes”), but most people never become famous at all, and knowledge of the trivial cause of the fame of many, or of the trivial nature of fame itself, even for those who “deserve” it, is an insufficient compensation for the frustration caused by the lack of it.
What once was such a compensation, fully adequate for the vast majority, is best described by a term one no longer hears: respectability, the local but intensely meaningful respect of one’s neighbors, earned by adherence to the very normality that the victimary despises and seeks to undermine. This kind of recognition, although it has not disappeared, is fatally undermined by the communications network of the global society. The local forces that once defended the normal and the (limited) elements of firstness it demanded are swamped and defeated: the proverbial “main street,” the “local scene,” the theater of small-scale social behavior, has been boarded up.
It is in this context that we should understand the genius of phenomena such as micro-aggression and triggering, which permit not only members of victimary groups but other potential offendees to consider themselves infinitely aggrieved, to complain, and perhaps be compensated directly or by publicity as victims; to act, in a word, on a little stage of their own. Similar are the ecological offenses that allow even white males not only to feel and overcome guilt but to suffer vicariously themselves. Not as spectacular as the role of mass-murderer, but theater nonetheless.
The connection between the “winner-take-all” aspect of the postmodern era and its passion for the victimary becomes clearer if we expand the scope of our inquiry beyond the frontiers of the US and its “capitalist” allies. The resentments aroused by modernity in the global context are far more virulent than those expressed in the white-guilty West, which they nonetheless influence by mimetic contamination. Al Qaeda, Boko Haram, and similar Islamic movements are the barbaric expression of traditional, essentially agricultural society’s resistance to “civilization,” exacerbated by the invasive nature of global modernity. The most prominent point of contact between these movements and Western victimary resentment is hostility to Israel, a predictable variant of an older form of “sacrificial substitution.”
Thus “Islamophobia” appears with increasing frequency in the ever-expanding lists of victimary sins along with “ableism,” “transphobia,” et al. These hard victimary attitudes reflect the frustrations of the young in a world of (apparently) declining opportunity—and greater student loan debt. The Islamic connection expresses the most fundamental hostility of all to the Western values that have brought about the winner-take-all world. Young people who don’t want to go as far as the recent SB murderer can find solace in the neo-victimary’s alliance with the traditional anti-firstness movement of antisemitism.
At least for the moment, 9/11 not having been repeated, none of this poses any threat to the “powers that be,” provided they avoid obvious infractions to the victimary (“PC”) code of the sort committed recently by the newly notorious Sterlings. After all, the most curious aspect of this “kerfuffle” (a word borrowed from Walter Scott) is that a brief expression of racial prejudice in a (clandestinely recorded) private conversation provokes expulsion from the NBA owners’ club and a seven-figure fine, but years of slumlord profit-chasing, generating billionaire wealth (and expensive mistresses), result in an award from the NAACP.
All the fuss about victimary affairs, which occupies so much time and energy in all areas of university and corporate personnel administration (e.g., Google’s recent apologies for “imbalance” among racial and gender groups, as though morality is served only when every victimary group is at least proportionally represented in every “prestigious” position), serves as an ideological safety valve in which the society is willing to invest many taxpayer dollars, so long as the overall economy is not greatly affected. In this regard, even the egregious inefficiencies incurred in pandering to the environmental movement (e.g., the Keystone pipeline) are a relatively small price to pay to maintain the overall socio-economic hierarchy.
It would seem then that the inefficiency, not to speak of the hypocrisy and nastiness, of the victimary are costs incurred for the purpose of symbolically mitigating the excesses of “market meritocracy,” the rule of those who make themselves marketable via skills not necessarily wholly describable by the term “merit.” But there is one more key element to consider.
The postwar American self-image was not long ago dominated by a mid-skilled middle class whose lost reign is nostalgically described in the first part of Charles Murray’s Coming Apart, and its incipient decline in Paul Schrader’s comi-tragic Blue Collar (1978). The middle-of-the-road situation of this class, its market position buoyed up by labor unions, held the whole society together—smoking the same cigarettes, drinking the same coffee, wearing similar clothes, watching the same movies and TV shows, driving cars differentiated only by decorative luxury. As Murray’s book despairingly illustrates, over the past few decades, the sharp decline, even more cultural than economic, of this “Fishtown” class, whose modest skills are no longer competitive in the global market, has undermined normality and provoked the excesses of the victimary. The fall of the white (and black) working class from “respectability” as defined above both reflects and contributes further to the decline of local modes of recognition. This vicious circle, as manifested in an appalling increase in the proportion of births out of wedlock and other similar statistics, is far from the end of its run.
Piketty’s analysis, which I am not qualified to judge, claims that America’s midcentury middle-class golden age (the French version of which was called les trente glorieuses [années] and the German, das Wirtschaftswunder) was not really characteristic of the market system, which tends toward a class separation of the Murray type. The difference between today’s “coming apart” society and that of the “robber baron” era reflects the greater cultural salience of the 99%, no longer downtrodden, and fully exposed to the winner-take-all examples of both the media and, more tragically, the economic system, which limits the possibilities of all but the highly skilled. The recent inclusion of college graduates in this group (e.g., in the “Occupy” movement) no doubt signals a tipping point.
This suggests that, absent an inconceivable calamity on the scale of WWII, a period of social cohesion like the immediate postwar era will not recur. If this is true, then an exaggerated concern for the victimary is a relatively inexpensive proxy for addressing a human disadvantage that cannot be rectified “symbolically,” a loss of social cohesion that no quantity of transfer payments can address. Today’s poor may be obese rather than underfed, wear $100 sneakers and watch 60-inch TV sets, but they are marginalized in ways that the old working class, white or black, never was, even as the well-off are ever more lionized and aped for their “if you’ve got it, flaunt it” lifestyle. The social divide embodies “aggressions” against the moral model of human reciprocity that university demonstrations against Ayaan Hirsi Ali or Israeli “apartheid” or cisgenderism can do nothing to address. Whence the victimocracy’s originary and ultimate despair concerning the human condition and its fate.
In a follow-up to this Chronicle, I will attempt a sketch of the history of the postwar victimary era in the light of these reflections. |
Dynamics of cockroach ocellar neurons. The incremental responses from the second-order neurons of the ocellus of the cockroach, Periplaneta americana, have been measured. The stimulus was a white-noise-modulated light with various mean illuminances. The kernels, obtained by cross-correlating the white- noise input against the resulting response, provided a measure of incremental sensitivity as well as of response dynamics. We found that the incremental sensitivity of the second-order neurons was an exact Weber-Fechner function; white-noise-evoked responses from second-order neurons were linear; the dynamics of second-order neurons remain unchanged over a mean illuminance range of 4 log units; the small nonlinearity in the response of the second-order neuron was a simple amplitude compression; and the correlation between the white-noise input and spike discharges of the second-order neurons produced a first- order kernel similar to that of the cell's slow potential. We conclude that signal processing in the cockroach ocellus is simple but different from that in other visual systems, including vertebrate retinas and insect compound eyes, in which the system's dynamics depend on the mean illuminance. INTRODUCTION Insects have two kinds of visual organs, the compound eye and the ocellus. The former has been the subject of extensive study; the latter has received less attention. There is evidence that the ocellus plays an important role in the insect's visual behavior (Goodman, 1981 ;Taylor, 1981a, b). The insect ocellar retina contains many (>100) photoreceptors and a small number of (<12) large secondorder neurons, called L-cells. Recordings from insect ocellar neurons were first made extracellularly by Ruck (1957Ruck (, 1961 and intracellularly by Chappell and Dowling. These authors showed that light stimulation depolarized the ocellar receptors and hyperpolarized the L-cells. 
Subsequent studies in locusts (Patterson and Goodman, 1974; Wilson, 1978; Simmons, 1982a), dragonflies (Chappell and DeVoe, 1975; Patterson and Chappell, 1980; Simmons, 1982b), bees (Milde, 1981, 1984), and cockroaches (Mizunami et al., 1982) have confirmed Chappell and Dowling's original observation that ocellar L-cells produce hyperpolarization, but have also shown that the patterns of the light-evoked responses are different in different insects. 276 THE JOURNAL OF GENERAL PHYSIOLOGY " VOLUME 88 -1986 In the past, most of the functional studies on ocellar neurons have been performed with steps of light given in the dark, as in most of the studies on other visual systems. Consequently, not much is known about the cell's response dynamics or how they respond to modulation around a mean illuminance, which is the condition in which ocellar neurons function in the natural environment. One notable exception is the study of Chappell and Dowling, who measured incremental responses of dragonfly ocellar L-cells and concluded that the incremental threshold of their offset responses is a Weber-Fechner function over a 5-log range of mean illuminance. In this study, we stimulated the cockroach ocellus with white-noise-modulated light, and analyzed response dynamics of ocellar neurons by cross-correlating the light inputs with the resulting cellular responses. The methodology, referred to as white-noise analysis, enabled us to define incremental sensitivity as well as response dynamics over a large range of mean illuminance. We analyzed the responses of the second-order neurons, L-cells, recorded intracellularly.
We found that (a) the modulation responses were linear and the first-order kernels could predict cellular responses with mean square errors (MSEs) of~10% ; the incremental sensitivity was an exact Weber-Fechner function over a 4-log range of mean illuminance ; (c) waveforms of kernels remained unchanged over the same mean illuminance range: the response dynamics were independent of mean illuminance; (d) the small second-order nonlinearity was accounted for by a simple compression of the hyperpolarizing first-order kernel : no complex nonlinearity was found in the L-cell response ; and (e) correlation of spike discharges with the white-noise inputs produced first-order kernels very similar to those from slow potentials. Biological Adult male cockroaches, Periplaneta americana, reared in the laboratory of Kyushu University, were used. The cockroach was rigidly mounted on a Lucite stage and its head was immobilized using beeswax. The compound eyes and the other ocellus were lightshielded with beeswax mixed with carbon black. The cockroach survived several days under this condition. For recording from second-order neurons, the cuticle between the two ocelli was removed and the ocellar nerve was exposed. The exposed tissue was treated with 1% pronase type IV (Sigma Chemical Co., St. Louis, MO) in cockroach saline (Yamasaki and Narahashi, 1959) for 1 min, to facilitate electrode penetration. Recordings were made with a glass pipette filled either with potassium acetate (2 M) or potassium citrate (2 M). Both electrodes (resistances of 50-80 MSt) produced similar results. The indifferent electrode (a platinum wire) was placed in a saline pool in which the exposed tissues were bathed. Fig. 1 shows a schematic representation of the system for measuring the light (input) and response (output). The light source was either a glow tube (R-1130B, Sylvania/GTE, Exter, NH) or a light-emitting diode (Sharp Corp., Tokyo, Japan). 
The spectral composition of the glow tube was nearly flat from 400 to 700 nm, whereas that of the light-emitting diode had a peak at 560 nm. Both stimuli produced similar results. A series of neutral-density (ND) filters attenuated the light beam in 1-log steps. The white-noise signal was obtained from a random signal generator (WG-772, NF Circuit Design Block, Tokyo, Japan). The depth of modulation defined in a conventional fashion, (Imax − Imin)/(Imax + Imin), was ~0.7–0.9 at 0 dB. The depth of modulation of the white-noise signal is an approximation because of the statistical nature of the input. Light signals were monitored by a photodiode (TFA 1001 W, Siemens-Allis, Inc., Cherry Hill, NJ) before they were attenuated by filters. Light stimulus and cellular responses were initially stored on analog tape and analyzed offline on a VAX 11/780 computer (Digital Equipment Corp., Maynard, MA) with an AP 120B array processor (Floating Point Systems, Portland, OR). INCREMENTAL SENSITIVITY 277 FIGURE 1. Schematic drawing of experimental procedure. The light source was either a glow modulator or a light-emitting diode. A series of ND filters were interposed between the light source and the preparation to attenuate both the mean illuminance and white-noise modulation by the same proportion, so that the "contrast" of the stimulus was kept unchanged. The light signal was monitored before it was attenuated by filters and a correlation was made between the unattenuated light signal and the cellular response. The correlation produced kernels on a contrast sensitivity scale. Kernels were converted to an incremental sensitivity scale by multiplying the kernel's amplitude by the attenuation factor. The light stimulus the cockroach received daily or nightly consisted of two parts, one with a steady mean, Io, and the other with a modulation around the mean, I(t), as shown in Fig. 4. The mean illuminance, Io, changes slowly but covers a large range.
The modulation depth of fluctuation around the mean illuminance, however, is moderate and should remain roughly constant. The response evoked therefore consists of two components, the steady mean, V, and the modulation response, V(t), the former being related to Io and the latter to I(t). The peak of the step-evoked response, VP, may be different from Vo. The relationship between Io and Vp or Vo is a cell's DC (static) sensitivity : how a cell responds to steps of light given in the dark. The classic example is Naka-Rushton relationship (Naka and Rushton, 1966). The relationship between I(t) and V(t) has been obtained by measuring the threshold, i.e., the intensity of stimulus that produces a just-detectable response, the classic example being the Weber-Fechner relationship. However, the responses of visual neurons that do not produce spike discharges have no threshold that can easily be defined. In a white-noise analysis, the relationship between I(t) and V(t) is represented by kernels obtained by cross-correlating the white-noise input with the resulting cellular response. The results of first-order cross-correlation, weighted by the power of the stimulus, are 27 8 THE JOURNAL OF GENERAL PHYSIOLOGY " VOLUME 88 -1986 the first-order kernels. The first-order kernel is the linear part of the cell's response to an impulse input superposed on a mean illuminance. Ifa cell's response is linear or quasilinear, the amplitude and waveform of first-order kernels are therefore the comprehensive measure of a cell's incremental sensitivity and the response dynamics. If a cell's response contains second-order nonlinear components, the first-and second-order kernels represent the linear and nonlinear components of the cell's incremental response, and their amplitudes and waveforms represent the cell's incremental sensitivity and response dynamics (Sakuranaga and Ando, 1985). The spikes evoked by white-noise stimulus can also be analyzed as in the case of the analog response. 
A correlation was made between the spike discharges (a point process), which were transformed into 5-ms pulses, and a white-noise input. The resulting kernels are interpreted as the post-synaptic potential, which triggers a spike discharge (Ando, YA., M. Sakuranaga, and KA. Naka, manuscript in preparation). In actual experiments, the light signal was monitored before it was attenuated by ND filters, and a correlation was made between the monitored light signal, 10^n · I(t), and the modulation response, V(t), where n is the log attenuation factor of the filters. The DC components in both signals, Io and Vo, were subtracted out before correlation. The results of the correlation were kernels whose amplitude was on a contrast sensitivity scale. The kernel's ordinate values could be converted to an incremental sensitivity scale by multiplying their amplitude scale by the attenuation factor, 10^n, i.e., for a 1-log filter by 10, for a 2-log filter by 100, etc. Conversion is only for the amplitude and does not affect the waveform of kernels. The linearity of a cell's response can be assessed if we know how well the linear model obtained by convolving the original white-noise signal with the first-order kernel matches the recorded cellular response. The degree of accuracy is the MSE. The theoretical aspects of the analysis are described by Sakuranaga and Ando, and algorithms for computing first- and second-order kernels, model responses, and MSEs can be found in Chappell et al. RESULTS The cockroach has two ocelli, one at the base of each antenna (Fig. 2A). Each ocellar retina contains ~10,000 photoreceptors, and they converge on four large second-order neurons, or L-cells (Weber and Renner, 1976). As shown in Fig. 2B, the L-cell has extensive dendritic branches in the ocellus, where the neuron receives what appear to be ribbon synapses from the photoreceptor axons (Weber and Renner, 1976;Toh and Sagara, 1984).
The axon of the L-cell projects into the brain through the ocellar nerve. In the brain, L-cells make synaptic contacts with third-order neurons (Toh and Hara, 1984;Mizunami et al., 1986). In this study, intracellular recordings from L-cells were made from the axonal region of the ocellar nerve. Stable recordings could be made for 30-60 min. Fig. 3A shows the responses of an L-cell evoked by 250-ms flashes whose illuminance was increased in 1-log steps. The responses to brief steps of light showed sustained hyperpolarization, and a few (one to four) spikes were seen on the depolarizing phase of the offset response. Note that the sustained nature of the response was due to the short duration of the stimulus (cf. Fig. 4): with continued stimulation, the membrane potential depolarized to a new steady level, V~. Fig. 3 B shows the V-log I plot, in which the peak response amplitudes, VP, is plotted against the log of stimulus illuminance, Io. The curve, an average from five L-cells, is S-shaped, which relates to the cell's static (DC) sensitivity. A similar S-shaped function was seen in L-neurons of locusts, bees (Milde and Homberg, 1984), and dragonflies (Chappell and DeVoe, 1975). Fig. 4 shows the L-cell's responses evoked by steps and white-noise-modulated stimuli. Brief steps of light given in the dark produced step-like responses with a peak, VP. A spike is seen at the offset of the stimulus. The relationship between the amplitude of the step stimulus, I., and the peak of resulting response, VP, is FIGURE 2. (A) Head of a cockroach. The cockroach has a pair of ocelli (arrow) at the base of the antenna, in addition to the compound eyes. (B) Ocellar second-order neuron (L-cell) viewed dorsally. The drawing is from a cobalt-filled neuron. L-cells extend their dendritic branches into the ocellar retina, and receive inputs from a large number of photoreceptors. The axon of the L-cell projects into the ocellar tract of the brain, through the ocellar nerve. 
In the ocellar tract, the L-neurons make output synapses onto a number of the third-order neurons (Toh and Hara, 1984). The cell body is located in the brain (arrow). Scale: 1 mm (A); 200 Am (B). shown in Fig. 3. At the beginning of white-noise stimulation, a transient peak similar to the one produced by steps of light was seen. With continued whitenoise stimulation, the membrane potential reached a steady level, V., within 30-40 s. The steady level was maintained as long as the stimulus was continued, i.e., the L-cell reached a dynamic steady state. At the steady state, the depolarizing phase of the slow potential fluctuation often exceeded the membrane potential observed in the dark. This is clearly seen in the probability distribution function (PDF) of the response in Fig. 5 C. The kernels were computed by cross-correlating the slow potentials or spike discharges against the white-noise.inputs during the dynamic steady state. Spike potentials were removed with a low-pass filter (0.1-50 Hz) for slow potential analysis. For analysis of spike discharge, a trigger circuit produced standard pulses of 5 ms for each spike discharge. 280 THE JOURNAL OF GENERAL PHYSIOLOGY -VOLUME 88 " 1986 Fig. 5A shows the slow responses of an L-cell produced by white-noise stimuli with modulation depths of 0, -10, and -20 dB but with the same mean illuminance of 20 uW/cm2. The response produced by stimuli with various modulation depths had the same steady mean hyperpolarization, but the amplitude of the modulation response changed in proportion to the modulation depth of the stimulus. To clarify this observation, first-order kernels were computed from a longer (80-100 s) record for each depth of modulation. The three kernels from 0, -10, and -20 dB stimuli had identical amplitudes and waveforms (Fig. FIGURE 3. Step-evoked responses from an L-cell. Four responses evoked by light stimuli with 0, 1, 2, and 3 log attenuating filters are shown. 
The illuminance of stimulus without filters (0 log) was 30 UW/cm2. A few spikes are seen at the offset of the stimuli. (B) Relationship between the amplitude of step-evoked response and the magnitude of step stimulus. An average from five L-cells is shown with the standard deviations. 5 B). The incremental sensitivity and response dynamics, which are the amplitudes and waveforms of the kernels, did not depend on the modulation depth of the stimulus, which is what we would expect from a linear system. Fig. 5 C shows three pairs of PDFs from 0, -10, and -20 dB records, each pair being the PDF for the light stimulus and response. The PDFs are plotted on an absolute scale, in millivolts for the response and in microwatts per square centimeter for the light stimulus. First the PDFs were computed from a section of a record after the removal of the DC components as described in Materials and Methods. The levels of mean hyperpolarization, V., were measured from the original record (one example is shown in Fig. 4) and the PDFs were plotted so that they represented the modulation around the mean hyperpolarization, V.. The PDFs FIGURE 4. Responses from an L-cell evoked either by steps of light given in the dark or by white-noise-modulated light. The relationship between Io and Vp or Vo is the cell's DC (static) sensitivity and the relationship between I(t) and V(t) is the incremental sensitivity. Spike potentials are seen at the offset of step stimulation as well as during white-noise stimulation. for the response and stimulus of the three pairs matched well. PDFs of whitenoise stimuli are Gaussian, and, if a system is linear, the PDF of its response to a Gaussian white-noise stimulus must be Gaussian. The observations shown in Fig. 
5 indicate that (a) the mean level of hyperpolarization was produced by the mean illuminance, I.: as long as the mean illuminance remains unchanged, the mean hyperpolarization remains unchanged ; (b) the cell's modulation response was 28 2 THE JOURNAL OF GENERAL PHYSIOLOGY -VOLUME 88 " 1986 linear because the three kernels produced by white-noise stimulus of three depths of modulation were identical ; and (c) the linearity of the modulation response is also suggested by the PDFs. If the response of a cell is linearly related to the stimulus modulation, the cell's response to the modulation should be predictable from the first-order kernels with a fair degree of accuracy. Fig. 6A shows the time records of the white-noise stimulus (upper trace) and the resulting response (lower traces in a continuous FIGURE 6. Time records of part of a white-noise stimulus and the resulting cellular response (continuous line). Superposed on the response trace is the linear model (broken line). PDFs for the light stimulus and the recorded response are also shown. The light PDF is also superposed on the response PDF. In B, power spectra of the light stimulus, response (continuous line), and model (broken line) are shown. The mean illuminance of the stimulus is 20 AW/cm2. line). Superposed on the response trace is the model response predicted by the first-order kernel (broken line). Although there are occasional deviations, the two traces matched well, which shows that the response could be predicted from the first-order kernel fairly accurately. Indeed, the averaged MSE computed from five L-cells was 11.1%, with a standard deviation of 2.1 %. Fig. 6A also shows the PDFs of the light stimulus and of the response PDF. The light stimulus PDF is also superimposed on the response. Although there is a minor deviation between the two PDFs near the mean, they were in good agreement. Fig. 6B shows the power spectra of the light stimulus, response, and model. 
The power spectrum of the response (continuous line) matched well that of the model shown by the broken line. Both had a peak at^-8 Hz and had a slight bandpass-filtering property, as seen by the lower power for the low-frequency region. A similar analysis made on the responses at a mean illuminance level of 20-0.002,UW/cm2 showed that the responses produced by white-noise modulation were linear. The response produced by a stimulus with a mean illuminance of <0.002 kW/cm2 had a much larger MSE (>30%), probably because of the noise in the response. We allowed the animal to adapt to the low illuminance stimulus light for^-15 min, but the MSE of the response was still >30%. If a system is linear, the system's response to any arbitrary stimulus should be predictable from its first-order kernels. Fig. 7 shows an example in which the Lcell's response was evoked by a stimulus modulated by a sinusoidal sweep. In the figure, the prediction (model) obtained by convolving the stimulus with the first-0.3s order kernels (broken line) is superposed on the actual response (solid line). The two traces match well, as expected from a linear system. The observations in Figs. 5-7 indicate that the modulation response from the cockroach L-cell is almost linear and therefore the first-order kernels are good approximations of the cell's incremental sensitivity and response dynamics. Fig. 8A shows the kernels obtained at five mean illuminance levels, plotted on a contrast sensitivity scale. In this experiment, a cell was impaled and the retina was dark-adapted for 5 min; the test began with a 5-log ND filter interposed. After each white-noise test run, which started after 90 s of adaptation to the stimulus light and lasted for 90 s, the density of the ND filter was decreased in 1-log steps. After the test by the maximum illuminance (0 log), the sequence was reversed. Both sequences produced similar results. The kernels were hyperpolarizing and monophasic (integrating). 
The waveforms were identical, with constant peak response times of^-50 ms, and the amplitudes differed by only 30%. This is remarkable because the mean illuminance for which the kernels were computed covered a range of 1 :10,000. Stimuli dimmer than -4 log units produced no reliable results, although we allowed the animal to adapt to the 28 4 THE JOURNAL OF GENERAL PHYSIOLOGY " VOLUME 88 -1986 stimuli for 15 min. For comparison, kernels from a horizontal cell of the turtle, Pseudemys scripta elegans, obtained under comparable conditions, are shown in Fig. 8B. Note that the turtle's cellular response could be predicted from the first-order kernels with MSEs of <10% (Chappell et al., 1985). In the turtle's horizontal cell, the amplitude of the kernels on a contrast sensitivity scale decreased as the mean illuminance decreased. As the mean illuminance was FIGURE 8. (A) First-order kernels, plotted on a contrast sensitivity scale, obtained at five mean levels. The first-order kernels were calculated by cross-correlating the white-noise light stimuli with the recorded responses. Kernels are labeled 0 through -4 to indicate the log density of the filters interposed. Note that the amplitudes of the kernels did not differ by more than 30% and the peak response times were constant at 50 ms for all kernels, although the mean levels covered a range of 1 :10,000. Stimuli dimmer than -4 log units did not produce any reliable results. B shows turtle horizontal cell kernels plotted as in A. The peak response times, waveforms, and amplitudes differed for different levels of mean illuminance. Kernel units are in millivolts per microwatt per square centimeter per second. The larger incremental sensitivity for ocellar kernels was due to the dimmer mean illuminance (20 IAW/cm2 at 0 log) of the white-noise stimulus than in the turtle experiment (50 wW/cm 2 at 0 log). 
increased, the peak response times became shorter from 100 to 50 ms and the waveform became more biphasic (differential). Thus, the response dynamics depend on the mean illuminance. Sets of impulse responses or kernels similar to the one shown in Fig. 8B have been obtained in the human visual system and lower vertebrate horizontal cells (Naka et al., 1979 ;Chappell et al., 1985). Fig. 9A shows the relationship between the amplitudes of the kernels on an incremental sensitivity scale and the level of mean illuminance. The plots, averaged from five L-cells, are on a straight line with a slope of -1 : this is the Weber-Fechner relationship, which shows that for a 10-fold increase in the mean illuminance, the incremental sensitivity decreases by a factor of 10 (i.e., the contrast sensitivity is independent of the mean illuminance). Fig. 9B shows the peak response times of kernels used to produce plots in Fig. 9A. For a 4-log range of mean illuminance, the peak response times remained virtually unchanged at 50 ms. This shows that the mean illuminance controlled only the scaling of incremental sensitivity, but not the response dynamics. Although the L-cell's response to white-noise-modulated light was linear, there was a small degree of nonlinearity. The second-order kernel represents. Incremental sensitivity plotted against mean illuminance. The ordinate is the amplitude of the first-order kernels on an incremental sensitivity scale, and was 25 mV/(AW/cm 2)-s at 0 log. The plot is an exact Weber-Fechner relationship. The peak response times of kernels at five mean illuminance levels are plotted in B. The peak response times were almost constant at -50 ms over the 4-log range of mean illuminance. In A and B, averages from five L-cells are shown with standard deviations. the nonlinearity produced by an interaction of two pulses. 
The second-order kernel is therefore a three-dimensional solid with two time axes, r, and T2, which represent the time relationships of two pulses. Fig. 10A shows an example of a second-order kernel from the L-cell's response. The second-order kernel had a (depolarizing) peak on the diagonal, which indicates that the nonlinearity is produced when two pulses are given concurrently. The second-order kernel shows that the magnitude of responses increases somewhat nonlinearly with the increase in the stimulus magnitude, because two stimuli given concurrently are equivalent to twice the increase in the stimulus amplitude. Fig. IOB shows the diagonal cut (side view) of the second-order kernel together with the first-order kernel. The waveforms of the kernels are mirror images of each other, which indicates that (a) hyperpolarization by the first-order kernel is opposed by depolarization caused by the second-order kernel : the nonlinearity was for a simple amplitude compression; (b) the nonlinearity was involved in the generation of the L-cell's slow response, because the waveform (time course) of the secondorder kernel was almost the same as that of the first-order kernel. A similar nonlinearity has been observed in vertebrate horizontal cells ( ;). The second-order kernel had no off-diagonal peak, which indicates that there is no nonlinear interaction of two pulses coming at any time interval. That is, FIGURE 10. Typical second-order kernel from L-cells. (A) Contour map of a second-order kernel with two axes, T, and 72. The magnitude of the second-order kernel is shown by the contour lines. The kernel is a solitary depolarizing peak on the diagonal, which indicates that the nonlinear response is depolarizing and is produced when two pulses of lights are given simultaneously. The nonlinearity is therefore produced by an increase in the stimulus amplitude. 
(B) First-order kernel (solid line) and the diagonal cut (side view) of the second-order kernel (broken line) shown in A. The waveforms are mirror images of each other, which shows that the nonlinearity was of the simple compression type. As the second-order nonlinearity is a quadratic function, the amplitude of the second-order kernel is a quadratic function of the input magnitude, whereas the amplitude of the first-order kernel is linearly related to the input amplitude. The ordinate units in B are for the firstorder kernel only. the responses produced by two pulses coming at an interval of t (t = T I -T2) are identical, and the two identical responses produced by two flashes sum linearly. In a system in which the summation is not linear and the response produced by the second pulse is affected by the first pulse, a deviation from linearity appears on the off-diagonal region, where T, 0 7.2-One such example is shown in Fig. 11, in which an intracellular recording was made from a catfish (Ictalurus punctatus) ganglion cell. A decremental flash produced transient on-off depolar-izations from the cell (Fig. 11 A). Although the cell's response included both the slow and spike components, our unpublished results show that the correlation of the white-noise input with either the spike or slow component produced a similar second-order kernel. The second-order kernel of the cell's slow potential shown in Fig. 11 B is much more complex than the one from a cockroach L-cell (Fig. 10 A) and is composed of two on-diagonal depolarizing peaks and two off-diagonal FIGURE 11. A response from a catfish ganglion cell produced by a decremental flash from a steady illuminance of 40,uW/cm 2 (A) and a second-order kernel of the cell's slow potentials (B). In B, the continuous contour lines are peaks and the broken contour lines are valleys in the second-order kernels. A second-order kernel represents the nonlinearity related to the timing of two pulses. 
Nonlinear interactions of two pulses arriving at the same time (T I = T 2) produced two on-diagonal depolarizing peaks, whereas two successive pulses arriving with a delay of 29 ms produced offdiagonal hyperpolarizing valleys. The latter point is illustrated in the figure by measuring the peak time of one of the valleys indicated by the intersection of two broken lines, which are 62 ms for r, and 33 ms for T2, i.e., 62 -33 = 29 ms. hyperpolarizing valleys. The second-order kernel is similar to those from type-C amacrine cells in catfish retina and is responsible for producing on-off transient depolarizations evoked by step inputs. In the cockroach L-cell, the amplitude of the response is simply related to the instantaneous amplitude of the input stimulus, whereas in the catfish ganglion cell, the response is generated by more complex signal-processing. The L-cells of the cockroach produced spikes at the offset of the step input, as did those of bees and locusts. An example of the spikes produced by a white-noise stimulus is shown in Fig. 4. To discover the relationship between the stimulus and the spike discharge, we cross-correlated the spike discharge with the white-noise stimulus. As shown in Fig. 12A, the normalized first-order kernel of spike discharge is hyperpolarizing, as is the kernel of the slow potential (computed from the slow potential in the same record), and their waveforms are very similar. We detected a latency of^" 5-8 ms between the two kernels. The fixed latency of the spike kernels suggests that the site of spike generation is some distance from the site of slow potential generation. The second-order kernel of spike discharge was a solitary depolarizing peak on the diagonal (Fig. 12B), and the waveforms of its diagonal cut was a mirror image of the first-order kernel (Fig. 12 C). The results suggest that no complex nonlinearity is involved in the spike generation. 
For a step decrement from a mean illuminance, we can predict a depolarization (excitation) from the spike first-order kernel. The depolarization is augmented by a depolarization from the second-order kernel, and will trigger spike discharge. The generation of spikes is related to the instantaneous amplitude of the input stimulus, but not to any particular timing hidden in the stimulus. DISCUSSION The first intracellular recordings from insect ocellus were made by Chappell and Dowling, who showed that a light stimulus depolarized the ocellar receptors and hyperpolarized the second-order neurons. They also showed that the incremental threshold of the offset depolarizing responses of the second-order neurons is a Weber-Fechner function. Although many reports on the insect ocellus have followed, the responses in these studies were evoked by flashes of light given in the dark and measurements were made on the static aspects of the step-evoked responses. We used white-noise-modulated light to evoke responses from cockroach ocellar neurons. Cockroaches in their natural environment do not experience a flashing spot of light in the dark. Their photic inputs fluctuate around a mean illuminance, and their visual systems including ocelli must be developed to appreciate changes around a mean illuminance, not a sudden flash in darkness. In our earlier studies on the horizontal cells in the retinas of the turtle (Chappell et al., 1985), catfish (), and skate (manuscript submitted for publication), the response dynamics of (hyperpolarizing) second-order neurons were examined with white-noise stimuli that mimic the inputs the retina receives in its natural environment. We found for the cockroach L-cell that (a) the incremental responses were linear with MSEs of^" 10%, (b) the cell's incremental sensitivity was an exact Weber-Fechner function over a mean illuminance range of 4 log units, and (c) the response dynamics remain unchanged in the same range of mean illuminance. 
These observations indicate that the levels of mean illuminance controlled the amplitude scaling of the incremental response but not its dynamics. This is a remarkable finding because the response dynamics, as well as the incremental sensitivity of all the visual systems so far studied, depend upon the levels of mean illuminance. This is the case with Limulus photoreceptors (Fuortes and Hodgkin, 1964), photoreceptors of insects compound eyes (Pinter, 1972 ;Dubs, 1981), vertebrate cones (Baylor and Hodgkin, 1973) and secondorder neurons (;Tranchina et al.,, 1984), and the human visual system. Such a coupling has been one of the principal features of models ofvisual systems (Fuortes and Hodgkin, 1964;Kelly, 1971). This study shows that the couplings of sensitivity and dynamics are not necessarily ubiquitous characteristics of the visual system. We could not measure incremental responses at a mean illuminance of <0.002 wW/cm 2, because the responses were much smaller than the noise at that low illuminance. With a very long exposure to a very low-illuminance light, the sensitivity might be improved and the incremental responses might be measured. The dynamics of incremental responses at a very low mean illuminance may be different from those observed in the present experiment, if indeed they exist. The response of ocellar L-cells to a steady or modulated stimulus is characterized by the loss of the steady hyperpolarizing component, Vo, i.e., Vo is much smaller than VP. This response characteristic appears as a large initial transient hyperpolarization, but the ratio of Vp to Vo differs among the L-cells of several insects. In the dragonfly (Klingham and Chappell, 1978) and bee (Milde, 1981(Milde,, 1984Milde and Homberg, 1984), the steady state response of L-cells has almost no steady component, V.. In the cockroach, as shown here, and possibly in the locust, the response of L-cells retains a small steady hyperpolarization. 
The transient nature of the L-cell's response indicates that L-cells respond mainly to changes in mean illuminance, I(t), but not to the mean magnitude, lo. The transient nature of the response yields the characteristic incremental sensitivity function. A similar analysis of the response dynamics of the ocelli of other insects should produce results very similar to those found in the cockroach ocellus. 29 0 THE JOURNAL OF GENERAL PHYSIOLOGY " VOLUME 88 " 1986 Spike discharges seen in the L-cell have been associated with the offset of the step stimulus (Ruck, 1957 ;Mizunami et al., 1982). In some insects, the ocellar L-cells produced spontaneous discharges that were suppressed by steady illumination (dragonflies : Chappell and Dowling, 1972 ;bees : Milde, 1981bees : Milde,, 1984. In this study, we found that (a) the first-order kernel for spike discharge was hyperpolarizing ; (b) the second-order nonlinearity was a simple, on-diagonal depolarization ; and (c) the waveform (dynamics) of the first-order kernel for spike discharge was identical to that of the slow potential kernel. These findings suggest that the slow potential, when depolarized to a sufficient degree, produced spike discharges : there was no complex signal transformation between the generation of the slow potential and that of spike discharges. As the first-order kernel is hyperpolarizing for both slow and spike responses, a depolarization (excitation) is produced by a decremental stimulus. The function of spikes of Lcells is to detect dimming from a mean illuminance. In conclusion, we propose a sandwich model for the cockroach ocellus. Receptors and (slow potentials of) L-cells form a linear filter whose gain, but not dynamics, is controlled by the mean illuminance in such a fashion that for a 10fold increase in the mean illuminance, the gain decreases by exactly 1/10. This is a piecewise linearization. 
The production of a spike discharge in L-cells is a nonlinear process, and the correlation between the white-noise input and the spike discharges identifies the linear and lower-order nonlinear filters. The lowerorder, probably a second-order, nonlinearity produces a depolarization (excitation), which, together with the (linear) depolarization produced by a dimming from a mean illuminance, produces a spike discharge. The linear filter for a spike discharge corresponds to the preceding linear filter formed by the slow responses of L-cells. The horizontal cells in the vertebrate retina are second-order neurons that receive inputs from a large number of receptors. In vertebrates, both the receptors and the (majority of) horizontal cells produce a hyperpolarizing response. In the ocellus, the receptors depolarize and the second-order cells, which also receive inputs from a large number of receptors, hyperpolarize. In the vertebrate retina, transmission is sign-noninverting, whereas in the ocellus it is sign-inverting. Although the sign of signal transmission is opposite, the L-cell and vertebrate horizontal cells share many features : (a) the response to a whitenoise stimulus is almost linear ; (b) the incremental sensitivity is Weber-Fechnerlike, although in the horizontal cell, sensitivity is approximately a Weber-Fechner function, but in the L-cell, it is exactly a Weber-Fechner function ; and (c) the small nonlinearity was for amplitude compression : no complex nonlinearity was found. As we have already discussed, the crucial difference was that the dynamics of the incremental response from all vertebrate horizontal cells were dependent upon the levels of mean illuminance, whereas in cockroach ocellus the response dynamics was independent of the level of illumination. We thank Yu-Ichiro Ando, National Institute for Basic Biology, Okazaki, Japan, for his assistance in analysis and Dr. 
Masanori Sakuranaga, Canon Research Center, Canon Corp., Atsugi, Japan, for his suggestions. The results shown in Fig
<filename>src/rewriter.ts
/**
* @author SirJosh3917
* @copyright 2020 SirJosh3917
* @license MIT
* @description This is the core of the denoporter service, which rewrites typescript code.
*/
import ts from "typescript";
/**
* Uses the typescript compiler to parse typescript code, and rewrite the imports.
* @param code The typescript code to rewrite imports for.
*/
export function rewrite(code: string): string {
// https://github.com/Microsoft/TypeScript/wiki/Using-the-Compiler-API
const sourceFile = ts.createSourceFile('rewrite.ts', code, ts.ScriptTarget.Latest);
const printer = ts.createPrinter();
let modified = "";
sourceFile.forEachChild(node => {
const result = rewriteNode(node);
let appendingNode = node;
if (result) {
appendingNode = result;
}
modified += printer.printNode(ts.EmitHint.Unspecified, appendingNode, sourceFile);
modified += '\r\n';
})
return modified;
}
/**
 * Builds a copy of `node` whose module specifier has been run through
 * rewriteModule, when the node is an import or export declaration with a
 * string-literal specifier. Returns nothing for any other node, signalling
 * the caller to keep the original node unchanged.
 */
function rewriteNode(node: ts.Node): ts.Node | void {
    // only import/export declarations carry module specifiers
    if (ts.isImportDeclaration(node)) {
        const specifier = node.moduleSpecifier;
        // "If this is not a StringLiteral it will be a grammar error."
        if (!ts.isStringLiteral(specifier)) return;
        return ts.createImportDeclaration(
            node.decorators,
            node.modifiers,
            node.importClause,
            ts.createStringLiteral(rewriteModule(specifier.text))
        );
    }
    if (ts.isExportDeclaration(node)) {
        const specifier = node.moduleSpecifier;
        // export declarations without a specifier (e.g. `export { x };`)
        // need no rewriting
        if (!specifier) return;
        // "If this is not a StringLiteral it will be a grammar error."
        if (!ts.isStringLiteral(specifier)) return;
        return ts.createExportDeclaration(
            node.decorators,
            node.modifiers,
            node.exportClause,
            ts.createStringLiteral(rewriteModule(specifier.text)),
            node.isTypeOnly
        );
    }
}
function rewriteModule(module: string) {
// naive check incase module extension is already defined
if (module.endsWith('.ts') || module.endsWith('.js')) return module;
// in the future, it may be better to check that the given attempted module paths actually exist
return module + '.ts';
} |
<reponame>lakehui/Vim_config
/*
* TEST SUITE FOR MB/WC FUNCTIONS IN C LIBRARY
*
* FILE: dat_wcscoll.c
*
* WCSCOLL: int wcscoll (const wchar_t *ws1, const wchar_t *ws2);
*/
/*
* CAUTION:
* When LC_COLLATE (or LC_ALL) is set for ja_JP.EUC,
* wcscoll() core-dumps for big values such as 0x3041
* (0x0041 is okay) in glibc 2.1.2.
*
* NOTE:
* a) When 0 is expected as a return value, set ret_flg=1.
* - the return value is compared with an expected value: ret_val.
* b) When a positive value is expected as a return value,
* set ret_flg=0 and set cmp_flg=+1.
* - the return value is not compared with the expected value
* (can not be compared); instead, the test program checks
* if the return value is positive when cmp_flg=+1.
* c) When a negative value is expected as a return value,
* ......
* d) When data contains invalid values, set err_val to the expected errno.
* Set ret_flg=0 and cmp_flg=0 so that it doesn't compare
* the return value with an expected value or doesn't check
* the sign of the return value.
*
*
* -------------------------------------------
* CASE err_val ret_flg ret_val cmp_flg
* -------------------------------------------
* a) 0 1 0 0
* b) 0 0 0 +1
* c) 0 0 0 -1
* d) EINVAL 0 0 0
* -------------------------------------------
*/
/* Test-vector table for wcscoll(); each expect tuple is
   { err_val, ret_flg, ret_val, cmp_flg } -- see the key in the
   header comment above. Each case compares two wide strings. */
TST_WCSCOLL tst_wcscoll_loc [] = {
/* de locale: wide characters in the 0x00Ex range (Latin-1 accented
   letters); cases #9/#10 use 0x008E/0x0092, outside printable ASCII. */
{ { Twcscoll, TST_LOC_de },
{
{ /*input.*/ { { 0x00E1,0x00E2,0x00E3,0x0000 },
{ 0x00E1,0x00E2,0x00E3,0x0000 }, }, /* #1 */
/*expect*/ { 0,1,0, 0, },
},
{ /*input.*/ { { 0x0000,0x00E1,0x00E3,0x0000 },
{ 0x0000,0x00E2,0x00E3,0x0000 }, }, /* #2 */
/*expect*/ { 0,1,0, 0, },
},
{ /*input.*/ { { 0x00E1,0x00E1,0x00E3,0x0000 },
{ 0x0000,0x00E2,0x00E3,0x0000 }, }, /* #3 */
/*expect*/ { 0,0,0, +1, },
},
{ /*input.*/ { { 0x0000,0x00E2,0x00E3,0x0000 },
{ 0x00E1,0x00E1,0x00E3,0x0000 }, }, /* #4 */
/*expect*/ { 0,0,0, -1, },
},
{ /*input.*/ { { 0x00E1,0x0042,0x00E3,0x0000 },
{ 0x00E1,0x0061,0x00E3,0x0000 }, }, /* #5 */
/*expect*/ { 0,0,0, +1, },
},
{ /*input.*/ { { 0x00E1,0x0061,0x00E3,0x0000 },
{ 0x00E1,0x0042,0x00E3,0x0000 }, }, /* #6 */
/*expect*/ { 0,0,0, -1, },
},
{ /*input.*/ { { 0x00E1,0x00E2,0x0000 },
{ 0x00E1,0x00E2,0x00E9,0x0000 }, }, /* #7 */
/*expect*/ { 0,0,0, -1, },
},
{ /*input.*/ { { 0x00E1,0x00E2,0x00E9,0x0000 },
{ 0x00E1,0x00E2,0x0000 }, }, /* #8 */
/*expect*/ { 0,0,0, +1, },
},
{ /*input.*/ { { 0x00E1,0x0092,0x00E9,0x0000 },
{ 0x00E1,0x008E,0x00E9,0x0000 }, }, /* #9 */
/*expect*/ { 0,0,0, +1, },
},
{ /*input.*/ { { 0x00E1,0x008E,0x00E9,0x0000 },
{ 0x00E1,0x0092,0x00E9,0x0000 }, }, /* #10 */
/*expect*/ { 0,0,0, -1, },
},
{ .is_last = 1 }
}
},
/* en_US locale: plain ASCII letters; #5/#6 upper/lowercase ordering and
   #9/#10 out-of-range characters differ under SHOJI_IS_RIGHT. */
{ { Twcscoll, TST_LOC_enUS },
{
{ /*input.*/ { { 0x0041,0x0042,0x0043,0x0000 },
{ 0x0041,0x0042,0x0043,0x0000 }, }, /* #1 */
/*expect*/ { 0,1,0, 0, },
},
{ /*input.*/ { { 0x0000,0x0041,0x0043,0x0000 },
{ 0x0000,0x0042,0x0043,0x0000 }, }, /* #2 */
/*expect*/ { 0,1,0, 0, },
},
{ /*input.*/ { { 0x0041,0x0041,0x0043,0x0000 },
{ 0x0000,0x0042,0x0043,0x0000 }, }, /* #3 */
/*expect*/ { 0,0,0, +1, },
},
{ /*input.*/ { { 0x0000,0x0042,0x0043,0x0000 },
{ 0x0041,0x0041,0x0043,0x0000 }, }, /* #4 */
/*expect*/ { 0,0,0, -1, },
},
#ifdef SHOJI_IS_RIGHT
/* <WAIVER> */ /* assume ascii */
{ /*input.*/ { { 0x0041,0x0042,0x0043,0x0000 },
{ 0x0041,0x0061,0x0043,0x0000 }, }, /* #5 */
/*expect*/ { 0,0,0, -1, },
},
/* <WAIVER> */ /* assume ascii */
{ /*input.*/ { { 0x0041,0x0061,0x0043,0x0000 },
{ 0x0041,0x0042,0x0043,0x0000 }, }, /* #6 */
/*expect*/ { 0,0,0, +1, },
},
#else
/* XXX Correct order is lowercase before uppercase. */
{ /*input.*/ { { 0x0041,0x0042,0x0043,0x0000 },
{ 0x0041,0x0061,0x0043,0x0000 }, }, /* #5 */
/*expect*/ { 0,0,0, +1, },
},
{ /*input.*/ { { 0x0041,0x0061,0x0043,0x0000 },
{ 0x0041,0x0042,0x0043,0x0000 }, }, /* #6 */
/*expect*/ { 0,0,0, -1, },
},
#endif
{ /*input.*/ { { 0x0041,0x0042,0x0000 },
{ 0x0041,0x0042,0x0049,0x0000 }, }, /* #7 */
/*expect*/ { 0,0,0, -1, },
},
{ /*input.*/ { { 0x0041,0x0042,0x0049,0x0000 },
{ 0x0041,0x0042,0x0000 }, }, /* #8 */
/*expect*/ { 0,0,0, +1, },
},
#ifdef SHOJI_IS_RIGHT
{ /*input.*/ { { 0x0041,0x0092,0x0049,0x0000 },
{ 0x0041,0x008E,0x0049,0x0000 }, }, /* #9 */
/*expect*/ { 0,0,0, +1, },
},
{ /*input.*/ { { 0x0041,0x008E,0x0049,0x0000 },
{ 0x0041,0x0092,0x0049,0x0000 }, }, /* #10 */
/*expect*/ { 0,0,0, -1, },
},
#else
/* Do not assume position of character out of range. */
{ /*input.*/ { { 0x0041,0x0092,0x0049,0x0000 },
{ 0x0041,0x008E,0x0049,0x0000 }, }, /* #9 */
/*expect*/ { 0,0,0, 0, },
},
{ /*input.*/ { { 0x0041,0x008E,0x0049,0x0000 },
{ 0x0041,0x0092,0x0049,0x0000 }, }, /* #10 */
/*expect*/ { 0,0,0, 0, },
},
#endif
{ .is_last = 1 }
}
},
/* ja_JP.eucJP locale: 0x30xx are kana code points; #7/#8 compare
   half-width katakana (0xFF71) against full-width katakana (0x30A2).
   See the CAUTION in the header about large values on old glibc. */
{ { Twcscoll, TST_LOC_eucJP },
{
{ /*input.*/ { { 0x3041,0x3042,0x3043,0x0000 },
{ 0x3041,0x3042,0x3043,0x0000 }, }, /* #1 */
/*expect*/ { 0,1,0, 0, },
},
{ /*input.*/ { { 0x0000,0x3041,0x3043,0x0000 },
{ 0x0000,0x3042,0x3043,0x0000 }, }, /* #2 */
/*expect*/ { 0,1,0, 0, },
},
{ /*input.*/ { { 0x3041,0x3041,0x3043,0x0000 },
{ 0x0000,0x3042,0x3043,0x0000 }, }, /* #3 */
/*expect*/ { 0,0,0, +1, },
},
{ /*input.*/ { { 0x0000,0x3042,0x3043,0x0000 },
{ 0x3041,0x3041,0x3043,0x0000 }, }, /* #4 */
/*expect*/ { 0,0,0, -1, },
},
{ /*input.*/ { { 0x3041,0x0042,0x3043,0x0000 },
{ 0x3041,0x0061,0x3043,0x0000 }, }, /* #5 */
/*expect*/ { 0,0,0, -1, },
},
{ /*input.*/ { { 0x3041,0x0061,0x3043,0x0000 },
{ 0x3041,0x0042,0x3043,0x0000 }, }, /* #6 */
/*expect*/ { 0,0,0, +1, },
},
{ /*input.*/ { { 0x3041,0x3042,0xFF71,0x0000 },
{ 0x3041,0x3042,0x30A2,0x0000 }, }, /* #7 */
/*expect*/ { 0,0,0, -1, },
},
{ /*input.*/ { { 0x3041,0x3042,0x30A2,0x0000 },
{ 0x3041,0x3042,0xFF71,0x0000 }, }, /* #8 */
/*expect*/ { 0,0,0, +1, },
},
{ /*input.*/ { { 0x30FF,0x3092,0x3049,0x0000 },
{ 0x3041,0x308E,0x3049,0x0000 }, }, /* #9 */
/*expect*/ { 0,0,0, -1, },
},
{ /*input.*/ { { 0x3041,0x308E,0x3049,0x0000 },
{ 0x30FF,0x3092,0x3049,0x0000 }, }, /* #10 */
/*expect*/ { 0,0,0, +1, },
},
{ .is_last = 1 }
}
},
/* Sentinel terminating the locale table. */
{ { Twcscoll, TST_LOC_end } }
};
|
Q:
106% complete! Strunk and White progress in review pane
“Your review progress” showing target badge that I've already received
So as far as I know, I haven't made very many edits today. Judging by my history, I have 4 edits and/or retags ("revisions" as it calls them) in the last two days. When I go to the review page, I see a handy-dandy review progress screen on the side. Here's a snapshot:
Don't get me wrong. I'm not saying I should have Strunk and White. It's quite obvious that I have some retags or some other revisions that do not count towards Strunk and White, or I'd have it by now. And that's fine. I'm just pointing out the sidebar is a bit misleading.
A:
I just fixed an issue where Strunk & White was not counting edits on deleted posts, yet the little side bar was.
Now there are 2 cases where the sidebar may be out of sync:
We grant badges at most once every hour, so the sidebar may be indicating that you are about to get the badge.
We cache the fact you have, or do not have, the badge for 240 seconds.
In future, if you reached more than 100% and have waited an hour, let me know. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.