# -*- coding: utf-8 -*-
"""
Created on Sun May 3 18:58:46 2020
@author: naoki
"""
from heapq import heappush, heappop
N = int(input())
a = list(map(int, input().split()))  # 3*N integers

# hq: min-heap over candidates for the "first" group (prefix elements)
# hq_r: min-heap of negated suffix elements, so popping yields the largest value
hq = []
hq_r = []
for i in range(N):
    heappush(hq, a[i])
    heappush(hq_r, -a[-1 - i])

# max_A[k]: maximum sum of N elements chosen from the first N+k elements
# min_C[k]: minimum sum of N elements chosen from the last N+k elements
max_A = [0] * (N + 1)
max_A[0] = sum(hq)
min_C = [0] * (N + 1)
min_C[0] = -sum(hq_r)

for i in range(N):
    heappush(hq, a[N + i])
    heappush(hq_r, -a[-N - 1 - i])
    minus = heappop(hq)        # drop the smallest element from the prefix heap
    minus_r = heappop(hq_r)    # drop the largest element from the suffix heap (stored negated)
    max_A[i + 1] = max_A[i] + a[N + i] - minus
    min_C[i + 1] = min_C[i] + a[-N - 1 - i] + minus_r

# Answer: best split point maximising (prefix sum) - (suffix sum)
max_val = float("-inf")
for i in range(N + 1):
    kari = max_A[i] - min_C[-1 - i]
    if max_val < kari:
        max_val = kari
print(max_val) |
// AppendRange adds r to s, if r is non-empty.
// If possible, it extends the last Range in s.Ranges; if not, it creates a new one.
func (s *Scope) AppendRange(r Range) {
	if r.End <= r.Start {
		return
	}
	i := len(s.Ranges)
	if i > 0 && s.Ranges[i-1].End == r.Start {
		s.Ranges[i-1].End = r.End
		return
	}
	s.Ranges = append(s.Ranges, r)
} |
package com.me.vo;
public class LevelValueVo {

    int user_id;
    int value;

    public int getUser_id() {
        return user_id;
    }

    public void setUser_id(int user_id) {
        this.user_id = user_id;
    }

    public int getValue() {
        return value;
    }

    public void setValue(int value) {
        this.value = value;
    }
}
|
import fiona
import click
from rasterio import features, Affine


def makeAffine(bounds, width):
    # Build an affine transform that maps the feature bounds onto a
    # `width`-column raster, padded by one cell on each side.
    xD = bounds[2] - bounds[0]
    yD = bounds[3] - bounds[1]
    cS = xD / float(width - 2)          # cell size
    height = int(yD / cS) + 1
    return Affine(cS, 0.00, bounds[0] - cS,
                  0.00, -cS, bounds[3] + cS), height + 1


@click.command()
@click.argument('src_path', type=click.Path(exists=True))
@click.option('--width', type=int, default=100)
def printAscii(src_path, width):
    with fiona.open(src_path, 'r') as src:
        aff, height = makeAffine(src.bounds, width)
        # Burn every feature geometry into the raster with the value 8
        rasarr = features.rasterize(
            ((feat['geometry'], 8) for feat in src),
            out_shape=(height, width),
            transform=aff
        )
    for row in rasarr:
        click.echo(''.join(row.astype(str).tolist()).replace('0', '.'))


if __name__ == '__main__':
    printAscii()
# USAGE
#
# $ python printascii.py <inputgeojson> --width 50
# ..................................................
# .............88888.....................88.........
# ........8888888888....8.............888888.88888..
# ...........888888888..........8...........8888....
# ............................888888................
# ..........................88888888888.............
# ........................8888888888888888..........
# .......................8888888888888888888........
# .....................88888888888888888888888......
# ...................888888888888888888888888888....
# .................888888888888888888888888888888...
# ...............88888888888888888888888888888888...
# ..............8888888888888888888888888888888.....
# ...............888888888888888888888888888........
# .................8888888888888888888888...........
# ...................88888888888888888..............
# .88888888888888888..88888888888888................
# .88888888888888888....888888888...................
# .88888888888888888......8888....88888888888888888.
# .88888888888888888..............88888888888888888.
# .88888888888888888..............88888888888888888.
# .88888888888888888..............88888888888888888.
# .88888888888888888..............88888888888888888.
# .88888888888888888..............88888888888888888.
# .88888888888888888..............88888888888888888.
# .88888888888888888..............88888888888888888.
# ................................88888888888888888.
# ................................88888888888888888.
# ................................88888888888888888.
# ................................88888888888888888.
# .................................................. |
Currently, optical recording media such as the CD (Compact Disc) and the DVD (Digital Versatile Disc) are widely used for recording large amounts of data. In recent years, with the development of the personal computer, there has been a demand for recording high-definition moving images and for recording ever larger amounts of data. Optical recording media such as the CD and the DVD cannot be increased in density sufficiently to record such a large amount of data, so the data cannot be recorded on a single disc; recording it requires replacing a plurality of discs. In recent years, attention has therefore been drawn to the hologram recording medium, which can record a substantially larger amount of data than the existing CD and DVD.
One method of hologram information recording divides a light beam into two mutually coherent beams, modulates one of them with data in a spatial light modulator to form signal light, and combines, on a recording medium, the signal light with the other beam, which acts as reference light, thereby recording the data as interference fringes. The recording medium is then irradiated with the same reference light used during recording so that the signal light can be detected as reproduction light, and the data can thus be reproduced. In hologram information recording/reproducing, the signal light is not reproduced if the wavelength or the irradiation angle of the light used for reproduction differs from that used for recording. Exploiting this characteristic, multiplex recording has been performed in which the wavelength or the irradiation angle of the light is varied so that data can be recorded repeatedly in the same area of the recording medium.
Patent Document 1: Japanese Unexamined Patent Application Publication No. 2002-216359 |
Extracting Relevant Terms from Mashup Descriptions for Service Recommendation Due to the exploding growth in the number of web services, mashup has emerged as a service composition technique to reuse existing services and create new applications with the least amount of effort. Service recommendation is essential to facilitate mashup developers locating desired component services among a large collection of candidates. However, the majority of existing methods utilize service profiles for content matching, not mashup descriptions. This makes them suffer from vocabulary gap and cold-start problem when recommending components for new mashups. In this paper, we propose a two-step approach to generate high-quality service representation from mashup descriptions. The first step employs a linear discriminant function to assign each term with a component service such that a coarse-grained service representation can be derived. In the second step, a novel probabilistic topic model is proposed to extract relevant terms from coarse-grained service representation. Finally, a score function is designed based on the final high-quality representation to determine recommendations. Experiments on a data set from ProgrammableWeb.com show that the proposed model significantly outperforms state-of-the-art methods. Introduction With the prevalence of web services and related technologies, service computing has become an essential part in the information era. Due to the increasing number of web services, mashup has emerged as a popular technique to facilitate developers creating new web applications through service compositions. Several mashup platforms have drawn great attention in recent years such as IBM's ProgrammableWeb.com and myExperiment.org created by the universities of Southampton, Manchester, and Oxford in the UK. These websites serve as a platform for users to publish and consume web services and their compositions. However, mashup developers suffer from the growing number of available services since it is a challenging and time-consuming task to search for desired component services in a vast repository. Service recommendation and discovery is a popular solution to information overload faced by mashup developers. To avoid confusion, we will use (mashup) developer and (service) user interchangeably in this paper. Service recommendation and discovery is a hot research theme in the service computing society. By analyzing developer's query intentions, service recommendation aims to generate a list of component services in response to facilitate mashup creation. Existing methods are mainly based on content matching between user queries and service profiles from service providers. However, there are two inherent drawbacks with these approaches. Firstly, if a service profile is poorly described by its provider, these methods can hardly rank the service high although it is relevant to the query. Moreover, service profiles are written by service providers while queries are made by mashup developers, leading to a vocabulary gap between the two. Mashup developers may not exactly know what terms they should input when searching a service. Recently, several latent factors models are proposed for service recommendation leveraging mashup-service usage matrix. However, both methods ignore mashup descriptions and therefore suffer from cold-start problem for new mashups. 
To overcome these drawbacks, we propose to derive high-quality service representations by extracting relevant terms from mashup descriptions, which usually consists of feature words of component services and mashup-specific terms. Since mashup descriptions are written in the vocabulary of developers, the new representation naturally fills the vocabulary gap. Also, the more historical mashups a service is co-invoked by, the higher the descriptive quality of its new representation will be. For example, a service may have terms like "local restaurant reviews" in its profile. If a mashup developer searches for "find nearby food", profile-based methods fail to rank the service high since its profile has no common terms with the query. However, if there is a historical mashup employing the particular service and containing words like "searching food nearby" in its description text, the service may be assigned with terms such as "food" and "nearby" in its new representation. Thus the service may be discovered with the help of the new representation. We propose a two-step approach to accomplish this goal. As a preliminary treatment, we define a linear discriminant function to assign one component service to each word in mashup descriptions. Terms assigned to the same service in a mashup description can be viewed as a separate comment for the service in developer's vocabulary. Obviously, not all terms in the developer's comments are relevant to its responsible service. Therefore, we further propose a novel probabilistic topic model to identify relevant terms and filter noisy words. High-quality service representations can be obtained by aggregating relevant terms in all corresponding developer comments. Based on the new representation, we design a score function to measure the matching degree between services and user queries. The key idea is to employ a distribution of term counts over services, which integrates relevance and popularity. The main contributions of this paper are summarized as follows: We propose a two-step approach to Extract Relevant Terms (ERT) for component services from mashup descriptions. The relevant terms of a service constitute a high-quality representation with clear advantages over traditional service profile. We further design a score function based on the new representation to recommend component services. The score function captures both service-word relevance and their respective popularity in a unified way. Comprehensive experiments on a real-world data set from ProgrammableWeb.com illustrate that the proposed method significantly outperforms state-ofthe-art approaches in recommendation accuracy. The remainder of this paper is organized as follows. Section 2 introduces necessary definitions to describe a mashup platform and then formulates the service recommendation problem. Section 3 presents the relevant terms extraction framework and service recommendation algorithm. Section 4 reports the experimental results and Section 5 summarizes the related work. Section 6 concludes the paper. Problem Definition In this section, we introduce necessary definitions related to mashup platform and then formulate the service recommendation problem. We denote V as the set of unique words that appear in all service profiles and mashup descriptions. S represents the set of web services collected by a mashup platform. Every service s 2 S contains a bag of words SP s offered by its provider as profile. The service profile usually accounts for its core functions and features. 
Typical examples include Web Service Description Language (WSDL) documents, tags, and unstructured text descriptions. We denote M as the set of mashups created in a mashup platform. Similarly, a bag of words MD_m is associated with every mashup m ∈ M to give a necessary description. CS_m denotes the set of component services invoked by m. Based on these definitions, we now give a formal statement of the problem considered in this paper. Problem: Component Service Recommendation for Mashup Developers. Given a text query from a mashup developer, the component service recommendation algorithm aims to generate a ranked list of services in response, where higher-ranked services should have a greater chance of being invoked by the query user. The component service recommendation algorithm can be deployed in a mashup platform such as ProgrammableWeb.com. Given the text query, the algorithm can help developers find desirable component services and shorten the development cycle. The overview of the proposed model is illustrated in Fig. 1. Firstly, each word in mashup descriptions is associated with one component service by a linear discriminant function. Words assigned to the same component service in a mashup constitute a separate developer comment. The second step uses a novel probabilistic topic model to extract relevant terms from the developer comments related to each service. Finally, relevant terms are preserved and aggregated to form a new service representation, upon which a score function is further designed to measure the matching degree between services and user queries. A ranked list of services is returned to the requesting developer in descending order of scores. Model Framework In this section, we present our two-step approach to extract relevant terms from mashup descriptions, upon which a score function is further proposed to generate recommendation lists. Service assignment for terms Generally, feature words of component services and mashup-specific terms constitute a mashup description. For example, a mashup that invokes Google Maps and Yelp may have the following description: "The application allows users to visualize local restaurants on the map along with customer reviews according to your real-time GPS position." Terms in italics refer to Google Maps while bold terms are relevant to Yelp, with the others being mashup-specific. To extract relevant terms from mashup descriptions for services, we propose to generate a coarse-grained representation first and elaborate on it in the second step. Specifically, we conduct service assignment term by term through a linear discriminant function, which is designed based on the following three assumptions: service assignment for a term is restricted to the mashup's own component services; a word is more likely to be assigned to services containing it in their profiles; and the more times a word co-occurs with a service in historical mashups, the more likely it is to be assigned to that service. Next, we give several mathematical notations to make our statement concise. I(s, m) is 1 if service s belongs to CS_m and 0 otherwise. Similarly, I(w, s) is 1 if word w belongs to SP_s and 0 otherwise. We define tf(w, m) as the term frequency of word w in MD_m.
Therefore the co-occurrence count co(s, w) of service s with word w across all historical mashups can be calculated as follows: Based on our assumptions, the service assignment x(w) for word w in mashup m can be determined by the following equation: where a mixing parameter between 0 and 1 is introduced to control the dependence on service profiles. When service assignment for all terms is completed, we combine the terms assigned to the same service in a single mashup description to form a separate developer comment on that service. A coarse-grained representation for a service can then be derived by aggregating the corresponding developer comments. Relevant terms extraction Obviously, not all terms in the coarse-grained representation are relevant to the corresponding service. Therefore we further propose a novel probabilistic topic model to identify relevant terms for services from the developer comments. Specifically, we assume the terms in each developer comment on a particular service are generated from two topics: a common topic reflecting features of the service and a comment-specific topic reflecting features of the originating mashup. The first topic is responsible for relevant terms while the second topic is likely to generate noisy words. Table 1 summarizes the notations used in this model. The generative process of developer comments is formally described as follows: The graphical model corresponding to this process is shown in Fig. 2. As we can see, the word tokens w_{d,i} are observable variables and denoted by shaded circles, while a, b, and the remaining prior are pre-defined hyper-parameters in the model. The others are latent variables, which we estimate using Gibbs sampling. Specifically, we sample the switch variable y_{d,i} for word token w_{d,i} in the developer comment on service s according to the following equation: where n_{d,1} is the number of tokens whose switch variable has been set to 1 in comment d and n_{d,0} is the number of tokens whose switch variable has been set to 0 in comment d. Also, c_{s,v} is the number of times that word v has been assigned to the feature topic of service s, and c_{d,v} is the number of times that word v has been assigned to the specific topic of comment d. The superscript ":" denotes a quantity excluding the current instance. After a sufficient number of sampling iterations, we obtain a reliable estimate of the latent variables. To extract relevant terms for services, we preserve the word tokens whose switch variables equal one and drop the others. Finally, we generate high-quality service representations by aggregating the relevant terms in all developer comments. Recommendation algorithm Based on the high-quality service representation, we now present our service recommendation algorithm for mashup creation. Mathematically, we denote all service representations by a matrix R where each entry R(s, w) is equal to the term frequency of word w in the representation of service s. We observe that the distribution of term frequency over services can be leveraged as a good measure of both relevance and popularity. Firstly, the more relevant a term is to a service, the more likely it is to co-occur with the service in developer comments, and the higher the corresponding term frequency. In addition, the more popular a service is (i.e., invoked by more historical mashups), the greater its chance of being assigned terms, and thus the higher the corresponding term frequency. A similar conclusion can be drawn for popular words. Therefore we propose to employ the distribution of term frequency over services to design a score function for service ranking.
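A minimal Python sketch of the service-assignment step described above, assuming the discriminant score is a weighted mix of profile membership I(w, s) and normalised co-occurrence co(s, w) with a mixing weight lam in [0, 1]; the function and variable names are illustrative rather than taken from the paper:
from collections import defaultdict

def cooccurrence(mashups):
    # mashups: iterable of dicts with keys 'services' (set of service ids)
    # and 'tf' (dict mapping word -> term frequency in the mashup description)
    co = defaultdict(float)
    for m in mashups:
        for s in m['services']:
            for w, tf in m['tf'].items():
                co[(s, w)] += tf
    return co

def assign_service(w, mashup, profiles, co, lam=0.2):
    # Pick the component service of `mashup` that best explains word w.
    def score(s):
        in_profile = 1.0 if w in profiles.get(s, set()) else 0.0
        total = sum(co.get((s, v), 0.0) for v in mashup['tf']) or 1.0
        return lam * in_profile + (1.0 - lam) * co.get((s, w), 0.0) / total
    return max(mashup['services'], key=score)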
Specifically, the distribution of term frequency over services for each word w is calculated as follows: Given a collection of words as a user query Q, we calculate the matching score of service s with respect to Q by accumulating the contribution of every word in the query: Services with higher scores are more likely to satisfy the needs of the query user. Therefore, we generate a ranked list of services in descending order of the matching scores in response to the requesting user. Dataset construction Evaluation of the proposed model ERT is conducted on a real-world dataset from ProgrammableWeb.com, which is the largest mashup platform so far. We crawled from the website the metadata of services and mashups ranging from September 2005 to July 2013. Every mashup contains a list of component services and is associated with natural language text describing its details. Mashup names and tags are also combined into the corresponding description text to enhance its semantics. Similarly, each service has a bag of words describing its functionality and features (i.e., its service profile). Before the raw texts can be used in our experiments, several natural language preprocessing tasks need to be completed. In this paper, we employ an approach similar to that proposed in Ref., as follows: Tokenization. As an initial treatment, we tokenize the unstructured description text by means of a separator. Filtering. In the second step, we remove stop words that are not helpful for description. Suffix Stripping. Thirdly, we obtain stem words by stripping suffixes from words. For example, notify, notification, and notifications will be replaced with the same stem notify. Error Correction. Meaningful special characters filtered out by standard parsing tools are recovered. Examples include EC2 and S3 in Amazon APIs. Table 2 summarizes the basic properties of the final data set. To evaluate the performance of our model, the whole data set is divided into a training set and a testing set at four points along the timeline. As shown in Fig. 3, data before the dividing point is used for training and the remainder for performance testing. We thus obtained four test cases, denoted D1 to D4. Each mashup description in the testing set is regarded as a developer query and the corresponding component services as the ground truth. Evaluation metrics Rec@K and Pre@K are defined in the following equations, respectively: where TK(m) represents the Top-K services recommended for mashup m. We fix K at a reasonable value of 5 in this paper, since the average number of component services per mashup is around 2. With recall and precision, the final metric F1 can be calculated as follows: Comparison methods We compare the following methods with the proposed model ERT for service recommendation. Term Frequency-Inverse Document Frequency (TF/IDF). Based on service profiles, a vector of term frequencies weighted by inverse document frequency, w_s, is employed to represent a service. The user query Q is represented as a vector of term frequencies w_q, and cosine similarity is used to score the matching degree between service and user query: Unigram Language Model (ULM). This baseline models each service profile by a multinomial distribution over terms p(w|s), and maximum likelihood is used to obtain the parameter estimates: where tf(w, s) is the term frequency of word w in service s and a smoothing parameter is introduced to avoid zero probability values. Finally, the query likelihood under the profile of service s is calculated as a ranking score: Latent Dirichlet Allocation (LDA).
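A minimal Python sketch of the scoring step described at the start of this passage, assuming p(s|w) is taken as word w's term-frequency share of service s in the matrix R; the names here are illustrative rather than from the paper:
def score_services(query_words, R, services):
    # R: dict mapping (service, word) -> term frequency in the new representation
    col_sums = {w: sum(R.get((s, w), 0) for s in services) for w in set(query_words)}
    scores = {}
    for s in services:
        scores[s] = sum(R.get((s, w), 0) / col_sums[w]
                        for w in query_words if col_sums[w] > 0)
    # Ranked list of (service, score) in descending order of matching score
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)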
This baseline models service profiles by LDA as stated in Ref. Latent semantic structures (i.e., the topic distribution of services p(k|s) and the word distribution of topics p(w|k)) are leveraged to calculate the matching degree between services and the user query Q: where r(m, Q) represents the similarity between historical mashup m and the user query Q. As stated in Ref., we first employ LDA to model mashup descriptions, and the query likelihood under mashup m is calculated based on latent topics as the similarity measure. Coarse-grained Service Representation (CSR). This baseline is a simplified version of the proposed model ERT in which relevant terms extraction is eliminated and Eq. is calculated based on the coarse-grained service representation. All baselines generate recommendation lists in descending order of the calculated scores and are evaluated under their optimal settings. Finally, we set up the parameters of ERT. The mixing parameter in service assignment for terms is tuned to 0.2. For the hyper-parameters in the topic model for relevant terms extraction, we empirically set a = b = 1 and the remaining prior to 0.01. The number of Gibbs sampling iterations N is set to 50. All experiments were conducted on a Core 2 Duo 3.00 GHz machine with 4 GB RAM. Recommendation performance In this section, we compare the state-of-the-art methods with our approach on four test cases. MAP, Rec@5, Pre@5, and F1 of the different methods are reported in Table 3. As we can see, methods leveraging mashup descriptions (MDCF, CSR, and ERT) clearly outperform the profile-based approaches (TF/IDF, ULM, and LDA) in all evaluation metrics. The vocabulary gap between mashup developers and service providers is responsible for this large performance gap. Furthermore, the proposed model ERT and its simplified version CSR perform much better than MDCF. The reason is that the proposed methods can directly evaluate matching degrees at the service level with the help of component service assignment, while MDCF scores services using the composition as a bridge. Last but not least, ERT obtains a significant improvement over its simplified version CSR in MAP by preserving relevant terms and discarding noisy words, which makes the evaluation in Eq. more accurate. The reason for the marginal improvement of ERT over CSR on Rec@5, Pre@5, and F1 is two-fold. On the one hand, CSR already achieves very good performance; on the other hand, Rec@5, Pre@5, and F1 do not differentiate the rankings of true items in the recommendation lists, in contrast to MAP. Impact of the mixing parameter We now study how the mixing parameter in service assignment for terms influences the recommendation performance of our model. Figure 4 shows the MAP of CSR and ERT on D1 as the parameter is increased from 0 to 1 with a step length of 0.1. All other parameters are fixed as described in Section 4.3. It is obvious from Eq. that a higher value means more reliance on service profiles and less on co-occurrence in mashup descriptions. From Fig. 4, MAP is modest when we depend solely on co-occurrence in mashup descriptions (i.e., the parameter is 0). However, the MAPs of both ERT and CSR rise steadily as the parameter is increased from 0 to 0.2 and reach their maximum at 0.2, which explains our choice in the experimental settings. Beyond that, the MAP of ERT declines as the parameter proceeds to 0.9, while that of CSR remains unchanged. Finally, the MAPs of the two algorithms decrease significantly when the parameter equals 1, which means that service assignment based entirely on service profiles is insufficient. For example, tweet is a feature word of the Twitter API, yet it is not contained in the corresponding service profile.
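For concreteness, the metrics reported in Table 3 can be computed as in the following sketch, assuming the standard definitions of Rec@K, Pre@K, and F1 with TK(m) the Top-K list and CS_m the ground-truth component services; the function names are illustrative:
def rec_at_k(top_k, truth):
    return len(set(top_k) & set(truth)) / len(truth) if truth else 0.0

def pre_at_k(top_k, truth, k=5):
    return len(set(top_k[:k]) & set(truth)) / k

def f1_score(rec, pre):
    return 2 * rec * pre / (rec + pre) if (rec + pre) > 0 else 0.0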
Experiments show that terms of tweet are heavily misclassified when D1. However, with the help of co-occurrence information in mashup descriptions by multiple developers, the problem can be greatly alleviated. Impact of N In this section, we evaluate how the number of Gibbs sampling iterations, N, influences the recommendation performance of ERT. To study the effect, MAP of ERT on D1 is presented in Fig. 5 with N varied from 10 to 50 with a step value of 10. All other parameters are fixed as described in Section 4.3. From Fig. 5, MAP of ERT can achieve modest performance in just 10 iterations and the largest difference is less than 1.5%. The experiments prove that our model displays good convergence. The value of MAP is highest when N equals 50, which explains our choice for N in the experimental settings. Qualitative analysis In this section, we take an additional step to examine the effect of relevant terms extraction proposed in Section 3.2. The word translate is used as an example. We applied our method on D1 with the parameters fixed as previously defined. Table 4 lists the Top-5 services in term frequency distribution based on coarse-grained and high-quality service representation, respectively. From Table 4, the term counts of services relevant to translate (Google Translate and Google Ajax Language) are almost unchanged after relevant terms extraction. In contrast, the counts of irrelevant services (Twilio, Google Maps, and Gravatar) are cleared or halved by the same process. All these efforts contribute to a more accurate estimation of p.sjtranslate/ in Eq. and therefore a higher MAP. From the discussion, we can conclude that the topic model proposed in Section 3.2 for extracting relevant terms is effective. Related Work In this section, we discuss several representative works related to our study. These works can be divided into three categories based on their methods: functionalitybased, Quality of Service (QoS)-based, and networkbased service recommendation. Early works on functionality-based service recommendation mainly employed techniques from information retrieval to calculate the matching degree of functionality between services and user queries. In Ref., service and query are represented by vector of words extracted from WSDL documents and cosine similarity is employed to measure their relevance. However, the method relies on exact term matching between query and services, leading to its limited performance. Meng et al. proposed to characterize a service user by keywords and presented an algorithm based on collaborative filtering for service recommendation. The limitation of this approach is that it requires great human effort to offer high-quality domain knowledge. To overcome the drawbacks of keyword-based methods, Li et al. introduced LDA to model service descriptions extracted from WSDL documents and topic-level semantics were explored to enhance the evaluation of their relevance. Another group of researchers focused on ontology-based approaches. Hobold and Siqueira extended WSDL documents to annotate services by SAWSDL and proposed a graph-based method for service composition. However, ontology construction usually calls for human effort and is timeconsuming due to its high complexity. Moreover, as the popularity of REST-ful APIs and unstructured service description, it is difficult to obtain WSDL documents. Recently, several approaches have been proposed to incorporate service usage history into profile-based recommendation. 
Liu and Fulia followed the idea of collaborative topic regression and combined userservice matrix and service descriptions into a unified latent factor model. In Ref., service correlation is discovered from co-occurrence of services in mashups and incorporated as a regularization term into the traditional matrix factorization framework for mashupservice usage. Both methods utilize service usage history but ignore mashup descriptions. Therefore these methods are limited to in-matrix prediction and suffer from the cold-start problem for new mashup. In contrast, our model leverages both mashup-service usage history and mashup descriptions for a new service representation, which alleviates the cold-start problem and fills the vocabulary gap. Much research is related to the QoS-based recommendation. Collaborative filtering is introduced in Ref. for missing QoS prediction. Top similar services are identified and their QoS values are aggregated as prediction. Service compositions with optimal QoS is also widely studied. Recently, Zhang et al. proposed an improved Fruit Fly Optimization Algorithm to solve the problem with enhanced global searching ability. Reputation is an important QoS factor and emerged as a hot topic in recent years. Wu et al. proposed a dynamic weight formula to calculate service reputation from historical ratings and unfair ratings were removed leveraging the idea of olfactory fatigue phenomenon. Fan et al. employed LDA to cluster services into different domains and recommended services with the highest reputation in each domain. QoS-based recommendation centers on non-functional properties of web services and is different from our focus. There are several methods that exploit social networks for service recommendation. Zhang et al. studied people-service-workflow social network from myExperiment.org and proposed a recommendation algorithm for workflow composition by employing service correlations in the network. The method does not take content information into consideration and therefore fails to respond to the user query. Maaradji et al. proposed to derive implicit social networks from user's composition and consumption activities, based on which user's social proximity and relevance of services are combined to determine the recommendations. However, it focused on recommendation for service users, not for mashups. Conclusion In this paper, we proposed a two-step approach to derive high-quality service representations from mashup descriptions. Based on the coarse-grained representation from component service assignment in the first step, a novel probabilistic topic model is further applied to extract relevant terms and filter out irrelevant ones. Finally, we design a score function based on the derived high-quality representation to evaluate the matching degree between services and queries to generate the recommendation list. Experiments on the dataset from ProgrammableWeb.com demonstrate the effectiveness of the proposed model. For future work, we plan to integrate component service assignment into the proposed topic model framework for a better representation. |
/**
* Implements the {@link Application} interface publishing the received payload as a Spring {@link ApplicationEvent} to all
* matching listeners registered.
* <p>
* In case the {@link ApplicationEventPublisher#publishEvent(Object)} method throws an exception, this exception will be propagated up to
* the {@link quickfix.Session#next()} method. Depending on the value of {@code RejectMessageOnUnhandledException} in the quickfixj
* configuration, the message will be redelivered or dismissed.
*
*
* <p>If this configuration is enabled, an uncaught Exception or Error in the application's message processing will lead to a (BusinessMessage)Reject being sent to
* the counterparty and the incoming message sequence number will be incremented.
*
* <p>If disabled (default), the problematic incoming message is discarded and the message sequence number is not incremented. Processing of the next valid message
* will cause detection of a sequence gap and a ResendRequest will be generated.
*
* @author Eduardo Sanchez-Ros
*/
@Slf4j
public class EventPublisherApplicationAdapter implements Application {

    private Consumer<Object> publishEventConsumer;

    public EventPublisherApplicationAdapter(ApplicationEventPublisher applicationEventPublisher) {
        this.publishEventConsumer = applicationEventPublisher::publishEvent;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void fromAdmin(Message message, SessionID sessionId) {
        publishEvent(FromAdmin.of(message, sessionId));
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void fromApp(Message message, SessionID sessionId) {
        publishEvent(FromApp.of(message, sessionId));
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void onCreate(SessionID sessionId) {
        publishEvent(Create.of(sessionId));
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void onLogon(SessionID sessionId) {
        publishEvent(Logon.of(sessionId));
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void onLogout(SessionID sessionId) {
        publishEvent(Logout.of(sessionId));
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void toAdmin(Message message, SessionID sessionId) {
        publishEvent(ToAdmin.of(message, sessionId));
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void toApp(Message message, SessionID sessionId) {
        publishEvent(ToApp.of(message, sessionId));
    }

    private <T> void publishEvent(T event) {
        try {
            publishEventConsumer.accept(event);
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    }
} |
"""Websauna Depot tests."""
|
def uk_postcodes_primary(size: int = None, shuffle: bool = False, seed: int = None) -> pd.DataFrame:
    return AbstractSample._get_constant(reference='map_uk_postcodes_primary', size=size, seed=seed, shuffle=shuffle) |
Photographs, transparencies, documents and other images are often electronically scanned to produce a digital representation of the image. Typically, the image is scanned with light in order to generate a digital representation of the image.
In image scanning devices, one or more light-emitting devices are used as the exposure lamp for purposes of scanning. In many desktop scanners, one or more cold cathode fluorescent lamps (CCFLs) are employed. CCFLs have many advantages over other types of light sources, including high intensity, long life, price and high efficiency. However, compared to other light sources, CCFLs require a longer period of time to warm-up. Typically, a CCFL may take from ten seconds to over sixty seconds to approach maximum light output. As such, waiting for a CCFL to become fully warmed-up can delay scanning, particularly when scanner calibration is to be performed prior to the first scan following activation (i.e., initial powering on or resuming from suspend or sleep mode) of the lamp.
Scanner calibration is a well-established process directed to reducing the defects resulting from illumination and sensor array sensitivity non-uniformity. Typically, a scanner is calibrated to a surface of known color (e.g., a white surface), often referred to as a calibration strip. The calibration strip is scanned, and the response signals from the sensors in the scanner are analyzed. Since the calibration strip is a known color, the calibration scan may be used to determine the spectral characteristics (e.g., color) and intensity of light emitted from the lamp, and compute an appropriate gain to be used during subsequent scanning. Such calibration is necessary since, among other things, the spectral characteristics and intensity of emitted light will change over the life of the lamp. In most scanners, however, calibration is not performed until the lamp is fully warmed-up, thus further delaying the first scan following lamp activation. |
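For illustration only, a per-sensor gain of the kind described above could be derived from a scan of the white calibration strip as in the following sketch; this is a generic example rather than the specific method discussed here, and the numpy usage and array names are assumptions:
import numpy as np

def compute_gains(white_scan, target_level=255.0, eps=1e-6):
    # white_scan: 2-D array of sensor responses from scanning the calibration strip
    # (rows are scan lines, columns are sensor elements).
    mean_response = white_scan.mean(axis=0)
    return target_level / np.maximum(mean_response, eps)

def apply_gains(raw_line, gains, max_level=255.0):
    # Correct one scanned line for illumination and sensor non-uniformity.
    return np.clip(raw_line * gains, 0.0, max_level)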
from flask import Flask, render_template

app = Flask("DiscordPost")

# WIP ;)
app.run("0.0.0.0", port=5624)
|
Electrical conduit is a thin-walled tubing used to protect and route electrical wiring in a building or structure. Electrical conduit, often in the form of Electrical Metallic Tubing (EMT), is constructed of straight elongated sections of corrosion resistant galvanized steel of about 10 feet in length, with a diameter of between about ½ and 4 inches. For example, EMT with standard trade size designations of ½ inch, ¾ inch, 1 inch, and 1¼ inch are commonly installed by electricians at the site of installation of electrical equipment, and in compliance with the U.S. National Electric Code (NEC) and other building codes.
Prior to installation, it is often necessary to bend the conduit. This can be accomplished with a manually operated tool known as a conduit bender, which provides a desired bend in the conduit without collapsing the conduit walls. A typical conduit bender includes a handle and a head. The head is generally a one-piece construction, including an arcuate shoe with a lateral concave channel for supporting the conduit. A hook is generally formed into the head proximate to one end of the channel for engaging a portion of conduit received in the channel. The handle, which is generally about 2 to 3 feet long, is secured to the head and is generally positioned in a radial line relative to the arcuate shoe. Such manually operated conduit benders are commonly produced by companies such as Benfield Electric Co., Gardner Bender, Greenlee Tools, Ideal Industries, Klein Tools, and NSI Industries, among others.
To bend the conduit, a length of conduit is positioned on a supporting surface, such as the ground, with a portion of the conduit positioned within the channel of the arcuate shoe, such that the hook of the conduit bender engages the conduit. The handle is then forced to roll the shoe onto the conduit, thereby bending the conduit to fill in the arcuate channel. Accordingly, the use of a manually operated conduit bender requires a stable work surface, as well as space sufficient to manipulate the handle relative to the conduit. For larger size conduit, such as EMT with a designated standard size of a 1 inch or greater, the bending may be assisted by an electric, hydraulic or pneumatic motor. Various heavy-duty wheeled or bench mounted benders are produced by companies such as Greenlee Tools, among others.
Frequently installations require the conduit to be routed along the ceiling or parts of a building structure that are normally out of reach when standing on the ground. In such instances, it is common to utilize a lift, frequently referred to as a “cherry picker,” to safely access the intended conduit route. However, given the limited size of the platform or basket of most lifts, and the lack of a stable horizontal work surface, it is difficult to operate a manual conduit bender while using the lift. Accordingly, most electricians bend the conduit on the ground before loading the conduit onto the lift and ascending to the installation location. If it is determined that additional bending is required, the electrician must then descend back to the ground to conduct the additional bending. In some instances, multiple ascents and descents are required to complete the electrical routing, all of which can significantly add to the time and expense of the electrical conduit installation. Further, in some instances, the electrician may be working with multiple conduit diameters, each of which requires its own specific tool to complete the desired bends.
Recent advances in conduit bending have seen an introduction of portable powered conduit benders. Various examples of such powered benders are disclosed in U.S. Pat. Nos. 7,900,495; 9,718,108 and U.S. Patent Publication No. 2009/0188291, assigned to Husky Tools, Inc. Another example of a bending apparatus is disclosed in U.S. Patent Publication No. 2008/0190164. The aforementioned disclosures are hereby incorporated by reference herein to the extent that they do not contradict teachings of the present disclosure.
Although these benders are satisfactory for their intended purpose, all include a single large exposed, single stage gear drive, which makes the bender both bulky and invites the possibility of injury, as the gear drive includes a pinch point which can bite the user or grab an article of clothing, such as a shirtsleeve, neck lanyard or safety vest. Further, exposure of the drive gear invites the possibility of inadvertent introduction of foreign matter between the gears, which can permanently damage the bender, thereby decreasing its usable life. The present disclosure addresses these concerns. |
import json

from django.http import HttpResponse

from mathapi.lib.math_ops import evaluate_fx, format_fx


def create_range(start, stop, step):
    # Build an inclusive list of sample points from start to stop with the given step.
    arr = [start]
    value = start
    while value < stop:
        value = value + step
        arr.append(value)
    return arr


def solve_points(fx, a_limit, b_limit, step):
    # Evaluate f(x) at every sample point between the two limits.
    x_values = create_range(a_limit, b_limit, step)
    y_values = []
    for x in x_values:
        result = evaluate_fx(fx, x)
        y_values.append(result)
    return {
        'x': x_values,
        'y': y_values
    }


def function_points(req):
    # Query parameters
    params = req.GET
    step = params.get('step')
    # JSON body: expression and limits
    body = json.loads(req.body.decode('utf-8'))
    fx = format_fx(body['fx'])
    from_limit = body['from']
    to_limit = body['to']
    step = float(step) if step is not None else 1
    points = solve_points(fx, from_limit, to_limit, step)
    res = json.dumps(points)
    return HttpResponse(res, content_type="application/json") |
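A hypothetical client call for the view above, assuming it is routed at /function_points in a urls.py that is not shown here, and assuming evaluate_fx accepts expressions such as "x**2":
import json
import requests

resp = requests.post(
    "http://localhost:8000/function_points?step=0.5",
    data=json.dumps({"fx": "x**2", "from": 0, "to": 10}),
    headers={"Content-Type": "application/json"},
)
print(resp.json())  # -> {'x': [...], 'y': [...]}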
Canonical normalisation and Yukawa matrices We highlight the important role that canonical normalisation of kinetic terms in flavour models based on family symmetries can play in determining the Yukawa matrices. Even though the kinetic terms may be correctly canonically normalised to begin with, they will inevitably be driven into a non-canonical form by a similar operator expansion to that which determines the Yukawa operators. Therefore in models based on family symmetry canonical re-normalisation is mandatory before the physical Yukawa matrices can be extracted. In nearly all examples in the literature this is not done. As an example we perform an explicit calculation of such mixing associated with canonical normalisation of the Kahler metric in a supersymmetric model based on SU family symmetry, where we show that such effects can significantly change the form of the Yukawa matrix. In principle quark mixing could originate entirely from canonical normalisation, with only diagonal Yukawa couplings before canonical normalisation. Introduction There is great interest in the literature in trying to understand the hierarchical pattern of Standard Model fermion masses, the smallness of the quark mixing angles and the two large and one small neutrino mixing angles. One popular way of doing this is to extend either the Standard Model, or one of its more common supersymmetric extensions, by adding a gauge or global family symmetry, G F which is subsequently broken. In such models based on family symmetry G F, Yukawa couplings arise from Yukawa operators which are typically non-renormalisable and involve extra heavy scalar fields,, coupling to the usual three fields, for example: where F represents left-handed fermion fields, F represents the CP -conjugate of righthanded fermion fields, H represents the Higgs field, and M is a heavy mass scale which acts as an ultraviolet (UV) cutoff. In the context of supersymmetric (SUSY) field theories, all the fields become superfields. The operators in Eq.1 are invariant under G F, but when the scalar fields develop vacuum expectation values (vevs) the family symmetry is thereby broken and the Yukawa couplings are generated. The resulting Yukawa couplings are therefore effective couplings expressed in terms of an expansion parameter,, which is the ratio of the vev of the heavy scalar field to the UV cutoff, = M. Explaining the hierarchical form of the Yukawa matrices then reduces to finding an appropriate symmetry G F and field content which leads to acceptable forms of Yukawa matrices, and hence fermion masses and mixing angles, at the high energy scale. Over recent years there has been a huge activity in this family symmetry and operator approach to understanding the fermion masses and mixing angles, including neutrino masses and mixing angles. However, as we shall show in this paper, in analysing such models it is important to also consider the corresponding operator expansion of the kinetic terms. The point is that, even though the kinetic terms may be correctly canonically normalised to begin with, they will inevitably be driven to a non-canonical form by a similar operator expansion to that which determines the Yukawa operator. In order to extract reliable predictions of Yukawa matrices, it is mandatory to canonically re-normalise the kinetic terms once again before proceeding. In nearly all examples in the literature this is not done. 
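Schematically, the operators discussed above take the following form (a sketch only; the powers of theta, the coefficients, and the index structure are model dependent):
\begin{align}
  W_{\rm Yuk} &\supset c_{ij}\,\frac{\theta^{n_{ij}}}{M^{n_{ij}}}\,F_i F^c_j H
    \;\xrightarrow{\ \langle\theta\rangle\ }\;
    Y_{ij} \sim c_{ij}\,\epsilon^{n_{ij}},
    \qquad \epsilon \equiv \frac{\langle\theta\rangle}{M}, \\
  K &\supset F_i^{\dagger}F_j\Big(\delta_{ij} + k_{ij}\,\frac{\theta^{\dagger}\theta}{M^{2}} + \cdots\Big)
    \;\Rightarrow\;
    \tilde K_{ij} = \delta_{ij} + k_{ij}\,|\epsilon|^{2} + \cdots
\end{align}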
The main point of our paper is thus to highlight this effect and to argue that it is sufficiently important that it must be taken into account before reliable predictions can be obtained. Many approaches combine the family symmetry and operator approach with supersymmetric grand unified theories (SUSY GUTs). Such models tend to be more constraining, because the Yukawa matrices at the high scale should have the same form, up to small corrections from the breaking of the unified symmetry. The same comments we made above also apply in the framework of SUSY GUTs. In the SUSY case the Yukawa operators arise from the superpotential W, and the kinetic terms and scalar masses, as well as gauge interaction terms come from the Khler potential, K. In nearly all examples in the literature the superpotential W has been analysed independently of the Khler potential, K, leading to most of the published results being inconsistent. The correct procedure which should be followed is as follows. To be consistent, the Khler potential, K, should also be written down to the same order M −n as the superpotential W. Having done this, one should proceed to calculate the elements of the Khler metric,K ij, which are second derivatives with respect to fields of the Khler potentialK ij = ∂ 2 K ∂ i ∂ j. However, in order to have canonically normalised kinetic terms, the Khler metric has to itself be canonically normalised K ij = ij. In making this transformation, the superfields in the Khler potential are first being mixed and then rescaled. Once this has been done, the superfields in the superpotential must be replaced by the canonically normalised fields. Canonical normalisation is not of course a new invention, it has been known since the early days of supergravity. However, as we have mentioned, for some reason this effect has been largely ignored in the model building community. A notable exception is the observation some time ago by Dudas, Pokorski and Savoy, that the act of canonical normalisation will change the Yukawa couplings, and could serve to cover up "texture zeros", which are due to an Abelian family symmetry which does not allow a specific entry in the Yukawa matrix and is therefore manifested as a zero at high energies. This issue has been resurrected for abelian family models recently. However, as we have already noted, this observation has not been pursued or developed in the literature, but instead has been largely ignored. In this paper we consider the issue of canonical normalisation in the framework of non-Abelian symmetries, in which the Yukawa matrices are approximately symmetric. In such a framework we show that the effects of canonical normalisation extend beyond the filling in of "texture zeros", and can also change the expansion order of the leading non-zero entries in the Yukawa matrix. As an example we perform an explicit calculation of such mixing associated with canonical normalisation of the Khler metric in a recent supersymmetric model based on SU family symmetry where we show that such effects can significantly change the form of the Yukawa matrix. The SU model we consider is a grossly simplified version of the realistic model in, where we only consider the case of a single expansion parameter and perform our calculations in the 23 sector of the theory for simplicity, although we indicate how the results can straightforwardly be extended to the entire theory. 
An alternative scenario in which in principle quark mixing could originate entirely from canonical normalisation, with only diagonal Yukawa couplings before canonical normalisation, is also discussed. The outline of the rest of the paper is as follows. In section 2 we discuss the issues surrounding canonical normalisation in the Standard Model supplemented by a family symmetry, first without then with SUSY. In the SUSY case we discuss the scalar mass squared and Yukawa matrices for two types of Khler potentia where only one superfield contributes to supersymmetry breaking. In section 3 we discuss a particular model in some detail as a concrete example, namely the simplified SU family symmetry model, focusing on the second and third generations of matter, later indicating how the results can be extended to all three families. We conclude in section 5. Canonical normalisation 2.1 Standard Model with a Family Symmetry In this section we first consider extending the Standard Model gauge group with a family symmetry, under which each generation has a different charge ( for abelian family symmetries ) or representation ( for non-abelian family symmetries ). The family symmetry typically prohibits renormalisable Yukawa couplings (except possibly for the third family) but allows non-renormalisable operators, for example: where i, j are generation indices, M is some appropriate UV cutoff, F represents left-handed fermion fields, and F represents CP -conjugates of right-handed fermion fields, and H is a Higgs field. When the flavon scalar field gets a vev, which breaks the family symmetry, effective Yukawa couplings are generated: The effective Yukawa matrices are determined by the operators allowed by the symmetries of the model, as well as the form that the vev of takes. Even though the kinetic terms are correctly canonically normalised to begin with, they will receive non-renormalisable corrections arising from operators allowed by the family symmetry, which will cast them into non-canonical form. For example, This leads to a non-canonical kinetic term when is replaced by its vev. It is therefore mandatory to perform a further canonical re-normalisation of the kinetic terms, before analysing the physical Yukawa couplings. The canonical normalisation amounts to a transformation which is not unitary but which gives all the fields canonical kinetic terms. The kinetic part of a theory with a Higgs scalar field H, a fermionic field F i and the field strength tensor F corresponding to a gauge field A when canonical will look like: Once we have done this normalisation, we have to rewrite all of our interactions in terms of the canonical fields with the shifted fields. The important point we wish to emphasise is that all the interaction terms should be expressed in terms of canonical fields, before making any physical interpretation. If this is not done, as is often the case in the literature, then the results will not be reliable. SUSY Standard Model with Family Symmetry In the context of supersymmetric theories, it turns out to be possible to automatically canonically normalise all the fields in the theory at once. However these transformations are not always simple, and in practice calculating the relevant transformations may well turn out to be intractable for any given model. The aim of SUSY model builders with respect to flavour is two-fold. 
The primary wish is to generate a set of effective Yukawa matrices which successfully predict the quark and lepton masses and mixing angles as measured by experiment. However, because of the parameters associated with softly broken SUSY models, there exist dangerous one-loop diagrams which lead to processes such as b → s and → e at rates much greater than predicted by the Standard Model and also much greater than measured by experiment. A successful SUSY theory of flavour will therefore successfully describe fermion masses and mixing angles, while simultaneously controlling such flavour changing processes induced by loop diagrams involving sfermion masses which are off-diagonal in the basis where the quarks and leptons are diagonal. In a SUSY or supergravity (SUGRA) model, very often the starting point in addressing the flavour problem is to propose a set of symmetries that will give rise to non-renormalisable superpotential operators which will lead to a hierarchical form for our Yukawa matrices, arising from some effective Yukawa operators as discussed previously. Extra fields, are introduced that spontaneously break the extra family symmetries. The general form of the superpotential is : Here w ij (/M) is a general function of the extra fields,, which has mass dimension zero and contracts with F i F j to make W a singlet of the extended symmetry group. In models of this type, the amount of flavour violation is proportional to the size of We now proceed to outline the argument that this will not be a problem in general terms in the simplest case, gravity-mediated supersymmetry breaking, where the breaking is due to a single hidden sector superfield, S. As examples, we shall consider the Khler potential in two forms. The first form is: The second form we consider is: Here k() and k() represent functions of the various fields that can be contracted with the matter fields to make the Khler potential a singlet and of the correct mass dimension. Since we are looking at gravity-mediated SUSY breaking, we may use the SUGRA equations which relate the non-canonically normalised soft scalar mass squared matrices m 2 ab in the soft SUSY breaking Lagrangian to the Khler metricK ab = ∂ 2 K ∂ a ∂ b, and the vevs of the auxiliary fields which are associated with the supersymmetry breaking, F m : where we have assumed a negligibly small cosmological constant. Roman indices from the middle of the alphabet are taken to be over the hidden sector fields, which in our case can only be the singlet field S associated with SUSY breakdown. As it happens, for both K 1 and K 2, the non-canonically normalised mass matrix reduces to: This is obvious for K 1, since the Khler metric doesn't involve S, so partial derivatives with respect to S will give zero. To see that eq. reduces to eq. for K 2, is less obvious. We first write: Substituting this into eq. gives a non-canonically normalised scalar mass squared matrix: It is clear that eq. reduces to eq.. However, the physical states are those for which the Khler metric is canonically normalised,K = 1. This is attained b P KP = 1. In order to canonically normalise the mass matrix, we apply the same transformation, and find that the canonically normalised squark mass squared matrix then takes the universal form: We conclude that models with Khler potentials like K 1 or K 2 will result in universal sfermion masses at the high-energy scale. 
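Schematically, the chain of statements in the preceding paragraphs can be summarised as follows (a sketch in the notation used above; the index placement and the Higgs rescaling factor are only indicative):
\begin{align}
  \tilde K &= P^{\dagger} P, \qquad \Phi_c = P\,\Phi
    \;\Rightarrow\; \Phi^{\dagger}\tilde K\,\Phi = \Phi_c^{\dagger}\Phi_c
    \ \text{(canonical kinetic terms)}, \\
  m^2_{\rm canonical} &= (P^{-1})^{\dagger}\,\big(m_{3/2}^{2}\,\tilde K\big)\,P^{-1}
    = m_{3/2}^{2}\,\mathbf{1}, \\
  W = Y_{ij}\,F_i F^c_j H
    &\;\longrightarrow\;
  Y'_{ab} = \big(P_F^{-1}\big)_{ia}\, Y_{ij}\, \big(P_{F^c}^{-1}\big)_{jb}\,\times\,\big(P_H^{-1}\big)
\end{align}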
Of course all this is well known, and it has long been appreciated that this would tame the second part of the flavour problem, flavour violating decays. However, what is less well appreciated, at least amongst the model building community, is that canonical normalisation corresponds to redefining the fields in the Kähler potential, and one must therefore also redefine these fields in the same way in the superpotential. Unless this is done consistently it could lead to a problem with the first part of the flavour problem, because the shifted fields may well no longer lead to a phenomenologically successful prediction of the masses and mixing angles for the quarks and leptons. The procedure is easily formalised in SUSY theories by writing all the fields as Φ_i, i = 1 … n. Then, at least in principle, if not in practice, a matrix P can be found which transforms the vector Φ to the vector Φ_c for which the Kähler metric is canonically normalised: This procedure can be followed provided P is not a singular matrix. Having found P, we can write Φ = P^{-1}Φ_c. We can then substitute these into the superpotential, now expressed in terms of fields which correspond to a correctly canonically normalised Kähler metric: It should be noted that despite the shifts originating from the Kähler potential, which is a non-holomorphic function of the fields, the shifted fields only depend on the vevs of the flavon and its hermitian conjugate. Therefore, the shifted superpotential will remain holomorphic in terms of fields. That is to say, the shifted fields will be a function of the corresponding unshifted field and the vevs which break the family symmetry. Here Φ represents any field, ⟨θ⟩ the vev of a flavon field, and ⟨θ†⟩ the vev of the hermitian conjugate of a flavon field: At this point, if we had a specific model, we would then need to check that the Yukawas are viable. This is then the correct procedure which must be followed in analysing a general model. We now turn to a particular example which illustrates the effects described above, in the framework of a non-Abelian family symmetry. As summarised in Table 1, the left-handed matter is contained in F_i, and the right-handed matter is contained in a left-handed field F^c_i. The MSSM Higgs doublets are contained in H, and a further field has broken SO(10) to SU(4)_PS ⊗ SU(2)_L ⊗ SU(2)_R. There are two SU(3)_F-breaking fields, φ_3 and φ_23. The superpotential has to be a singlet under the combined gauge group SU(4)_PS ⊗ SU(2)_L ⊗ SU(2)_R ⊗ SU(3)_F and also neutral under Z_2 ⊗ U(1). Because of this, the standard renormalisable Yukawa superpotential is not allowed by the Z_2 ⊗ U(1) symmetry. As such, we have to move to a superpotential containing non-renormalisable terms. We view this as being the superpotential corresponding to a supersymmetric effective field theory, where some heavy messenger states and their superpartners have been integrated out. Then, assuming that the messenger states have the same approximate mass scale, we write: The a_i are parameters that are expected to be of the order of unity, and M is the appropriate UV cutoff of the effective field theory. This will clearly lead to a set of effective Yukawa terms when the fields φ_3 and φ_23 gain vevs which break the family symmetry. We choose the vacuum structure after King and Ross: And we then trade these for a single expansion parameter, ε ≈ 1/10. We write the resulting matrix as Y^{n.c.} to represent the fact that it is the Yukawa matrix corresponding to the non-canonical Kähler metric.
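Schematically, and with the caveat that the precise placement of daggers, transposes and Higgs-field rescalings depends on conventions not spelled out here, the field redefinition described above acts on such a Yukawa superpotential as

\[
W \;=\; Y^{\mathrm{n.c.}}_{ij}\,F_i\,F^c_j\,H
\;\;\xrightarrow{\ \Phi\,=\,P^{-1}\Phi_c\ }\;\;
W \;=\; Y^{\mathrm{c.}}_{kl}\,F_{c\,k}\,F^c_{c\,l}\,H_c,
\qquad
Y^{\mathrm{c.}} \;\sim\; \big(P_{F}^{-1}\big)^{T}\,Y^{\mathrm{n.c.}}\,P_{F^c}^{-1}\,,
\]

where P_F and P_{F^c} denote the blocks of P acting on the left-handed and right-handed matter fields respectively. Because P depends only on the flavon vevs (numbers rather than superfields), the redefined superpotential remains holomorphic, exactly as stated in the text.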
The squark sector

In order to write down the squark mass matrices, the first step is to write down our Kähler potential. This should be the most general Kähler potential consistent with the symmetries of our model, up to the same order in inverse powers of the UV cutoff as the superpotential is taken to. In our case, this is M^{-3}. However, from the general arguments of section 2, we know that if we pick our Kähler potential K to be of the same form as K_1 (eq. ) or K_2 (eq. ) then we will have universal scalars. The non-canonical form of the scalar mass-squared matrix is: However, we already know exactly what the canonical form of this matrix will look like: This universal form is a direct result of the simple supersymmetry breaking mechanism that we have assumed together with canonical normalisation, and is independent of other details of the model.

Kähler potential for the model

We saw in the previous subsection that we will not end up with dangerous off-diagonal elements in the scalar mass matrices for general Kähler potentials of the type we are going to look at. We must now write down our Kähler potential. We choose this to be of the same form as K_1. There will be no M^{-3} terms, so it will suffice to write the potential to this order. The matrices which diagonalise the Kähler metric will in general be large and intractable. In order to proceed, we will have to make some simplifying assumptions. We first assume that the Kähler metric K̃_ab = ∂²K/(∂Φ_a ∂Φ†_b) is block diagonal, of the form: In this, K̃_LH represents the block for chiral superfields F containing left-handed matter; K̃_RH represents chiral superfields F^c containing right-handed matter; a further block represents the SU(3)_F-breaking fields φ_23 and φ_3; another block corresponds to the Higgs fields that break the GUT symmetry down to the MSSM gauge group; finally, the block K̃_H represents the block corresponding to the MSSM Higgs fields H. The block diagonal assumption is equivalent to switching off some terms in the Kähler potential. The remaining terms in the Kähler potential are listed below: Having done this, we now need to calculate the Kähler metric K̃. But since we have set K up specifically such that it is block diagonal, we can instead work out the non-zero blocks: K̃_LH, K̃_RH and the flavon, GUT-Higgs and MSSM-Higgs blocks. Once we have done so, we need to canonically normalise them. This is done in two stages. The first is a unitary transformation to diagonalise each block K̃_i: The mixed Kähler metric K̃ is now diagonal. Then we rescale the fields by a diagonal matrix R such that R_i = (K̃_i)^{-1/2}. These new superfields are then canonically normalised. Then: If we call P the matrix which converts F to the canonical field F_c, then we can note two things. Firstly, P = R^{-1}U. Secondly, we can read off: So the Kähler metric is equal to P†P. The important point to note is that in canonically normalising, we have redefined our superfields, so we must also redefine them in our superpotential. This is discussed in the next section.

Yukawa sector after canonical normalisation

In this section we return to the important question of the form of the Yukawa matrices in the correct canonically normalised basis. In order to do this we would have to calculate the shifting in all of the fields in the superpotential. Unfortunately, algebraically diagonalising the sub-block for the flavon fields is intractable, even for such a simple model. We therefore make a second assumption and neglect the effects of canonical normalisation arising from this sector, although we shall correctly consider the effects of canonical normalisation arising from all the other sectors. Even making this assumption, the expressions we get are not especially pleasant. We then substitute in the form of the vevs (eq.
and eq. ). Having done this, we then expand the cofactors of F_i F^c_j H as a power series in ε around the point ε = 0. The cofactors of ε^n are quite complicated, so we only write out here the expression for the effective Yukawa for the 22 element. The full expressions for all four elements are listed in Appendix A. The important point to note is that, compared to the 23 element of Eq. 21, the leading order of the expansion in ε has changed: it is no longer at ε^3, it is now at ε^2. Note that we can write the expressions for the canonically normalised off-diagonal Yukawa matrix elements Y_23 and Y_32 in such a way that they would transform into each other if we interchange b_i ↔ c_i, as would be expected. We also note that the diagonal matrix elements would transform into themselves under the same substitution, b_i ↔ c_i. This has been checked explicitly to the order in the Taylor expansion shown in the Appendix. Setting the O(1) parameters b_i, c_i and d_i to unity, the Yukawa matrix then takes the canonical form: We emphasise again that Eq. 30 has a different power structure in ε to the original, non-canonically normalised Yukawa in eq. What has happened is that the unitary matrix which redefines our fields has mixed them amongst themselves. This leads to a similar (but different) high energy Yukawa texture. This could certainly be a sufficiently different set-up to ruin any predictions that the non-canonical model was designed to make. However, we emphasise that this result applies to the simplified SU(3)_F model with a single expansion parameter, and not the realistic SU(3)_F model of King and Ross with two different expansion parameters. By comparing the non-canonical Yukawa matrix in eq. to the canonical Yukawa matrix in eq., we can see that the Kähler mixing angles are large, of O(ε). In the appendix, we give an expression for the inverse P-matrix, P^{-1}. The large mixing effect can come only from the mixing part of the transformation. Schematically, the appearance of the ε^2 leading order terms in the off-diagonal elements can then be understood by neglecting all the O(1) coefficients, as follows: which accounts for the appearance of the ε^2 leading order terms in the off-diagonal elements.

Three generations of matter

The procedure we have discussed for the second and third families can straightforwardly be generalised to include also the first family, or indeed any number of generations. The first thing to do is to write down all of the symmetries of the model. We then substitute eq. into the superpotential. Once we have done this, the canonically normalised Yukawa matrix will be the coefficient of F_i F^c_j H. Having completed this, the end result is canonically normalised three-generation Yukawa matrices, as required. Note that any step of this calculation could in principle be intractable, and therefore some simplifying assumptions may have to be made.

Canonical origin of mixing angles

It is possible in principle that all fermion mixing angles could originate from diagonal Yukawa couplings, via canonical normalisation.
To illustrate the idea, consider a two generation model in which the non-canonical Yukawa matrix is diagonal, with the 33 element dominating over the 22 element: the off-diagonal elements vanish and the 22 element is much smaller than the 33 element, t. In general, the mixing part of the canonical normalisation can be parameterised by a unitary rotation matrix U, and the rescaling can be parameterised by a diagonal matrix R: This leads to a canonical Yukawa matrix. Now consider values of the parameters such that sin θ ≈ ε, the 22 element is of order ε^n with n > 3, r_1 ≈ r_2 ≈ 1 and t ≈ ε: By taking the leading order in ε, and the leading two orders in ε in the 33 element, we can get a Yukawa matrix, post canonical normalisation: This looks remarkably like the Yukawa matrix in the full case before canonical normalisation (eq. ).
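A minimal worked sketch of this mechanism (the parameterisation and the precise form of the transformation below are illustrative assumptions rather than the equations of the text, and the exact ordering of U, R and their conjugates is convention dependent):

\[
Y^{\mathrm{n.c.}}=\begin{pmatrix} y & 0\\[2pt] 0 & t\end{pmatrix},\qquad
U=\begin{pmatrix} \cos\theta & \sin\theta\\[2pt] -\sin\theta & \cos\theta\end{pmatrix},\qquad
R=\begin{pmatrix} r_1 & 0\\[2pt] 0 & r_2\end{pmatrix},
\]
\[
Y^{\mathrm{c.}}\;\sim\;R\,U\,Y^{\mathrm{n.c.}}\,U^{T}R
=\begin{pmatrix}
r_1^{2}\,(y\cos^{2}\theta+t\sin^{2}\theta) & r_1 r_2\,(t-y)\sin\theta\cos\theta\\[2pt]
r_1 r_2\,(t-y)\sin\theta\cos\theta & r_2^{2}\,(y\sin^{2}\theta+t\cos^{2}\theta)
\end{pmatrix}.
\]

With sin θ ≈ ε, y ≪ t, r_1 ≈ r_2 ≈ 1 and t ≈ ε, the off-diagonal entries are of order ε t ≈ ε², so an apparently non-trivial Yukawa texture can indeed be generated from a diagonal starting point purely by the normalisation.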
Conclusions

We have highlighted the important rôle that canonical normalisation of kinetic terms in flavour models based on family symmetries can play in determining the Yukawa matrices. Even though the kinetic terms may be correctly canonically normalised to begin with, we have shown that they will inevitably be driven into a non-canonical form by a similar operator expansion to that which determines the Yukawa operators. Therefore, in models based on family symmetry, canonical re-normalisation is mandatory before the physical Yukawa matrices can be extracted. In SUSY models with family symmetry, the Kähler potential should be considered to the same order in the UV cutoff as one takes in the superpotential. Having done so, the Kähler metric, which follows from the Kähler potential, should be canonically normalised. This will save the model from dangerous off-diagonal scalar mass mixing terms in the super-CKM basis (and its leptonic analogue), but the fields appearing in the superpotential must be redefined, leading to modified predictions for the Yukawa matrices. We have performed an explicit calculation of such mixing associated with canonical normalisation of the Kähler metric in a supersymmetric model based on SU(3) family symmetry, and shown that such effects can significantly change the form of the Yukawa matrix. In the simplified example considered, one off-diagonal Yukawa element loses one power of an expansion parameter, ε ≈ 1/10, corresponding to that element growing by an order of magnitude. We emphasise that this result does not imply that the full realistic SU(3)_F model of King and Ross with two different expansion parameters is incorrect. The analysis of the realistic SU(3)_F model with two different expansion parameters is more subtle, and such models may remain completely viable after canonical normalisation. We have also pointed out that the canonical form of the scalar mass matrices takes a universal form as a direct result of the simple supersymmetry breaking mechanism we have assumed. The effects of canonical normalisation on the scalar mass matrices in the realistic SU(3)_F models recently considered in the literature must therefore also be reconsidered. Finally, we have pointed out that in principle quark mixing could originate entirely from canonical normalisation, with only diagonal Yukawa couplings before canonical normalisation. Although we have only considered a two family example explicitly, we have indicated how the procedure generalises to the full three family case. In conclusion, when looking at the flavour problem in effective field theories based on family symmetries, it is not enough just to find operators which give a viable Yukawa structure. It is also necessary to examine the structure of the kinetic terms, and ensure that the Yukawa structure remains viable after canonically normalising the kinetic terms, which redefines the fields. These follow from the expressions for the inverse P-matrix after it has been Taylor expanded in ε to order ε^3 around the point ε = 0. The full expression for the left-handed P-matrix is then, to sub-leading order in ε: The structure of the right-handed equivalent is exactly the same, but with every b_i replaced with a c_i.
Outcome in Patients Treated With Cytoreductive Surgery and HIPEC for Gastric Cancer With Peritoneal Carcinomatosis Background/Aim: We compared patients with advanced gastric cancer, patients with stage UICC IV disease and peritoneal carcinomatosis treated with cytoreductive surgery (CRS) and hyperthermic intraperitoneal chemotherapy (HIPEC), and patients with stage UICC IV disease treated without HIPEC, to ascertain whether CRS and HIPEC improve overall survival (OS). Patients and Methods: We retrospectively analysed thirty-seven advanced gastric cancer patients who had been treated at our department from 2012 to 2017. The endpoint was median OS. Results: Eighteen (49%) patients with UICC stage III showed a median OS of 37.4 months. Eight (21%) patients in the HIPEC group reached a median OS of 33.8 months. Median OS in the UICC IV group (11 patients, 30%) treated with a palliative concept was 6.2 months and therefore significantly worse (p=0.004). Conclusion: A systemic approach combined with CRS and HIPEC in selected stage IV gastric cancer patients improves OS to a level comparable to that of patients in UICC stage III.
/*********************************************************************
*
* Software License Agreement (BSD License)
*
* Copyright (c) 2015-2016, <NAME>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the <NAME> nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************/
#ifndef ESTIMATION_INTERNAL_H_
#define ESTIMATION_INTERNAL_H_
#include <combine_grids/merging_pipeline.h>
#include <cassert>
#include <iostream>
#include <string>
#include <vector>
#include <opencv2/core/utility.hpp>
#include <opencv2/core/version.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d/nonfree.hpp"
#endif
namespace combine_grids
{
namespace internal
{
#if CV_VERSION_MAJOR >= 4
static inline cv::Ptr<cv::Feature2D> chooseFeatureFinder(FeatureType type)
{
switch (type) {
case FeatureType::AKAZE:
return cv::AKAZE::create();
case FeatureType::ORB:
return cv::ORB::create();
case FeatureType::SURF:
#ifdef HAVE_OPENCV_XFEATURES2D
return cv::xfeatures2d::SURF::create();
#else
return cv::AKAZE::create();
#endif
}
// unreachable for valid FeatureType values; mirrors the pre-OpenCV-4 branch
assert(false);
return {};
}
#else // (CV_VERSION_MAJOR < 4)
static inline cv::Ptr<cv::detail::FeaturesFinder>
chooseFeatureFinder(FeatureType type)
{
switch (type) {
case FeatureType::AKAZE:
return cv::AKAZE::create();
case FeatureType::ORB:
return cv::ORB::create();
case FeatureType::SURF:
#ifdef HAVE_OPENCV_XFEATURES2D
return cv::xfeatures2d::SURF::create();
#else
return cv::AKAZE::create();
#endif
}
assert(false);
return {};
}
#endif // CV_VERSION_MAJOR >= 4
static inline void writeDebugMatchingInfo(
const std::vector<cv::Mat>& images,
const std::vector<cv::detail::ImageFeatures>& image_features,
const std::vector<cv::detail::MatchesInfo>& pairwise_matches)
{
for (auto& match_info : pairwise_matches) {
if (match_info.H.empty() ||
match_info.src_img_idx >= match_info.dst_img_idx) {
continue;
}
std::cout << match_info.src_img_idx << " " << match_info.dst_img_idx
<< std::endl
<< "features: "
<< image_features[size_t(match_info.src_img_idx)].keypoints.size()
<< " "
<< image_features[size_t(match_info.dst_img_idx)].keypoints.size()
<< std::endl
<< "matches: " << match_info.matches.size() << std::endl
<< "inliers: " << match_info.num_inliers << std::endl
<< "inliers/matches ratio: "
<< match_info.num_inliers / double(match_info.matches.size())
<< std::endl
<< "confidence: " << match_info.confidence << std::endl
<< match_info.H << std::endl;
cv::Mat img;
// draw all matches
cv::drawMatches(images[size_t(match_info.src_img_idx)],
image_features[size_t(match_info.src_img_idx)].keypoints,
images[size_t(match_info.dst_img_idx)],
image_features[size_t(match_info.dst_img_idx)].keypoints,
match_info.matches, img);
cv::imwrite(std::to_string(match_info.src_img_idx) + "_" +
std::to_string(match_info.dst_img_idx) + "_matches.png",
img);
// draw inliers only
cv::drawMatches(
images[size_t(match_info.src_img_idx)],
image_features[size_t(match_info.src_img_idx)].keypoints,
images[size_t(match_info.dst_img_idx)],
image_features[size_t(match_info.dst_img_idx)].keypoints,
match_info.matches, img, cv::Scalar::all(-1), cv::Scalar::all(-1),
*reinterpret_cast<const std::vector<char>*>(&match_info.inliers_mask));
cv::imwrite(std::to_string(match_info.src_img_idx) + "_" +
std::to_string(match_info.dst_img_idx) +
"_matches_inliers.png",
img);
}
}
} // namespace internal
} // namespace combine_grids
#endif // ESTIMATION_INTERNAL_H_
|
import clr
import sys
import os

# Make the DHI MIKE SDK assemblies discoverable from either the 2019 or the
# 2020 installation, then load the required .NET assemblies via pythonnet.
sys.path.append(r"C:/Program Files (x86)/DHI/2019/bin/x64")
sys.path.append(r"C:/Program Files (x86)/DHI/2020/bin/x64")
clr.AddReference("DHI.Generic.MikeZero.DFS")
clr.AddReference("DHI.Generic.MikeZero.EUM")
clr.AddReference("DHI.Projections")
clr.AddReference("System")
clr.AddReference("System.Runtime.InteropServices")
clr.AddReference("System.Runtime")
|
package at.codetales.iotflux.persons;
import at.codetales.iotflux.persons.handler.PersonHandler;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.reactive.function.server.RouterFunction;
import org.springframework.web.reactive.function.server.ServerResponse;
import static org.springframework.web.reactive.function.server.RequestPredicates.*;
import static org.springframework.web.reactive.function.server.RouterFunctions.route;
@Configuration
public class PersonEndpointConfiguration {
@Bean
RouterFunction<ServerResponse> peopleRoutes(PersonHandler handler) {
return route(GET("/peopleFN"), handler::listAllPersons)
.andRoute(GET("/peopleFN/{id}"), handler::findById)
.andRoute(POST("/addPersonFN"), handler::addPerson)
.andRoute(DELETE("/clearPeopleFN"), handler::clearPeople);
}
}
|
Foreheadplasty: A Selective Approach. The importance of resecting a strip of galea in surgery for the elimination of forehead wrinkles is reemphasized. High placement and conservative excision of the frontalis muscle will prevent depression. Elevation of ptotic eyebrows requires a separate procedure. Segmented lift of the lower brow is discussed to suit individual needs. These procedures are not without pitfalls, but the pitfalls can easily be prevented by careful planning and execution.
/*
* Developer : <NAME> Vimal
* License Under Developer *
*/
package ui.reports.print.model;
/**
*
* @author <NAME>
*/
public class RegRetModel {
private Object sn;
private Object itemCode;
private Object itemName;
private Object unitName;
private Object quantity;
private Object rate;
private Object itemAmount;
private Object billSundry;
private Object atTheRate;
private Object miscAmount;
private Object rowAmount;
public RegRetModel(Object sn, Object itemCode, Object itemName, Object unitName, Object quantity, Object rate, Object itemAmount, Object billSundry, Object atTheRate, Object miscAmount, Object rowAmount) {
this.sn = sn;
this.itemCode = itemCode;
this.itemName = itemName;
this.unitName = unitName;
this.quantity = quantity;
this.rate = rate;
this.itemAmount = itemAmount;
this.billSundry = billSundry;
this.atTheRate = atTheRate;
this.miscAmount = miscAmount;
this.rowAmount = rowAmount;
}
public Object getSn() {
return sn;
}
public void setSn(Object sn) {
this.sn = sn;
}
public Object getItemCode() {
return itemCode;
}
public void setItemCode(Object itemCode) {
this.itemCode = itemCode;
}
public Object getItemName() {
return itemName;
}
public void setItemName(Object itemName) {
this.itemName = itemName;
}
public Object getUnitName() {
return unitName;
}
public void setUnitName(Object unitName) {
this.unitName = unitName;
}
public Object getQuantity() {
return quantity;
}
public void setQuantity(Object quantity) {
this.quantity = quantity;
}
public Object getRate() {
return rate;
}
public void setRate(Object rate) {
this.rate = rate;
}
public Object getItemAmount() {
return itemAmount;
}
public void setItemAmount(Object itemAmount) {
this.itemAmount = itemAmount;
}
public Object getBillSundry() {
return billSundry;
}
public void setBillSundry(Object billSundry) {
this.billSundry = billSundry;
}
public Object getAtTheRate() {
return atTheRate;
}
public void setAtTheRate(Object atTheRate) {
this.atTheRate = atTheRate;
}
public Object getMiscAmount() {
return miscAmount;
}
public void setMiscAmount(Object miscAmount) {
this.miscAmount = miscAmount;
}
public Object getRowAmount() {
return rowAmount;
}
public void setRowAmount(Object rowAmount) {
this.rowAmount = rowAmount;
}
}
|
// ListNodes returns the list of nodes, optionally filtered by tag or name.
func (s *Service) ListNodes(req models.FilterNodes) ([]*models.Host, error) {
members := s.Client.Members()
nodesResp := []*models.Host{}
for _, n := range members {
tags := convertTags(n.Tags)
if req.Tag != "" {
if findInTags(req.Tag, tags) {
nodesResp = append(nodesResp, convertHost(n))
}
continue
}
if req.Name != "" {
if req.Name == n.Name {
nodesResp = append(nodesResp, convertHost(n))
}
continue
}
if req.Name == "" && req.Tag == "" {
nodesResp = append(nodesResp, convertHost(n))
}
}
return nodesResp, nil
} |
On Tuesday, U.S. District Court Judge Philip Gutierrez issued an order preventing President Trump from revoking DACA protections and ordering the administration to reinstate all those who have been dropped from the program. This is more astounding judicial overreach in a year marred by such actions. The initial DACA program was a memo, not legislation, not even a legitimate executive order or regulation. Trump has every right as president to revoke it and enforce that revocation, but this judge has the gall to say he can't.
An earlier circuit court decision handed down in January, blocked Trump from ending DACA in the first place – an action he had initiated last year. The Department of Justice issued an emergency appeal to the Supreme Court to override this decision, but on Monday, the Supreme Court announced that it would not hear it. While this is standard practice when an issue is being litigated in lower courts, the clear abuse of power exercised by these lower courts should compel the Supremes to consider this a special case.
We have been watching this outrageous judicial usurpation of power for a full year. It is sheer lawlessness and has to stop. All public officials swear an oath to protect and defend, not subvert, the Constitution. What they are doing is a threat to the very legal foundations of our Republic.
There is only one punitive remedy that can be taken against such judges. Congress can impeach them. Unfortunately, Congress has impeached only 15 federal judges in its entire history, and only eight of those were actually removed. A good example is the case of Alcee I. Hastings (another U.S. district court judge), who was impeached for accepting a $150,000 bribe to reduce sentences for two mobsters. That is certainly a clear-cut case for impeachment, not to mention significant jail time, but so are these overtly partisan decisions made by judges specifically to thwart the irrefutable authority vested in the presidency.
This kind of legal subversion is actually much more egregious, because it threatens the entire legal structure of our country. If these jurists can successfully usurp the authority of a democratically elected president, it essentially negates the election. To the extent that this is allowed to stand, our nation is no longer a republic, but increasingly resembles a third-world dictatorship.
There is a precedent to convict for such behavior, but not much of one. Supreme Court judge Samuel Chase was impeached by the House of Representatives in 1804 for actions described as "tending to prostitute the high judicial character with which he was invested, to the low purpose of an electioneering partizan" (sic). Unfortunately, the Senate failed to convict, and his case is said to have insulated Supreme Court justices from similar action ever since.
The Supreme Court did hand President Trump a victory on the immigration front this week. It will not affect the DACA controversy, but puts to rest some other legal challenges to Trump's arrest and deportation of criminal aliens. Not surprisingly, this was a defeat for the 9th Circuit, where much of the court obstruction to Trump's immigration policies is originating. The government can now hold criminal aliens indefinitely and can deny bail.
A lower court also ruled this week that the administration could waive environmental regulations to expedite construction of the border wall. So the week was not a complete loss, but it remains frustrating that certain blatantly partisan jurists continue to abuse their power in obstructing the Trump administration's electoral mandate to reverse President Obama's most egregious unconstitutional actions. |
Polygenic prediction of school performance in children with and without psychiatric disorders Suboptimal school performance is often seen in children with psychiatric disorders and is influenced by both genetics and the environment. Educational attainment polygenic score (EA-PGS) has been shown to significantly predict school performance in the general population. Here we analyze the association of EA-PGS with school performance in 18,495 children with, and 12,487 without, one or more of six psychiatric disorders and show that the variance explained in school performance by the EA-PGS is substantially lower in children with attention deficit hyperactivity disorder (ADHD) and autism spectrum disorder (ASD). Accounting for parents' socioeconomic status obliterated the variance difference between ADHD (but not ASD) and controls. Given that a large proportion of the prediction performance of EA-PGS originates from the family environment, our findings hint that family environmental influences on school performance might differ between ADHD and controls; studying this further will open new avenues to improve the school performance of children with ADHD.
This invention relates to dielectric isolation for integrated semiconductor devices, and more particularly to a method of manufacturing integrated semiconductor circuit devices, which is adapted to simultaneously form narrow isolation regions and wide field regions, with high surface planarity, by means of simple steps.
In bipolar type integrated semiconductor circuit devices, the active elements are generally isolated by the PN junction isolation. However, with increasing demand for smaller device sizes and higher packing density, it has become necessary to reduce the isolation areas. The PN junction isolation has been gradually superseded by the oxide isolation (the so-called Isoplanar Process) using thick oxide formed in the silicon substrate through local oxidation.
The oxide isolation method typically comprises placing on a silicon substrate an oxidation-resistant masking layer formed of a composite layer of a thin silicon oxide film and a silicon nitride film, etching the surface of the silicon substrate to form mesa regions for active elements under the masking layers, and thermally oxidizing the silicon substrate to form thick silicon oxide as field isolation regions surrounding the mesa regions. According to this method, the thermally grown thick oxide has increased volume to present surfaces nearly flush with the surfaces of the device regions.
As compared with the PN junction, the oxide isolation can reduce the widths and areas of the isolation regions, and can also reduce stray capacitances between the surface conductors and the substrate owing to the thick silicon oxide forming all regions other than the device regions (hereinafter called "the field region(s)"), thereby contributing to an increase in the switching speed of the resulting transistors.
However, during the above thermal oxidation step, since lateral oxidation causes formation of "bird's beak" and "bird's head" between the silicon substrate and the oxidation-resistant masking layer, the widths of the isolation regions become greater than the allowable minimum dimension obtained by conventional photolithography, which is approximately 10 microns. Also, the bird's beak and bird's head spoil the perfect planarity of the substrate surface.
To overcome such disadvantages, an improved isolation technique has been proposed, as represented, e.g., by a process described in a paper entitled "A Method for Area Saving Planar Isolation Oxidation Protected Sidewalls" by D. Kahng et al, published in Solid-State Science and Technology, issued by the Journal of the Electrochemical Society, Vol. 127, No. 11, November, 1980, pp. 2468-2470. According to this process (hereinafter referred to as "Improved Local Oxidation Process"), in addition to a first oxidation-resistant layer of silicon nitride deposited over the top surfaces of mesa regions, a second oxidation-resistant layer of silicon nitride is deposited, by chemical vapor deposition, on the sidewalls of the mesa regions. The Improved Local Oxidation Process can thus prevent widening of the isolation regions caused by the lateral oxidation and formation of bird's beak and bird's head, and can achieve flattening of the silicon substrate surface irrespective of the width of the isolation regions to be formed, by simple steps.
However, according to this process, it takes an impracticably long time to form thick field regions by oxidizing through an epitaxial layer on the surface of the silicon substrate. To shorten the oxidizing time, buried regions have to be formed in the surface of a silicon substrate by using a mask before the formation of an epitaxial layer on the entire surface of the silicon substrate, and then mesa regions are formed by selectively etching the epitaxial layer with a second mask. Therefore, more critical mask aligning tolerances are required in aligning the second mask with the formerly formed buried region. Furthermore, in the case of narrow isolation regions, a P+ channel stop layer formed under the bottom of the isolation region can spread to the nearby N buried region, resulting in increased parasitic capacitance, increased leakage current between the base region and the P+ channel stop layer, and reduced breakdown voltage.
Recently, a trench isolation technique has been developed which utilizes a reactive ion etching (RIE) process capable of etching a silicon substrate vertically to the substrate surface to form grooves of a given width having vertical sidewalls. The trench isolation technique is represented, e.g., by a process described in a paper entitled "U-Groove Isolation Technique For High Speed Bipolar VLSI's" by Akio Hayasaka et al, published in IEDM 82, 1982, pp. 62-65. According to this process, a silicon substrate is etched using RIE to form deep, sheer U-grooves. The grooves or trenches are oxidized to form a dielectric material such as silicon dioxide along their walls, and covered with a dielectric film such as silicon nitride. A polycrystalline semiconductor material is then deposited over the silicon substrate so as to bury the grooves, and then etched back to form a flat surface on the silicon substrate. This process will hereinafter be referred to as the "Trench Isolation Process".
In fabricating bipolar integrated circuit devices by means of the Trench Isolation Process, deep isolation grooves can be formed in the silicon substrate so as to penetrate a buried layer formed by diffusion throughout the whole area of the silicon substrate, thereby dispensing with the use of a mask for formation of such buried layer, which has conventionally been employed. However, according to the Trench Isolation Process, it is difficult to simultaneously form a flat surface over small width isolation regions and large width field regions. That is, a separate flattening step using a mask is required to obtain required surface planarity, which leads to an increased number of fabricating steps, and also requires a special mask aligning step because of the tight mask aligning tolerances.
In fabricating bipolar integrated circuit devices, it is desirable to divide a portion of the transistor-forming region in the vicinity of the substrate surface into base and collector contact regions that share a common buried layer, that is, to have the buried layer underlying both regions, in order to assure a high switching speed of the transistor. To this end, it is necessary to form deep trenches for separating device areas of integrated transistor devices and shallow trenches for separating base and collector contact regions in an array satisfying the above dividing requirements, and two masks have to be provided for forming the respective deep and shallow trenches. These requirements cause complication of the fabricating steps.
import { devices } from '@playwright/test';
/** @type {import('@playwright/test').PlaywrightTestConfig} */
const config: import('@playwright/test').PlaywrightTestConfig = {
forbidOnly: !!process.env.CI,
retries: process.env.CI ? 2 : 0,
timeout: 10 * 1000,
use: {
trace: 'on-first-retry',
},
projects: [
{
name: 'chromium',
use: { ...devices['Desktop Chrome'] },
},
{
name: 'Pixel 4',
use: {
browserName: 'chromium',
...devices['Pixel 4'],
},
},
{
name: 'iPhone 11',
use: {
browserName: 'webkit',
...devices['iPhone 11'],
},
},
],
webServer: {
command: 'npm run build && npm run preview',
port: 3000,
},
};
export default config;
|
Chronic constipation diagnosis and treatment evaluation: the CHRO.CO.DI.T.E. study Background According to Rome criteria, chronic constipation (CC) includes functional constipation (FC) and irritable bowel syndrome with constipation (IBS-C). Some patients do not meet these criteria (No Rome Constipation, NRC). The aim of the study was to evaluate the various clinical presentations and management of FC, IBS-C and NRC in Italy. Methods During a 2-month period, 52 Italian gastroenterologists recorded clinical data of FC, IBS-C and NRC patients, using the Bristol scale and the PAC-SYM and PAC-QoL questionnaires. In addition, gastroenterologists were also asked to record whether the patients were clinically assessed for CC for the first time or were in follow up. Diagnostic tests and prescribed therapies were also recorded. Results Eight hundred seventy-eight consecutive CC patients (706 F) were enrolled (FC 62.5%, IBS-C 31.3%, NRC 6.2%). PAC-SYM and PAC-QoL scores were higher in IBS-C than in FC and NRC. 49.5% were at their first gastroenterological evaluation for CC. In 48.5% CC duration was longer than 10 years. A specialist consultation was requested in 31.6%, more frequently in IBS-C than in NRC. Digital rectal examination was performed in only 56.4%. Diagnostic tests were prescribed to 80.0%. Faecal calprotectin, thyroid tests, celiac serology and breath tests were more frequently suggested in IBS-C, and anorectal manometry in FC. More than 90% had at least one treatment suggested for chronic constipation, most frequently dietary changes, macrogol and fibers. Antispasmodics and psychotherapy were more frequently prescribed in IBS-C, prucalopride and pelvic floor rehabilitation in FC. Conclusions Patients with IBS-C reported more severe symptoms and worse quality of life than FC and NRC. Digital rectal examination was often not performed, but at least one diagnostic test was prescribed to most patients. Colonoscopy and blood tests were the first line diagnostic tools. Macrogol was the most prescribed laxative, and prucalopride and pelvic floor rehabilitation represented a second line approach. Diagnostic tests and prescribed therapies increased with increasing CC severity. Electronic supplementary material The online version of this article (doi:10.1186/s12876-016-0556-7) contains supplementary material, which is available to authorized users. Background Chronic constipation (CC) is a common and extremely troublesome disorder that has a negative impact on social and professional life, reduces the quality of life (QoL) and represents a heavy economic burden. CC affects about 12-17% of the world population, with a higher prevalence among females and elderly people. A considerable proportion (16 to 40%) of CC patients in different countries use laxatives, and their use is related to increasing age, symptom frequency and duration of constipation; in the USA more than $800 million are spent on laxatives each year. The most widely used criteria to assess CC are the Rome Criteria (Table 1), which separate constipation into functional constipation (FC) and irritable bowel syndrome with constipation (IBS-C). The presence of abdominal pain relieved by defecation characterizes IBS-C. Moreover, some patients consider themselves constipated even when not showing signs or symptoms consistent with Rome criteria (here defined as "No-Rome Constipation", NRC). At present it is unclear whether gastroenterologists use the same diagnostic and therapeutic approach in these different groups of patients.
Objective of the study

Primary endpoints

To describe the diagnostic tools used and the treatments suggested by Italian gastroenterologists for CC patients.

Secondary endpoints

To assess, among CC patients, the distribution of FC, IBS-C and NRC and the severity of symptoms and QoL. To evaluate whether the diagnosis of FC, IBS-C or NRC could affect the use of the diagnostic tools and the choice of therapy. To evaluate other potential factors affecting the use of the diagnostic tools and the therapeutic choices in CC patients.

Study population and questionnaires

Fifty-two gastroenterologists belonging to different gastroenterological units in Italy, on behalf of the Italian Association of Hospital Gastroenterologists and Endoscopists (AIGO), recorded clinical and demographic data of all patients consecutively referred for CC in a two-month period (September-October 2013). The Bristol scale was used to assess stool consistency in the previous three months, while symptoms were classified according to Rome III criteria (Table 1) in order to verify whether the patients could be diagnosed as FC, IBS-C, or NRC.

Table 1 (fragment), Rome III criteria for IBS with constipation: recurrent abdominal pain or discomfort(b) at least 3 days/month in the last 3 months, associated with two or more of the following: improvement with defecation; onset associated with a change in frequency of stool; onset associated with a change in form (appearance) of stool (hard or lumpy stools ≥25% and loose or watery stools <25% of bowel movements). (a) Criteria fulfilled for the last 3 months with symptom onset at least 6 months prior to diagnosis. (b) "Discomfort" means an uncomfortable sensation not described as pain.

In addition, gastroenterologists were also asked to record whether the patients were clinically assessed for CC for the first time or were in follow up. Diagnostic tests, recommended specialist consultations and prescribed therapies were also recorded. Furthermore, patients were required to fill in the Patient Assessment of Constipation-Symptoms (PAC-SYM) and the Patient Assessment of Constipation-Quality of Life (PAC-QoL) questionnaires. PAC-QoL is a 28-item self-reported questionnaire used to measure the patient's QoL. It is divided into four subscales: physical discomfort (items 1-4), psychosocial discomfort (items 5-12), worries and concerns (items 13-23), and satisfaction (items 24-28). For both questionnaires, items are scored on a five-point Likert scale, with 4 indicating the worst symptom severity.

Inclusion criteria

- Patients aged over 18 years evaluated for CC.

Exclusion criteria

- Presence of known or suspected severe organic disease potentially causing constipation and/or psychiatric disease potentially interfering with completion of the questionnaires.
- Patients taking potentially constipating drugs, or onset of constipation after starting any kind of drug.

Statistical analysis

Data were analyzed by means of the SAS® System for Windows, version 9.2. A prevalence approach was adopted and no imputation was performed for any missing data. The association between categorical variables was analyzed using the Chi-Square test or Fisher's exact test (for cell frequencies < 5). In order to correct for multiple comparisons, pairwise tests were adjusted using the Bonferroni method. The association between a continuous and a categorical variable (with two categories) was analyzed by the Wilcoxon-Mann-Whitney test. Finally, the association between a continuous and a categorical variable was analyzed by the Kruskal-Wallis test (or by ANOVA in the case of normal distribution).
In case of pairwise comparisons, the Dunn's test was performed. The correlation between two continuous variables was summarized by the Pearson's correlation coefficient in case of normal data distribution, or by the Spearman's correlation coefficient otherwise. All statistical tests were performed with a two-sided significance level = 0.05, therefore p-values lower than 0.05 were considered statistically significant. The PAC-SYM and PAC-QoL total and domain scores were calculated as detailed in Additional file 1, respectively. PAC-SYM total score and PAC-QoL total score were also analyzed through multivariate regression models, adjusting for the following independent variables: age, sex, diagnosis, duration of CC. The results of PAC-SYM are shown in Table 3: IBS-C mean total score was higher than FC and NRC (p < 0.0001) ones. The multivariate regression model suggested that the total score of PAC-SYM (mean: 1.6 ± 0.7) was directly related to the duration of constipation (p < 0.01), and to younger age (p < 0.0001). Abdominal symptoms subscale was significantly higher in IBS-C than in FC (p < 0.05) and in NRC (p < 0.0001). In particular, a positive association was detected between each of the first four items (discomfort, pain, bloating and stomach cramps) which constitutes the abdominal subscale and IBS-C (p < 0.0001). Fecal symptoms subscale was significantly higher in FC and IBS-C than NRC (p < 0.01). Furthermore, there was a positive correlation of the total PAC-SYM score with the number of diagnostic tests (p < 0.0005) and of suggested therapies (p < 0.05). In Table 4 the results of PAC-QoL are shown: IBS-C mean total score was higher than FC and NRC (p < 0.001); all the subscales, excluding the satisfaction subscale, were significantly higher in IBS-C and in FC than in NRC. Moreover, the multivariate regression model for the total score of PAC-QoL (mean: 1.8 ± 0.7) shows that this was neither related to gender, nor to age or duration of constipation. There was a statistically significant positive correlation with the number of diagnostic tests (p < 0.05), the number of suggested therapies (p < 0.0001) and the number of specialist consultations (p < 0.005). Diagnostic tests were requested in 702/878 (80.0%) of the patients. Table 6 shows the different tests requested in the whole sample and in the different diagnosis subgroups (IBS-C, FC and NRC). Fecal calprotectin was more frequently prescribed in IBS-C than in FC and NRC (p < 0.0001 and p < 0.05, respectively). Thyroid function tests (p < 0.05), serology for celiac disease (p < 0.005), lactose breath test (p < 0.01) and glucose breath test (p < 0.05) were more frequently suggested in IBS-C than in FC, whereas in FC anorectal manometry was more frequently prescribed than in IBS-C (p < 0.05) and defecography more frequently than in NRC (p < 0.05). Abdominal ultrasonography was suggested in 22% of the patients without significant differences among groups. Colonoscopy was suggested more in patients ≥50 years than in those <50 years (52.3% vs. 22.5%; p < 0.0001), more in males than in females (51.2% vs. 35.6%; p < 0.001) and more often at first evaluation than at follow-up (43.2% vs. 32.8%; p < 0.005). Also, routine blood tests Table 7 shows the suggested therapies, overall and by diagnosis. In 863/878 patients (98.3%) at least one treatment was given. Lifestyle and dietary changes were the most frequent suggestions, whereas macrogol and fiber supplements were largely the most frequently prescribed substances. 
A mix of suggestions and drugs was used in many patients: in 59.5% lifestyle suggestions, changes in diet and macrogol; in 50.8% lifestyle suggestions, changes in diet and fiber supplementation; in 37.2% changes in diet, fiber supplementation and macrogol; in 37.1% lifestyle suggestions, fiber supplementation and macrogol; in 33.3% lifestyle suggestions, changes in diet and probiotics. Discussion The present study conveys an important educational message for general practitioners, who see the majority of constipated patients, and for other specialists who could visit patients for possible comorbidities: when collecting the patient's history, the presence of constipation should be accurately searched and treated (if possible). Waiting so many years before sending constipated patients to a gastroenterologist simply means worsening a patient's symptoms and his/her QoL and increasing the risk to develop important anatomical alterations such as perineal descent, rectocele, rectal intussusceptions, prolapse, enterocele or sigmoidocele, or increase his/her cardiovascular mortality. Rome criteria seemed accurate to identify constipated patients, since only 6.2% showed NRC. NRC patients were usually older and often male than IBS-C, and reported fewer and less severe symptoms, softer stools and a better QoL than FC and IBS-C. On the other hand, IBS-C patients were younger and more often female, reported more severe symptoms, harder stools and a worse QoL than NRC. No substantial differences have been introduced regarding definition and classification of functional constipation: simply they state that "abdominal pain and/or bloating may be present but are not predominant symptoms (ie, the patient does not meet criteria for IBS)". Regarding IBS the term discomfort was eliminated and the frequency of abdominal pain became at least 1 day per week instead of 3 days per month. However we think that these changes would not have a significant impact on the results of our study. PAC-SYM and PAC-QoL questionnaires showed higher scores in IBS-C group than in FC and NRC: PAC-SYM abdominal symptom subscale, PAC-QoL mean total score, physical discomfort, psychosocial discomfort and worries and concerns subscales were found to be higher in IBS-C. This reflects the close association between the first four items of PAC-SYM (abdominal discomfort, abdominal pain, bloating, stomach cramps) and the typical symptoms of IBS. These symptoms are likely responsible for the lower QoL in IBS-C. Thus, the increase in perception of constipation severity increases impairment of the QoL, also increasing request of diagnostic tests and therapies. Different clinical characteristics, such as type of constipation and comorbidities, may influence the clinical approach of the gastroenterologists; thus, our primary endpoint was to assess the diagnostic tools and treatment suggested by Italian gastroenterologists to their constipated patients, and the impact on the clinical subgroups. A surprising result, deserving discussion, is that DRE was not performed in more than 40% of the patients, independently from being at first visit or at follow-up. DRE is the simplest and the most immediate method to assess anal tone and to collect information about the pelvic floor conditions and to detect early forms of rectal cancer or benign diseases. These data should be carefully taken into account when carrying out educational campaigns on the diagnosis and treatment of CC. 
The presence of comorbidities was likely the main reason for the more frequent requested consultations (psychiatric/ psychological, urological, gynecological) underlining the need for a stronger collaboration among different specialists for the correct management of CC, possibly creating multidisciplinary teams. Regarding the attitude towards diagnostic tests, we want to stress that in about four out of five patients gastroenterologists were not so confident on Rome III criteria, and prescribed at least one diagnostic test, more often in patients at first evaluation, mainly blood tests, but also colonoscopy (requested more frequently in patients older than 50 years), anorectal manometry and measurement of colonic transit time. As already shown in previous studies in a general practitioner setting, abdominal ultrasound, although not recommended by current guidelines, was quite frequently requested, especially when abdominal pain is present. To exclude conditions potentially mimicking IBS, laboratory and breath tests were more frequently requested in these patients, whereas in FC, defecography and anorectal manometry were more frequently requested to evaluate the presence of dyssynergic defecation. In NRC patients fewer diagnostic tests were overall required, probably due to less severe symptoms and lesser impairment of the QoL. Overall, dietetic and lifestyle suggestions were the most frequently suggested therapeutic options (>90% of the patients) (Table 7). However, in the present study, the gastroenterologists were often not confident that these could be sufficient to solve the problem and used macrogol as the first line laxative, both in association with dietetic and lifestyle suggestion and fibers. Macrogol is effective and safe, and new liquid formulations make it easier to dose; because taste is an important factor for patients' adherence, particularly for long-time treatment, the formulations without aroma made it more acceptable to patients. On the other hand further increasing fibers intake could induce bloating and abdominal discomfort without improving colonic transit time. To control the different symptoms of IBS (mainly abdominal pain and bloating) gastroenterologists also used antispasmodic drugs, psychotherapy and anti-bloating agents, whereas pelvic floor rehabilitation was suggested more often in FC patients, in whom functional defecation disorders should be more frequent. Surgery procedures (and sacral neurostimulation) were infrequently suggested by gastroenterologists. The gastroenterologists involved in this study rarely prescribed laxatives such as lactulose/lactitole, and stimulant, emollient or saline laxatives which still represent the most used laxatives in Italy. These drugs, which cover about 40% of the Italian market, are more often prescribed by general practitioners and other specialists than gastroenterologists. Prucalopride, recently available on the Italian market, was prescribed in about 13% of patients although it was considered, probably because expensive, a second/third line treatment, and prescribed more frequently at a follow-up. At the time of the study, linaclotide was not yet available on the Italian market. As previously reported for diagnostic tools, the amount of therapy prescribed also increased by increasing PAC-SYM and PAC-QoL scores; in NRC patients, who displayed lighter symptoms, fewer therapies were suggested. 
In conclusion, in our country a gastroenterological evaluation of CC is often delayed in patients with long-lasting symptoms, colonoscopy and blood tests are considered "first line" diagnostic tools, and DRE is insufficiently performed. Furthermore, constipation is associated with several comorbidities in most patients. Among Italian gastroenterologists macrogol is the most frequently used laxative, while in IBS-C patients a larger number of drugs is prescribed than in FC and NRC patients. The study also provides several educational ideas to improve the diagnostic and therapeutic approach to CC: general practitioners and other specialists should be encouraged to refer such patients to a gastroenterologist earlier, before long-term complications occur. DRE should be performed in all patients, while the role of echography should be scaled back.

Conclusions

Chronic constipation is a common disorder that has a remarkable impact on the quality of life. We report on the diagnostic and therapeutic experience of Italian gastroenterologists. Patients with irritable bowel syndrome with constipation reported more severe symptoms and worse quality of life than those with functional constipation. Colonoscopy and blood tests were the most prescribed tests, and macrogol was the most prescribed laxative. This study can provide several educational ideas to improve the diagnostic and therapeutic approach to chronic constipation.
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
*/
package com.microsoft.azure.management.mediaservices.v2019_05_01_preview;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
* Class to specify properties of default content key for each encryption
* scheme.
*/
public class DefaultKey {
/**
* Label can be used to specify Content Key when creating a Streaming
* Locator.
*/
@JsonProperty(value = "label")
private String label;
/**
* Policy used by Default Key.
*/
@JsonProperty(value = "policyName")
private String policyName;
/**
* Get label can be used to specify Content Key when creating a Streaming Locator.
*
* @return the label value
*/
public String label() {
return this.label;
}
/**
* Set label can be used to specify Content Key when creating a Streaming Locator.
*
* @param label the label value to set
* @return the DefaultKey object itself.
*/
public DefaultKey withLabel(String label) {
this.label = label;
return this;
}
/**
* Get policy used by Default Key.
*
* @return the policyName value
*/
public String policyName() {
return this.policyName;
}
/**
* Set policy used by Default Key.
*
* @param policyName the policyName value to set
* @return the DefaultKey object itself.
*/
public DefaultKey withPolicyName(String policyName) {
this.policyName = policyName;
return this;
}
}
|
Sentiment and Prejudice This article describes the evolution of Edgeworth's thought on women's wages and on the principle of equal pay for equal work. We first document Edgeworth's early works on exact utilitarianism as an epistemic basis for his reflections upon women's wages. Second, we review his first writings on women's work and wages: early mentions in the 1870s, his book reviews published in the Economic Journal, and the substantial preface he wrote for the British Association for the Advancement of Science 1904 report on Women in Printing Trades. Third, we document his 1922 British Association presidential address in relation to the burgeoning literature on women's work and wages within political economy at the time. Finally, we show that his 1923 follow-up article on women's wages and economic welfare constitutes an update of his aristocratical utilitarianism in the post-World War I context.
Dispositional Insight Scale: Development and Validation of a Tool That Measures Propensity Toward Insight In Problem Solving This article reports the development of a brief self-report measure of dispositional insight problem solving, the Dispositional Insight Scale (DIS). From a representative Australian database, 1,069 adults (536 women and 533 men) completed an online questionnaire. An exploratory and confirmatory factor analysis revealed a 5-item scale, with all items loading onto a single factor. Internal consistency was acceptable with a Cronbach's alpha of .74. The DIS showed convergent validity with other constructs that are theoretically related to insight: high need for cognition, intuition and positive affect. Normative data for the scale are also reported. The DIS appears to be useful for measuring a disposition toward insight in problem-solving.
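For reference, the internal-consistency figure quoted above follows the standard Cronbach's alpha formula, alpha = k/(k-1) * (1 - sum of item variances / variance of the total score). Below is a minimal sketch of that computation, assuming synthetic 5-point responses rather than the authors' actual data; all names and numbers in the code are illustrative only.
# Illustrative sketch (not from the article): Cronbach's alpha for a 5-item scale,
# computed on synthetic Likert-type responses.
import numpy as np

def cronbach_alpha(items: np.ndarray) -> float:
    # items: respondents x items matrix of scores
    k = items.shape[1]
    item_variances = items.var(axis=0, ddof=1)
    total_variance = items.sum(axis=1).var(ddof=1)
    return (k / (k - 1)) * (1 - item_variances.sum() / total_variance)

rng = np.random.default_rng(0)
latent = rng.normal(size=(1069, 1))                      # one underlying factor
noise = rng.normal(size=(1069, 5))
responses = np.clip(np.round(3 + latent + noise), 1, 5)  # five items on a 1-5 scale
print(f"alpha = {cronbach_alpha(responses):.2f}")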
package atlas
import (
"context"
"fmt"
"strconv"
"sync"
"time"
"github.com/signalfx/signalfx-agent/pkg/monitors/mongodb/atlas/measurements"
"github.com/signalfx/signalfx-agent/pkg/utils"
"github.com/Sectorbob/mlab-ns2/gae/ns/digest"
"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
"github.com/signalfx/golib/v3/datapoint"
"github.com/signalfx/signalfx-agent/pkg/core/config"
"github.com/signalfx/signalfx-agent/pkg/monitors"
"github.com/signalfx/signalfx-agent/pkg/monitors/types"
"github.com/signalfx/signalfx-agent/pkg/utils/timeutil"
)
func init() {
monitors.Register(&monitorMetadata, func() interface{} { return &Monitor{} }, &Config{})
}
// Config for this monitor
type Config struct {
config.MonitorConfig `yaml:",inline"`
// ProjectID is the Atlas project ID.
ProjectID string `yaml:"projectID" validate:"required" `
// PublicKey is the Atlas public API key
PublicKey string `yaml:"publicKey" validate:"required" `
// PrivateKey is the Atlas private API key
PrivateKey string `yaml:"privateKey" validate:"required" neverLog:"true"`
// Timeout for HTTP requests to get MongoDB process measurements from Atlas.
// This should be a duration string that is accepted by https://golang.org/pkg/time/#ParseDuration
Timeout timeutil.Duration `yaml:"timeout" default:"5s"`
// EnableCache enables locally cached Atlas metric measurements to be used when true. The metric measurements that
// were supposed to be fetched are in fact always fetched asynchronously and cached.
EnableCache bool `yaml:"enableCache" default:"true"`
// Granularity is the duration in ISO 8601 notation that specifies the interval between measurement data points
// from Atlas over the configured period. The default is shortest duration supported by Atlas of 1 minute.
Granularity string `yaml:"granularity" default:"PT1M"`
// Period the duration in ISO 8601 notation that specifies how far back in the past to retrieve measurements from Atlas.
Period string `yaml:"period" default:"PT20M"`
}
// Monitor for MongoDB Atlas metrics
type Monitor struct {
Output types.FilteringOutput
cancel context.CancelFunc
processGetter measurements.ProcessesGetter
diskGetter measurements.DisksGetter
}
// Configure monitor
func (m *Monitor) Configure(conf *Config) (err error) {
var client *mongodbatlas.Client
var processMeasurements measurements.ProcessesMeasurements
var diskMeasurements measurements.DisksMeasurements
ctx, cancel := context.WithCancel(context.Background())
m.cancel = cancel
timeout := conf.Timeout.AsDuration()
if client, err = newDigestClient(conf.PublicKey, conf.PrivateKey); err != nil {
return fmt.Errorf("error making HTTP digest client: %+v", err)
}
m.processGetter = measurements.NewProcessesGetter(conf.ProjectID, conf.Granularity, conf.Period, client, conf.EnableCache)
m.diskGetter = measurements.NewDisksGetter(conf.ProjectID, conf.Granularity, conf.Period, client, conf.EnableCache)
utils.RunOnInterval(ctx, func() {
processes := m.processGetter.GetProcesses(ctx, timeout)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
processMeasurements = m.processGetter.GetMeasurements(ctx, timeout, processes)
}()
wg.Add(1)
go func() {
defer wg.Done()
diskMeasurements = m.diskGetter.GetMeasurements(ctx, timeout, processes)
}()
wg.Wait()
var dps = make([]*datapoint.Datapoint, 0)
// Creating metric datapoints from the 1 minute resolution process measurement datapoints
for k, v := range processMeasurements {
dps = append(dps, newDps(k, v, "")...)
}
// Creating metric datapoints from the 1 minute resolution disk measurement datapoints
for k, v := range diskMeasurements {
dps = append(dps, newDps(k, v.Measurements, v.PartitionName)...)
}
m.Output.SendDatapoints(dps...)
}, time.Duration(conf.IntervalSeconds)*time.Second)
return nil
}
// Shutdown the monitor
func (m *Monitor) Shutdown() {
if m.cancel != nil {
m.cancel()
}
}
func newDigestClient(publicKey, privateKey string) (*mongodbatlas.Client, error) {
//Setup a transport to handle digest
transport := digest.NewTransport(publicKey, privateKey)
client, err := transport.Client()
if err != nil {
return nil, err
}
return mongodbatlas.NewClient(client), nil
}
func newDps(process measurements.Process, measurementsArr []*mongodbatlas.Measurements, partitionName string) []*datapoint.Datapoint {
var dps = make([]*datapoint.Datapoint, 0)
var dimensions = newDimensions(&process, partitionName)
for _, measures := range measurementsArr {
metricValue := newFloatValue(measures.DataPoints)
if metricValue == nil || metricsMap[measures.Name] == "" {
continue
}
dp := &datapoint.Datapoint{
Metric: metricsMap[measures.Name],
MetricType: datapoint.Gauge,
Value: metricValue,
Dimensions: dimensions,
}
dps = append(dps, dp)
}
return dps
}
func newFloatValue(dataPoints []*mongodbatlas.DataPoints) datapoint.FloatValue {
if len(dataPoints) == 0 {
return nil
}
var timestamp = dataPoints[0].Timestamp
var value = dataPoints[0].Value
// Getting the latest non nil value
for i := 1; i < len(dataPoints); i++ {
if dataPoints[i].Timestamp > timestamp && dataPoints[i].Value != nil {
value = dataPoints[i].Value
timestamp = dataPoints[i].Timestamp
}
}
if value == nil {
return nil
}
return datapoint.NewFloatValue(float64(*value))
}
func newDimensions(process *measurements.Process, partitionName string) map[string]string {
var dimensions = map[string]string{"process_id": process.ID, "project_id": process.ProjectID, "host": process.Host, "port": strconv.Itoa(process.Port), "type_name": process.TypeName}
if process.ReplicaSetName != "" {
dimensions["replica_set_name"] = process.ReplicaSetName
}
if process.ShardName != "" {
dimensions["shard_name"] = process.ShardName
}
if partitionName != "" {
dimensions["partition_name"] = partitionName
}
return dimensions
}
|
Mechanism of inhibition of human leucocyte elastase by monocyclic beta-lactams. The kinetic and catalytic mechanisms of time-dependent inhibition of human polymorphonuclear leukocyte elastase (HLE) by the monocyclic beta-lactams described by Knight et al. are investigated in this work. The dependence of the pseudo-first-order rate constant (k(obs)) on inhibitor concentration was saturable. The individual kinetic constants for the inhibition by L-680,833, -4-benzeneacetic acid, and L-683,845, -4-benzeneacetic acid, at pH 7.5 were k(inact) = 0.08 and 0.06 s-1 and Ki = 0.14 and 0.06 microM, respectively. The relative potency of this class of compounds as measured by k(inact)/Ki is primarily controlled by the Ki term, which ranged from 6 nM to 8 mM, while k(inact) was relatively insensitive to structural changes and varied by only an order of magnitude. Inactivation by the beta-lactams was efficient, requiring only 1.3 and 1.7 equiv of L-680,833 and L-683,845 to inactivate HLE. These values are indicative of some partitioning between turnover of inhibitor and inactivation. The partition ratio ranged as high as 3.5:1 depending upon the structure of the inhibitors, but this ratio was essentially independent of the availability and identity of a leaving group at C-4 of the lactam ring. Inactivation and partitioning liberate the leaving group when present at C-4. p-Hydroxy-m-nitrophenylacetic acid is liberated from this position at a rate similar to that for enzyme inactivation, suggesting kinetic competence of this process. Other products observed during the interaction of L-680,833 with HLE include a substituted urea, a species previously observed during the base-catalyzed decomposition of this class of compounds, and small amounts of products observed during reactivation of beta-lactam-derived HLE-I complexes. Both the pH dependence of k(inact)/Ki for the inactivation of HLE by -4-benzoic acid and V/K for HLE-catalyzed substrate hydrolysis indicate that a single ionizable group with a pK of approximately 7 must be deprotonated for both processes. This group is likely the active site histidine. The data are consistent with initial formation of a Michaelis complex, acylation of the catalytic serine, and loss of the leaving group at C-4 of the original beta-lactam ring followed by partitioning between regeneration of active enzyme and production of a stable enzyme-inhibitor complex.(ABSTRACT TRUNCATED AT 400 WORDS)
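The saturable dependence of k(obs) on inhibitor concentration described above is conventionally modelled as k(obs) = k(inact)·[I] / (Ki + [I]). The sketch below shows how k(inact) and Ki could be recovered from such a curve by nonlinear fitting; it uses hypothetical concentration/rate values seeded from the numbers quoted in the abstract, not the authors' measurements, and is not presented as their analysis.
# Hedged sketch: fitting the standard saturation model for time-dependent
# inhibition, k_obs = k_inact * [I] / (Ki + [I]), to illustrative data.
import numpy as np
from scipy.optimize import curve_fit

def k_obs_model(conc, k_inact, Ki):
    return k_inact * conc / (Ki + conc)

# Hypothetical inhibitor concentrations (microM) and observed rate constants (s^-1)
conc = np.array([0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0])
rng = np.random.default_rng(1)
k_obs = k_obs_model(conc, 0.08, 0.14) * (1 + 0.05 * rng.normal(size=conc.size))

(k_inact, Ki), _ = curve_fit(k_obs_model, conc, k_obs, p0=(0.1, 0.1))
print(f"k_inact = {k_inact:.3f} s^-1, Ki = {Ki:.3f} microM, k_inact/Ki = {k_inact / Ki:.2f}")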
import datetime
import re
from html.parser import HTMLParser
import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "Auckland council"
DESCRIPTION = "Source for Auckland council."
URL = "https://aucklandcouncil.govt.nz"
TEST_CASES = {
"429 Sea View Road": {"area_number": "12342453293"}, # Monday
"8 Dickson Road": {"area_number": "12342306525"}, # Thursday
}
MONTH = {
"January": 1,
"February": 2,
"March": 3,
"April": 4,
"May": 5,
"June": 6,
"July": 7,
"August": 8,
"September": 9,
"October": 10,
"November": 11,
"December": 12,
}
def toDate(formattedDate):
items = formattedDate.split()
return datetime.date(
int(items[3]), MONTH[items[2]], int(items[1])
)
# Parser for <div> element with class wasteSearchResults
class WasteSearchResultsParser(HTMLParser):
def __init__(self):
super().__init__()
self._entries = []
self._wasteType = None
self._withinWasteDateSpan = False
self._withinHouseholdDiv = False
self._withinRubbishLinks = False
self._todaysDate = None
self._workingWasteDate = None
@property
def entries(self):
return self._entries
def handle_endtag(self, tag):
if tag == "span" and self._withinWasteDateSpan:
self._withinWasteDateSpan = False
if tag == "div" and self._withinRubbishLinks:
self._withinRubbishLinks = False
self._workingWasteDate = None
def handle_starttag(self, tag, attrs):
if tag == "div":
d = dict(attrs)
id = d.get("id", "")
if id.endswith("HouseholdBlock"):
self._withinHouseholdDiv = True
if id.endswith("CommercialBlock"):
self._withinHouseholdDiv = False
if self._withinHouseholdDiv:
s = dict(attrs)
className = s.get("class", "")
if tag == "div":
if className == "links":
self._withinRubbishLinks = True
else:
self._withinRubbishLinks = False
if tag == "span":
if className.startswith("m-r-1"):
self._withinWasteDateSpan = True
if self._workingWasteDate is not None:
if className.startswith("icon-rubbish") or className.startswith("icon-recycle"):
type = s["class"][5:] # remove "icon-"
self._entries.append(Collection(self._workingWasteDate, type ))
def handle_data(self, data):
## date span comes first, doesn't have a year
if self._withinWasteDateSpan:
todays_date = datetime.date.today()
## use current year, unless Jan is in data, and we are still in Dec
year = todays_date.year
if "January" in data and todays_date.month == 12:
## then add 1
year = year + 1
fullDate = data + " " + "{}".format(year)
self._workingWasteDate = toDate(fullDate)
class Source:
def __init__(
self, area_number,
):
self._area_number = area_number
def fetch(self):
# get token
params = {"an": self._area_number}
r = requests.get(
"https://www.aucklandcouncil.govt.nz/rubbish-recycling/rubbish-recycling-collections/Pages/collection-day-detail.aspx",
params=params,
)
p = WasteSearchResultsParser()
p.feed(r.text)
return p.entries
|
Guest post by David Archibald
This is a little bit amusing. In February, I had a post on the solar – sea level relationship which quantified the sea level fall to come to the end of Solar Cycle 25:
http://wattsupwiththat.com/2012/02/03/quantifying-sea-level-fall/
The site “Skeptical Science” has to date carried two pieces in response to that February post: http://www.skepticalscience.com/Why_David_Archibald_is_wrong_about_solar_sea_level.html
and http://www.skepticalscience.com/Why_David_Archibald_is_wrong_about_solar_sea_level_1B.html
My February post was 624 words and 6 figures. The Skeptical Science responses to date total 3,446 words and 17 figures. The relationship I found between solar activity and sea level is 0.045 mm per unit of annual sunspot number. The threshold between rising and falling sea level is a sunspot amplitude of 40. Below 40, sea level falls. Above that, it rises.
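For illustration, here is a minimal sketch of how the stated relationship can be turned into a back-test, assuming an annual sunspot-number series keyed by year (a real run would use the full annual record from 1645 onwards; the dictionary below is only a placeholder). The figures that follow come from the original calculation, not from this sketch.
# Hedged sketch: cumulative sea-level change implied by the stated relationship
# (0.045 mm per unit of annual sunspot number, neutral point at SSN = 40).
def backtest_sea_level(ssn_by_year, rate_mm_per_ssn=0.045, threshold=40.0):
    level = 0.0
    out = {}
    for year in sorted(ssn_by_year):
        level += rate_mm_per_ssn * (ssn_by_year[year] - threshold)
        out[year] = level  # mm relative to the starting point of the series
    return out

# Toy illustration only; values are placeholders, not the real sunspot record.
example = {1645: 0, 1646: 2, 1647: 5, 1648: 15, 1649: 60}
print(backtest_sea_level(example))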
So let’s apply that relationship to the known sunspot record back to the beginning of the Maunder Minimum and see what it tells us. This is the result:
Figure 1: Back-tested Sea Level from 1645
The figure shows sea level falling through the Maunder Minimum due to the lack of sunspots and then fluctuating in a band about 60 mm wide before increasing rapidly from 1934. It then shows sea level peaking in 2003 before declining 40 mm to 2040.
That is pretty much in agreement with the data from the last 150 years, as per this figure combining coastal tide gauge records to 2001 and the satellite record thereafter:
Figure 2: Sea Level Rise 1850 with a Projection 2040
The glaciers started retreating in 1859, with sea level responding with a rise of 1 mm per annum up to 1930. There was an inflection point in 1930 with the rate of sea level rise almost doubling to 1.9 mm per annum. Sea level also stopped rising from 2003. So the back-tested model and the sea level record are in agreement for at least the last 150 years.
Jevrejeva et al (http://www.psmsl.org/products/reconstructions/2008GL033611.pdf) reconstructed sea level back to 1700:
Figure 3: Global Mean Sea Level Reconstruction since 1700
This longer term reconstruction shows the rise of sea level once the glaciers started retreating. It also shows the acceleration of sea level rise from the early 1930s. As Solanki noted in 2004, the Sun was more active in the second half of the 20th Century than at any time in the previous 8,000 years: http://earthobservatory.nasa.gov/Newsroom/view.php?id=25538 A sea level response to that would be expected.
In summary, the sea level trend fluctuations driven by the internal variability of the ocean-atmosphere coupled system were overprinted by higher solar activity from 1933 to 2003. The period of best fit within that, from 1948 to 1987, has allowed the solar component of sea level rise to be elucidated.
package com.hgautam.examples;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestMessageBuilder {
@Test
public void testNameHgautam() {
MessageBuilder obj = new MessageBuilder();
assertEquals("Hello hgautam", obj.getMessage("hgautam"));
}
}
|
/* eslint-env jest */
import { MeasureDistanceMode } from '../../src/lib/measure-distance-mode';
import {
createFeatureCollectionProps,
createClickEvent,
createPointerMoveEvent,
createKeyboardEvent,
} from '../test-utils';
describe('move without click', () => {
let mode;
beforeEach(() => {
mode = new MeasureDistanceMode();
mode.handlePointerMove(createPointerMoveEvent(), createFeatureCollectionProps());
});
it('guides are empty', () => {
const guides = mode.getGuides(createFeatureCollectionProps());
expect(guides).toEqual({ type: 'FeatureCollection', features: [] });
});
it('tooltips are empty', () => {
const tooltips = mode.getTooltips(createFeatureCollectionProps());
expect(tooltips).toEqual([]);
});
});
describe('one click', () => {
let mode;
beforeEach(() => {
mode = new MeasureDistanceMode();
mode.handleClick(createClickEvent([1, 2]), createFeatureCollectionProps());
});
it('guides are a single point', () => {
const guides = mode.getGuides(createFeatureCollectionProps());
expect(guides).toMatchSnapshot();
});
it('tooltips are 0.00', () => {
const tooltips = mode.getTooltips(createFeatureCollectionProps());
expect(tooltips[0].text).toContain('0.00');
});
});
describe('one click + pointer move', () => {
let mode;
beforeEach(() => {
mode = new MeasureDistanceMode();
mode.handleClick(createClickEvent([1, 2]), createFeatureCollectionProps());
mode.handlePointerMove(createPointerMoveEvent([3, 4]), createFeatureCollectionProps());
});
it('guides are two points + line string', () => {
const guides = mode.getGuides(createFeatureCollectionProps());
expect(guides).toMatchSnapshot();
});
it('tooltip contains distance', () => {
const tooltips = mode.getTooltips(createFeatureCollectionProps());
expect(tooltips).toMatchSnapshot();
});
});
describe('two clicks', () => {
let mode;
beforeEach(() => {
mode = new MeasureDistanceMode();
mode.handleClick(createClickEvent([1, 2]), createFeatureCollectionProps());
mode.handleClick(createClickEvent([3, 4]), createFeatureCollectionProps());
});
it('guides are two points + line string', () => {
const guides = mode.getGuides(createFeatureCollectionProps());
expect(guides).toMatchSnapshot();
});
it('tooltip contains distance', () => {
const tooltips = mode.getTooltips(createFeatureCollectionProps());
expect(tooltips).toMatchSnapshot();
});
it('can measure kilometers', () => {
const tooltips = mode.getTooltips(createFeatureCollectionProps());
expect(tooltips[0].text).toContain('kilometers');
});
it('can measure miles', () => {
const tooltips = mode.getTooltips(
createFeatureCollectionProps({ modeConfig: { turfOptions: { units: 'miles' } } })
);
expect(tooltips[tooltips.length - 1].text).toContain('miles');
});
it('can format distance', () => {
const tooltips = mode.getTooltips(
createFeatureCollectionProps({ modeConfig: { formatTooltip: String } })
);
expect(tooltips[tooltips.length - 1].text).toEqual('314.28368918020476');
});
});
describe('two clicks + pointer move', () => {
let mode;
beforeEach(() => {
mode = new MeasureDistanceMode();
mode.handleClick(createClickEvent([1, 2]), createFeatureCollectionProps());
mode.handleClick(createClickEvent([3, 4]), createFeatureCollectionProps());
mode.handlePointerMove(createPointerMoveEvent([4, 5]), createFeatureCollectionProps());
});
it('ending point is clicked point not hovered point', () => {
const guides = mode.getGuides(createFeatureCollectionProps());
expect(guides.features[2].geometry.coordinates).toEqual([3, 4]);
});
it('tooltip contains distance', () => {
const tooltips = mode.getTooltips(createFeatureCollectionProps());
expect(tooltips).toMatchSnapshot();
});
});
describe('three clicks + pointer move', () => {
let mode;
beforeEach(() => {
mode = new MeasureDistanceMode();
mode.handleClick(createClickEvent([1, 2]), createFeatureCollectionProps());
mode.handleClick(createClickEvent([3, 4]), createFeatureCollectionProps());
mode.handleClick(createClickEvent([4, 5]), createFeatureCollectionProps());
mode.handlePointerMove(createPointerMoveEvent([6, 7]), createFeatureCollectionProps());
});
it('first feature is a tentative line that contains 4 points', () => {
const guides = mode.getGuides(createFeatureCollectionProps());
expect(guides.features[0].properties.guideType).toEqual('tentative');
expect(guides.features[0].geometry.type).toEqual('LineString');
expect(guides.features[0].geometry.coordinates.length).toEqual(4);
expect(guides.features[0].geometry.coordinates[3]).toEqual([6, 7]);
});
it('Second feature is a editHandle point with coordinates [1,2]', () => {
const guides = mode.getGuides(createFeatureCollectionProps());
expect(guides.features[1].properties.guideType).toEqual('editHandle');
expect(guides.features[1].geometry.type).toEqual('Point');
expect(guides.features[1].geometry.coordinates).toEqual([1, 2]);
});
it('tooltip contains distance', () => {
const tooltips = mode.getTooltips(createFeatureCollectionProps());
expect(tooltips).toMatchSnapshot();
});
});
describe('three clicks + pointer move + press Escape', () => {
let mode;
beforeEach(() => {
mode = new MeasureDistanceMode();
mode.handleClick(createClickEvent([1, 2]), createFeatureCollectionProps());
mode.handleClick(createClickEvent([3, 4]), createFeatureCollectionProps());
mode.handleClick(createClickEvent([4, 5]), createFeatureCollectionProps());
mode.handlePointerMove(createPointerMoveEvent([6, 7]), createFeatureCollectionProps());
mode.handleKeyUp(createKeyboardEvent('Escape'), createFeatureCollectionProps());
});
it('first feature is a tentative line that contains 3 points', () => {
const guides = mode.getGuides(createFeatureCollectionProps());
expect(guides.features[0].properties.guideType).toEqual('tentative');
expect(guides.features[0].geometry.type).toEqual('LineString');
expect(guides.features[0].geometry.coordinates.length).toEqual(3);
expect(guides.features[0].geometry.coordinates[2]).toEqual([4, 5]);
});
it('tooltip contains distance', () => {
const tooltips = mode.getTooltips(createFeatureCollectionProps());
expect(tooltips).toMatchSnapshot();
});
});
describe('three clicks + pointer move + press Enter', () => {
let mode;
beforeEach(() => {
mode = new MeasureDistanceMode();
mode.handleClick(createClickEvent([1, 2]), createFeatureCollectionProps());
mode.handleClick(createClickEvent([3, 4]), createFeatureCollectionProps());
mode.handleClick(createClickEvent([4, 5]), createFeatureCollectionProps());
mode.handlePointerMove(createPointerMoveEvent([6, 7]), createFeatureCollectionProps());
mode.handleKeyUp(createKeyboardEvent('Enter'), createFeatureCollectionProps());
});
it('first feature is a tentative line that contains 3 points', () => {
const guides = mode.getGuides(createFeatureCollectionProps());
expect(guides.features[0].properties.guideType).toEqual('tentative');
expect(guides.features[0].geometry.type).toEqual('LineString');
expect(guides.features[0].geometry.coordinates.length).toEqual(4);
expect(guides.features[0].geometry.coordinates[3]).toEqual([6, 7]);
});
it('tooltip contains distance', () => {
const tooltips = mode.getTooltips(createFeatureCollectionProps());
expect(tooltips).toMatchSnapshot();
});
});
|
Emeritus Senior Minister Goh Chok Tong is greeted by Marine Parade residents at the PAssionArts Festival held at Marine Parade Promenade on 16 August, 2015.
THE Workers' Party's (WP's) decision to field a slate made up of incumbent Aljunied GRC MPs in the same constituency for the upcoming election shows that they take the People's Action Party (PAP) team there seriously, Emeritus Senior Minister Goh Chok Tong said yesterday.
"It doesn't matter who they send," he said, adding that the PAP will field a team which the party regards as right for the GRC.
Mr Goh spoke to the media after meeting residents during a two-hour walkabout in the GRC, which the WP won in the 2011 General Election.
He was accompanied by former PAP chairman Lim Boon Heng and the chairmen of the PAP's branches in Aljunied GRC: Victor Lye, K. Muralidharan Pillai, Chua Eng Leong, Chan Hui Yuh and Shamsul Kamar.
The five have been identified as the likely PAP slate in the GRC. The visit covered the Serangoon Garden food centre, Serangoon North Avenue 1 and Hougang Mall.
Mr Goh, a former prime minister and an MP for Marine Parade GRC, said he was visiting the constituency to provide moral support to the team, who have been working very hard over the last few years. "It's a signal to Aljunied GRC residents that the PAP remains interested in you," he said.
Mr Goh said that the PAP is there to serve Aljunied GRC residents and "we want to have another chance to serve them better than the current MPs".
Residents gave him a good reception, he said, but he acknowledged that personal goodwill towards him may not necessarily translate into votes on the ground: "We have to be realistic about it."
Addressing his younger party colleagues in Aljunied GRC, Mr Goh advised them to focus on "heartware". "Don't worry about hardware like infrastructure. It is important, but more or less done. The way forward is community bonding." |
import { NgModule } from '@angular/core';
// import { CommonModule } from '@angular/common';
import { PlanComponent } from 'app/modules/plan/pages/plan/plan.component';
import { PlanRoutingModule } from './plan-routing.module';
import { SharedModule } from '@app/shared';
import { BillingPaymentComponent } from './pages/billing-payment/billing-payment.component';
import { MaterialModule } from '@app/material/material.module';
import { MatPaginatorModule } from '@angular/material/paginator';
import { MatFormFieldModule } from '@angular/material/form-field';
import { MatInputModule } from '@angular/material/input';
import { FlexLayoutModule } from '@angular/flex-layout';
import { MatDatepickerModule } from '@angular/material/datepicker';
import { MatNativeDateModule } from '@angular/material/core';
import { HintMessageComponent } from './popup/hint-message/hint-message.component';
@NgModule({
declarations: [PlanComponent, BillingPaymentComponent, HintMessageComponent],
imports: [
// CommonModule,
SharedModule,
PlanRoutingModule,
MaterialModule,
MatFormFieldModule,
MatInputModule,
MatPaginatorModule,
FlexLayoutModule,
MatDatepickerModule,
MatNativeDateModule,
],
exports: [
MatPaginatorModule,
MatFormFieldModule,
MatInputModule,
MatDatepickerModule,
MatNativeDateModule,
]
})
export class PlanModule {
}
|
Understanding Belgian Individual Investors Complementarity of Qualitative and Quantitative Methodologies in a Grounded Theory Approach This article describes the methodological reasoning followed while studying Belgian individual investors and shows how two methodological approaches, one qualitative and one quantitative, can together allow to build a real inductive process within priority is given to data and to returns from the field. How do individual investors experience their investment? Are they one or several investors profiles? Our research explores an unknown territory (Bouchard, 200). Many researches focus on investor behaviour bias and their underperformance. No researches studied Belgian individual investors, few studies used mixed methodologies (qualitative and quantitative) and few studies used primary data. Our research proposes to fill that gap. Thanks to the qualitative phase (17 interviews of Belgian investors) we highlighted the importance of family tradition and the influence of environment regarding investment decisions; the difference of perception between investors and their environment, qualities of a good investor and their perception of financial intermediaries. The quantitative phase (706 questionnaires) allowed to discover 5 investors profiles in term of behaviour: the followers, the traditionalists, the sleeping investors, the experts and the gamblers. This article also pinpoints all difficulties met during the research using grounded theory and proposed the solutions used by the authors. Introduction Does the individual Investor (with a big « I ) exist or are there several investor's profiles? In order to answer this difficult question, we decided to use two methodological approaches one after the other. First, qualitative interviews were led with 17 Belgian individual investors (between June and December 2015) and then a qualitative approach by mean of a questionnaire gave 706 answers (between October 2016 and December 2017). The methodological approach of grounded theory is inductive. It's being in the field's data which allow to begin the research and not previous results of other researchers (Luckerhoff & Guillemette, 2012a). In opposition with approaches which are based on existing theories and which have as goal to validate or not hypotheses, grounded theory don 't want to force the data to enter the theoretical background but allows data to propose new theories. Induction (meaning being open to all what comes from data) is omnipresent in both the two steps of our research but also in the methodological way going from one research phase to the other. For example, the questionnaire used in the second phase was built on the basis of the interviews' results of the first phase. Priority is given to the data, as well in the qualitative approach as in the quantitative one. The use of the previous studies is only present after the field has been studied. As proposed by Glaser and Strauss those previous works allow theories to emerge from data. methodologies used to answer our question; section 4 shows the complementarity of qualitative and quantitative methodologies and explain how Grounded Theory is aware of combining those methodologies in the same research project; results emerged from the ground are exposed in section 5; section 6 is dedicated to the difficulties we faced conducting this study and how we managed to find out solution. 
Research Question
The goal of our first research phase was to understand the existing link between individual investors and information. Where did they find it? What were their privileged sources of information? In which type of information were they interested? This research on individual investors follows several others in the field of financial communication on the Internet (Pozniak & Croquet, 2011; Pozniak & Guillemette, 2013; Pozniak, 2010; Pozniak, 2013b). Focusing on individual investors was the continuation of the PhD of one of the authors. Her thesis (Pozniak, 2013a) studied the financial communication of SMEs quoted on the unregulated markets of Brussels and focused on the company's point of view. The authors then wanted to study another point of view: the investor's. What makes qualitative interviews so rich is all the unexpected results that can be discovered. In our case, meeting investors allowed elements to emerge such as their relations with financial intermediaries, their perception of the trading world, and the feeling they have about how they are perceived by their environment (family, friends). In fact, from the first interview onwards, it was the whole life experience of the individual investor which appeared, and not only their behaviour regarding information. So, quickly, our research questions evolved and enlarged: from "what is their link with information?" to "which situation are the investors living?". We thus already understood that we were not able to speak about one single type of investor with a common behaviour. Several profiles seemed to emerge from the interviews concerning risk, relations with others (intermediaries, family, friends), research of information and use of it. The second phase of our research aimed to validate or not (in a statistical way) the various trends observed during the interviews and to build investors' profiles.
Two Methodological Approaches to Reach the Research Questions
Two methodological approaches are used one after the other: qualitative interviews led with 17 individual investors (in Belgium between June and December 2015) and a quantitative study by means of a questionnaire (between October 2016 and January 2017, with 706 answers).
Phase 1: Qualitative Approach via Interviews
Several authors (Miles & Huberman, 2003; Paillé, 2007; Evrard, Pras & Roux, 2009) demonstrated the relevance of a qualitative study when being in an understanding phase regarding a specific phenomenon. The choice of a qualitative approach can be justified at different levels: epistemological, methodological and social (Paillé, 1996). At an epistemological level, the qualitative approach seems relevant simply because of the need to better know the studied phenomenon, considered as a human one. At a methodological level, qualitative research can be justified by its understanding aspect, its inductive one (it is being in contact with the field which allows one to understand it), its recursive one (repeating the steps if needed) and its flexible one (adaptations are possible regarding unexpected situations). At a social level, the qualitative approach seems judicious by the fact that it is developed near the people, places, experiences and problems studied. Savoie-Zajc (2000:174) enlarged the concept of "qualitative approach" by adding the adjective "interpretative". According to that author, the word "qualitative" is linked to the nature of the data used and the word "interpretative" relates to the epistemological concept behind the qualitative approach.
In fact, interpretative authors try to understand in a rich way the studied phenomenon using the sense given to it by the actors who live it. Other authors (Dithley, 1942: Weber, 1949: Blum, 1969: Husserl, 1977: Schutz, 1987 consider also that point of view saying it's from the conscious of the phenomenon by the people who live it that a human phenomenon can be studied. real-life experience of people and what they say about it (Blanchet & Gotman, 2007). The first step of our research was thus mainly interpretative because the real life of the situation is human based, it has a sense and can be interpreted (Blumer, 1969;Corbin & Strauss, 2008). The building of our investors' sample has not been so easy. In fact, no list of individual investors is available. We thus contacted investors by different ways (investors' club, financial companies, associations) and we also used their own network. Our 17 interviews lasted around one hour and a half and were led in Belgium between June and December 2015. We used QSR NVivo 10 to analyse our data. We considered thematic analysis (Paill & Mucchielli, 2008;Corbin & Strauss, 2008). This analysis is linked to the way they lived the phenomenon we wanted to understand and not only based on the words used. Phase 2: Quantitative Approach via Questionnaire on a Large Scale Our questionnaire, made via LimeSurvey was divided in different parts: Stock exchange and me: origin of the investment (when? Why?), way to invest (alone? Via intermediaries? Why?) My investments and me: choice in terms of sector, company products, financial products, value, Risk and me: perception of risk, choices of investments regarding the risk, average amounts invested, frequencies Company and me: contacts with the company (visit, manager's meeting) Information and me: type of information needed, sources of information Profile of the investor: gender, age, education level, professional situation, localisation, internet profile (from basic to expert) That questionnaire was available via different ways: investors' clubs, investor's network of phase one, participants to a conference (organised by the authors of this article -19 October 2016) and via a partnership with MediaFin (Belgian specialised media company) which sent it to readers of financial Belgian journals (l'Echo and de Tijd) 706 answers were collected. Data were analysed via SPSS 22.0. A factorial analysis allowed to identify the main dimensions of the phenomenon. A segmentation technic based on those dimensions proposed then different profiles. Complementarity of Qualitative and Quantitative Methodologies in a General Grounded Theory Approach Complementarity between qualitative and quantitative methodologies is pinpointed in lot of methodological research literature ( During the preliminary steps of a research, qualitative technics are more often used. It was the case in our research project: individual investors are not well known and few scientific papers focus on them, the way they live their investment and their profile. Our research project was thus to study an unexplored territory. In fact, in the scientific literature, we can identify two research fields which can be linked to individual investors: -The behavioral finance which studies the psychological aspects of the financial decision making process and explains the irrationality of the investors when they take decisions linked to their financial investments. (Kumar & Goyal, 2014). 
Kumar & Goyal realized a systematic literature revue of the available literature in October 2013 concerning behavioral bias of individual investors when making financial investments. Among the 17 scientific articles analyze, none of them concerns Belgium, none of them used both qualitative and quantitative approaches and more than 82 % of the articles use secondary data. Conclusion of the authors is thus: "attention should be given to primary data-based empirical research to analyze the behavior of investors during investment decision-making" (Kumar & Goyal, 2014: 102). Exploratory studies are appropriate "when few is known about a specific management issue ". They are realised when quite no knowledge exists regarding a specific management problem and when the researcher tries to understand the global phenomenon linked to that problem (Baines, Fill & Page, 2012: 134). Those studies have the task of clarifying the complexity of the subject and of identifying the various aspects (Lendrevie & L vy, 2012:69). They allow to better understand a phenomenon, to prepare deeper studies and to build hypotheses to be validated via quantitative approaches. Qualitative approach comes usually before quantitative studies, but it is not an obligation. Qualitative approach can be done to understand more in depth a subject studied before in a quantitative way (Gauthy-Sin chal & Vandercammen, 2010; Cooper & Schindler, 2006;). Methodological flexibility is one of the main characteristics of Grounded Theory (GT). More linked to qualitative approaches, GT is not against quantitative data and their analysis. The founding book of Glaser and Strauss about GT has already talked about collecting quantitative and qualitative data in the same research project. "Openness and adaptability are important principles of GT and are very different from improvisation or "anything goes." (Luckerhoff & Guillemette, 2012b: 17) In GT, the research can be built based on inductive and deductive phases and can mobilize both quantitative and qualitative methodologies. In both qualitative and quantitative phases, we followed the GT spirit: to be open to the data, listen to the field, always go back to the basic material. Guillemette & Luckerhoff remind that what is important if you want to follow grounded theory is to use an epistemological position which changes the usual sequences of steps in the research. The basis is not hypothesis. The inductive logic leads to be open to what comes from the data. Induction is omnipresent in both the two phases of our research and at each moment of our general methodological reasoning. Different elements illustrate this: After two or three interviews, a first analysis has been realized and first conclusions were confronted to the field in the next interviewed; The structure of our interview (grids) evolved regarding collected data; As soon as we began interviews, schematisation (in construction of course) are proposed to be considered as basis for the analysis of the data. Notes are written at each level: during interviews, after interviews while discussions between the authors, schematisation, feelings while coding the results, The final theoretical proposition is developed based on multiple round trips between first schematisation, data, notes, The quantitative questionnaire is based on the qualitative interview grids. It's not base on a literature revue. It's the field which allowed to build the questionnaire. It was developed using the structure of the grids of the first phase. 
Some items were deleted, some others added. The words used by the people we met and important extracts from the interviews were also considered.
Results
The qualitative study and its 17 interviews let some interesting elements emerge that are linked to the way individual investors live the situation: namely the importance of the family and of the environment as the origin of their first investment on the stock exchange, the difference of perception between investors and their environment, the qualities of a good investor and their perception of financial intermediaries. Those elements and our theoretical propositions are under review right now. The large-scale survey and its 706 questionnaires allowed us to pinpoint 5 investors' profiles in terms of behaviour: the followers, the traditionalists, the sleeping investors, the experts and the gamblers. Different items emerged during the qualitative approach and were validated by the quantitative one. They join the sensitizing concepts defined:
-The influence of the family, identified during the interviews, also appeared in the survey, with 16% of the investors investing because a member of the family did it also; 11% of the investors state that it is a family tradition, which confirms the research of Barnea et al.;
-The influence of peers (Madrian & Shea, 2000; Duflo & Saez, 2002) is present both in the qualitative and quantitative steps, when we discovered that professional and family environments are at the origin of the investment;
-The illusion of control (Peteros & Aleyeff, 2013), or the propensity that people have to identify success as theirs and failure as due to independent factors, clearly appeared in our interviews and was confirmed via the survey (49% of the investors invest alone, without intermediaries, in order to keep control of their investment);
-The lack of trust regarding financial intermediaries, already pinpointed in our interviews, is also a quantitative result: 28% of the investors who invest alone do it because they consider that financial intermediaries only try to sell their own products without considering the real needs of the investors and without offering a real service or advice;
-The fact of investing in order to experience intense sensations and to gamble appeared in our interviews and in the fifth identified profile.
Difficulties Leading Grounded Theory
Several difficulties are linked to an inductive approach. We will try in this section to present the ones we met and the way we managed them. Our first difficulty was to identify potential people living our phenomenon. In fact, no list or database exists concerning individual investors. Collecting data during the qualitative phase was thus really a challenge. We worked to develop a network of investors around us and then used their own network to enlarge our approach. In the same way, not knowing the entire population was also a problem during the quantitative approach. It was not possible to use a statistical sample which could be representative of the population. We thus developed a partnership with a private actor (MediaFin), acting in the field of media and attracted by the future results of our research, which allowed us to contact their database of investors and to participate in an event.
At another level, during the transcription phase, we thought to use a software proposed with Dictaphones to gain time but the results we obtained were so bad we then decided to act in a productive manner by making useful transcription: we did in the same time a first analyse, identifying important items, making links it allowed thus to begin the analysis phase with prepared material. We also met problems when communicating about our research. The most important was linked to the qualitative step. It seems that qualitative approaches are not really well known and well perceived by people. Methodology and results are often compared to quantitative one as well while presenting a paper in a conference as when writing an article. Authors always were confronted to questions such as: « your sample seems too small, what's your « n ? « Is your sample representative?. Reaction was each time to use previous researches or scientific references to prove and explain the situation (what is theoretical sampling, what's the saturation concept). Finally, developing a quantitative approach via questionnaire and using an inductive point of view seems to be perceived as non-sense for lot of researchers, namely in management sciences. Quantitative approach is often, not for good reasons, assimilate to a deductive positioning. Glaser & Strauss explained already in their founder's book about grounded theory that a quantitative phase could be developed in an inductive framework. Nevertheless, our advice should be not to enter that debate if you want to publish your paper in a classical scientific review and to avoid to claim your inductive positioning Idea is not to be a liar, nether to pretend to be in a deductive positioning but just to avoid to claim that the use of previous researches has no importance before to enter the field. It's always possible to present the paper in a classical view: literature review, methodology, data, results without to pinpoint that the literature review has been written after the rest Conclusion Using grounded theory implies first of all to give priority to the field and the data which emerge from it. Nevertheless, the idea is not to ignore previous studies and to claim realizing a study without any theoretical references. It is more to try to develop a specific relation with the results proposed in previous researches, to create and let grow a theoretical feeling. On the contrary in a traditional deductive approach for which the literature review allow the emergence of the hypotheses, the grounded theory tries to avoid to force the data to enter in a specific prejudiced framework. Previous results are not considered before to develop the research but are thus mobilized during the various steps of it, reacting to the field. A radical inductive approach during the two phases of our research, qualitative and quantitative, enabled us to let emerge rich results as well as the living experience level of the investors as when building their profiles. Our research is original at various levels. First, it deals of Belgian individual investors, never studied before. Secondly, it mobilizes a qualitative approach followed by a quantitative one, which has never been used to study that thematic. Finally, it uses primary data obtained via our own large scale study which is different from the previous studies mainly using secondary data. (Kumar & Goyal, 2014). |
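As a methodological aside, the quantitative phase described above (a factorial analysis to extract the main dimensions, followed by a segmentation into investor profiles) was run in SPSS 22.0; a roughly equivalent open-source pipeline is sketched below on hypothetical questionnaire data. Item counts, variable names and random data are assumptions for illustration, not the authors' dataset or exact procedure.
# Hedged sketch of a factor-analysis-then-clustering pipeline analogous to the
# SPSS procedure described in the article (synthetic data, hypothetical item names).
import numpy as np
from sklearn.decomposition import FactorAnalysis
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans

rng = np.random.default_rng(42)
answers = rng.integers(1, 6, size=(706, 30)).astype(float)  # 706 respondents, 30 Likert items

# Extract the main dimensions, then segment respondents on their factor scores
scores = FactorAnalysis(n_components=5, random_state=0).fit_transform(
    StandardScaler().fit_transform(answers)
)
profiles = KMeans(n_clusters=5, n_init=10, random_state=0).fit_predict(scores)
print(np.bincount(profiles))  # size of each of the five investor profiles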
Cormoret
History
Cormoret is first mentioned in 1178. By the end of the 12th Century Cormoret was owned by the town of Saint-Imier. In 1317 Rodolphe IV., count of Neuchâtel granted the village and inhabitants to Jean Compagnet de Courtelary, under the authority of the Prince-Bishop of Basel. Cormoret belonged to the Barony of Erguel. In 1530 the village adopted the Reformed faith.
A fire destroyed part of the town in 1795, but about thirty buildings from the 16th to 19th century still remain in the village. Several water powered mills from the same period also still exist, though many of them were converted into factories in the 19th century. The local economy was built on agriculture and starting in the 19th century on watch manufacturing. The last watch part factory in the village closed in 1983. In 1885 Cormoret built the first public lighting system in Switzerland. The village's train station was built in 1890.
Geography
Cormoret has an area of 13.48 km² (5.20 sq mi). Of this area, 7.31 km² (2.82 sq mi) or 54.2% is used for agricultural purposes, while 5.7 km² (2.2 sq mi) or 42.3% is forested. Of the rest of the land, 0.45 km² (0.17 sq mi) or 3.3% is settled (buildings or roads), 0.03 km² (7.4 acres) or 0.2% is either rivers or lakes and 0.02 km² (4.9 acres) or 0.1% is unproductive land.
Of the built up area, housing and buildings made up 1.4% and transportation infrastructure made up 1.7%. Out of the forested land, 36.8% of the total land area is heavily forested and 5.5% is covered with orchards or small clusters of trees. Of the agricultural land, 8.5% is used for growing crops and 9.6% is pastures and 36.0% is used for alpine pastures. All the water in the municipality is flowing water.
On 31 December 2009 District de Courtelary, the municipality's former district, was dissolved. On the following day, 1 January 2010, it joined the newly created Arrondissement administratif Jura bernois.
Coat of arms
The blazon of the municipal coat of arms is Or two Pales Azure and overall on a Bend Argent three Mullets of Five Gules.
Demographics
Cormoret has a population (as of December 2017) of 486. As of 2010, 3.3% of the population are resident foreign nationals. Over the last 10 years (2000-2010) the population has changed at a rate of -5.9%. Migration accounted for -2.4%, while births and deaths accounted for -1.9%.
Most of the population (as of 2000) speaks French (459 or 86.6%) as their first language, German is the second most common (62 or 11.7%) and Russian is the third (2 or 0.4%). There is 1 person who speaks Italian and 1 person who speaks Romansh.
As of 2008, the population was 47.7% male and 52.3% female. The population was made up of 236 Swiss men (46.4% of the population) and 7 (1.4%) non-Swiss men. There were 256 Swiss women (50.3%) and 10 (2.0%) non-Swiss women. Of the population in the municipality, 179 or about 33.8% were born in Cormoret and lived there in 2000. There were 181 or 34.2% who were born in the same canton, while 112 or 21.1% were born somewhere else in Switzerland, and 45 or 8.5% were born outside of Switzerland.
As of 2010, children and teenagers (0–19 years old) make up 23.2% of the population, while adults (20–64 years old) make up 58.5% and seniors (over 64 years old) make up 18.3%.
As of 2000, there were 218 people who were single and never married in the municipality. There were 248 married individuals, 43 widows or widowers and 21 individuals who are divorced.
As of 2000, there were 64 households that consist of only one person and 25 households with five or more people. In 2000, a total of 206 apartments (86.9% of the total) were permanently occupied, while 12 apartments (5.1%) were seasonally occupied and 19 apartments (8.0%) were empty. The vacancy rate for the municipality, in 2011, was 2.02%.
The historical population is given in the following chart:
Politics
In the 2011 federal election the most popular party was the Swiss People's Party (SVP) which received 29.6% of the vote. The next three most popular parties were the Social Democratic Party (SP) (23.8%), the Green Party (14.3%) and the Conservative Democratic Party (BDP) (11.3%). In the federal election, a total of 161 votes were cast, and the voter turnout was 42.9%.
Economy
As of 2011, Cormoret had an unemployment rate of 2.03%. As of 2008, there were a total of 121 people employed in the municipality. Of these, there were 30 people employed in the primary economic sector and about 10 businesses involved in this sector. 42 people were employed in the secondary sector and there were 8 businesses in this sector. 49 people were employed in the tertiary sector, with 14 businesses in this sector.
In 2008 there were a total of 99 full-time equivalent jobs. The number of jobs in the primary sector was 24, all in agriculture. The number of jobs in the secondary sector was 38 of which 33 or (86.8%) were in manufacturing and 5 (13.2%) were in construction. The number of jobs in the tertiary sector was 37. In the tertiary sector; 11 or 29.7% were in wholesale or retail sales or the repair of motor vehicles, 16 or 43.2% were in a hotel or restaurant, 3 or 8.1% were technical professionals or scientists, 4 or 10.8% were in education.
In 2000, there were 48 workers who commuted into the municipality and 167 workers who commuted away. The municipality is a net exporter of workers, with about 3.5 workers leaving the municipality for every one entering. Of the working population, 10.1% used public transportation to get to work, and 58.6% used a private car.
Religion
From the 2000 census, 95 or 17.9% were Roman Catholic, while 295 or 55.7% belonged to the Swiss Reformed Church. Of the rest of the population, there were 2 members of an Orthodox church (or about 0.38% of the population), and there were 112 individuals (or about 21.13% of the population) who belonged to another Christian church. There was 1 individual who was Islamic. There was 1 person who was Buddhist and 1 individual who belonged to another church. 61 (or about 11.51% of the population) belonged to no church, are agnostic or atheist, and 16 individuals (or about 3.02% of the population) did not answer the question.
Education
In Cormoret about 185 or (34.9%) of the population have completed non-mandatory upper secondary education, and 45 or (8.5%) have completed additional higher education (either university or a Fachhochschule). Of the 45 who completed tertiary schooling, 66.7% were Swiss men, 26.7% were Swiss women.
The Canton of Bern school system provides one year of non-obligatory Kindergarten, followed by six years of Primary school. This is followed by three years of obligatory lower Secondary school where the students are separated according to ability and aptitude. Following the lower Secondary students may attend additional schooling or they may enter an apprenticeship.
During the 2010-11 school year, there were a total of 62 students attending classes in Cormoret. There was one kindergarten class with a total of 19 students in the municipality. The municipality had 2 primary classes and 43 students. Of the primary students, 11.6% were permanent or temporary residents of Switzerland (not citizens) and 11.6% have a different mother language than the classroom language.
As of 2000, there were 24 students in Cormoret who came from another municipality, while 85 residents attended schools outside the municipality.
Cormoret is home to the Bibliothèque communale de Cormoret library. The library has (as of 2008) 2,208 books or other media, and loaned out 1,800 items in the same year. It was open a total of 76 days with average of 2 hours per week during that year. |
Books: Attending: Medicine, Mindfulness, and Humanity: Being Present. Ronald Epstein. Scribner, 2017, HB, 304 pp, £18.99, 978-1501121715. This is one of those very rare books that I would recommend as essential reading for every GP. It shows us how we can reinvigorate our clinical practice, and make Cum Scientia Caritas a living, breathing reality. It's all there in the title. In the US and Canada, an attending physician is someone who has completed their clinical training and practises their medicine in a clinic or a hospital. But an attending physician, of course, is also someone who pays attention, who takes note of the needs and desires of their patients, and equally of their own feelings and reactions.
# Decide whether a recorded robot path on a grid could be a shortest path.
# Prints 'BUG' if the path revisits a cell or steps next to an earlier part of
# the path (so it cannot be optimal), otherwise prints 'OK'.
d = [[0, 1], [0, -1], [1, 0], [-1, 0], [0, 0]]  # the four neighbours plus the cell itself
path = input()
vis = []          # cells the path has already passed through or next to
cur = [0, 0]      # current position, starting at the origin
f = True
for p in path:
    prev = cur
    # map the move character to an offset in d
    if p == 'L': index = 0
    elif p == 'R': index = 1
    elif p == 'U': index = 2
    else: index = 3
    cur = [cur[0] + d[index][0], cur[1] + d[index][1]]
    if cur in vis:
        # the new cell is adjacent to (or equal to) an earlier cell: not a shortest path
        f = False
        print('BUG')
        break
    # mark the previous cell and all of its neighbours as forbidden for later steps
    for dx, dy in d:
        vis.append([prev[0] + dx, prev[1] + dy])
if f:
    print('OK')
Prediction of metabolisable energy value of broiler diets and water excretion from dietary chemical analyses. Thirty various pelleted diets were given to broilers (8/diet) for in vivo measurements of dietary metabolisable energy (ME) value and digestibilities of proteins, lipids, starch and sugars from day 27 to day 31, with ad libitum feeding and total collection of excreta. Water excretion was also measured. Amino acid formulation of diets was done on the basis of ratios to crude proteins. Mean in vivo apparent ME values corrected to zero nitrogen retention (AMEn) were always lower than the AMEn values calculated for adult cockerels using predicting equations from literature based on the chemical analyses of diets. The difference between mean in vivo AMEn values and these calculated AMEn values increased linearly with increasing amount of wheat in diets (P = 0.0001). Mean digestibilities of proteins, lipids and starch were negatively related to wheat introduction (P = 0.0001). The correlations between mean in vivo AMEn values and diet analytical parameters were the highest with fibre-related parameters, such as water-insoluble cell-walls (WICW) (r = -0.91) or Real Applied Viscosity (RAV) (r = -0.77). Thirteen multiple regression equations relating mean in vivo AMEn values to dietary analytical data were calculated, with R values ranging from 0.859 to 0.966 (P = 0.0001). The highest R values were obtained when the RAV parameter was included in independent variables. The direct regression equations obtained with available components (proteins, lipids, starch, sucrose and oligosaccharides) and the indirect regression equations obtained with WICW and ash parameters showed similar R values. Direct or indirect theoretical equations predicting AMEn values were established using the overall mean in vivo digestibility values. The principle of indirect equations was based on the assumption that WICW and ashes act as diluters. Addition of RAV or wheat content in variables improved the accuracy of theoretical equations. Efficiencies of theoretical equations for predicting AMEn values were almost the same as those of multiple regression equations. Water excretion was expressed either as the water content of excreta (EWC), the ratio of water excretion to feed intake (WIR) or the residual value from the regression equation relating water excretion to feed intake (RWE). The best regression predicting EWC was based on sucrose, fermentable sugars (lactose + oligosaccharides) and chloride variables, with positive coefficients. The best equations predicting WIR or RWE contained the sugar and chloride variables, with positive coefficients. Other variables appearing in these equations were AMEn or starch with negative coefficients, WICW, 'cell-wall-retained water', RAV or potassium with positive coefficients. |
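The multiple regression equations mentioned above relate the measured in vivo AMEn values to dietary analytical variables (for example water-insoluble cell walls, ash and viscosity). A minimal sketch of fitting such an equation is given below; it assumes a hypothetical table of diet analyses with placeholder coefficients, not the authors' data or published equations.
# Hedged sketch: multiple linear regression of in vivo AMEn on dietary analyses.
# Variable names, units and numbers are placeholders, not the published values.
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(7)
n_diets = 30
X = np.column_stack([
    rng.uniform(100, 200, n_diets),   # WICW (g/kg), hypothetical
    rng.uniform(40, 80, n_diets),     # ash (g/kg), hypothetical
    rng.uniform(1.5, 12.0, n_diets),  # Real Applied Viscosity (RAV), hypothetical
])
amen = 14.0 - 0.02 * X[:, 0] - 0.03 * X[:, 1] - 0.08 * X[:, 2] + rng.normal(0, 0.15, n_diets)

model = LinearRegression().fit(X, amen)
print("R^2 =", round(model.score(X, amen), 3), "coefficients =", model.coef_.round(3))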
static u8 const checksum[8] = {'p', 'a', 't', 'h', 't', 'r', 'a', 'c'};
//
//
//
struct Hit_Data {
s32 index;
r32 distance;
};
struct Material {
enum struct Type : u8 {
Diffuse,
Metal,
Dielectric,
};
Type type;
Vector3 albedo;
Vector3 emission;
r32 roughness;
r32 refractive_index;
Material & set_albedo(Vector3 value);
Material & set_emission(Vector3 value);
Material & set_roughness(r32 value);
Material & set_refractive_index(r32 value);
};
UNDERLYING_TYPE_META(Material::Type, u8)
IS_ENUM_META(Material::Type)
struct Shape {
enum struct Type : u8
{
Sphere,
Aabb,
};
Type type;
union {
Aabb3 aabb;
Vector4 sphere;
};
};
UNDERLYING_TYPE_META(Shape::Type, u8)
IS_ENUM_META(Shape::Type)
struct Game_Data {
u8 checksum[8];
u32 random_state;
Vector3 camera_position;
Quaternion camera_rotation;
Array_Dynamic<Shape> shapes;
Array_Dynamic<Material> materials;
};
//
//
//
Material mat0(Material::Type type) { return { type, 0 }; }
Material & Material::set_albedo(Vector3 value) { albedo = value; return *this; }
Material & Material::set_emission(Vector3 value) { emission = value; return *this; }
Material & Material::set_roughness(r32 value) { roughness = value; return *this; }
Material & Material::set_refractive_index(r32 value) { refractive_index = value; return *this; }
Shape aabb(Aabb3 value) { Shape s = {Shape::Type::Aabb, 0}; s.aabb = value; return s; }
Shape sphere(Vector4 value) { Shape s = {Shape::Type::Sphere, 0}; s.sphere = value; return s; }
//
// functions
//
namespace camera {
void reset_exposure(Game_Data * game_data) {
if (!input::get_current(Pointer_Keys::Key1)) {
return;
}
globals::render_buffer_f.exposure = 0;
clear_buffer(globals::render_buffer_f, {0, 0, 0, 0});
}
void process_input_rotation(Game_Data * game_data) {
if (!input::get_current(Pointer_Keys::Key1)) {
return;
}
Vector2i pointer_delta = input::get_pointer_delta();
game_data->camera_rotation = quaternion_multiply(
game_data->camera_rotation,
quaternion_from_radians(
vec3((r32)-pointer_delta.y, (r32)pointer_delta.x, 0) * deg_to_rad
)
);
}
void process_input_position(Game_Data * game_data) {
if (!input::get_current(Pointer_Keys::Key1)) {
return;
}
r32 delta_time = globals::get_delta_seconds();
r32 speed = input::get_current(Keyboard_Keys::Shift) * 3 + 1.0f;
r32 left = input::get_current(Keyboard_Keys::A) * speed;
r32 right = input::get_current(Keyboard_Keys::D) * speed;
r32 forward = input::get_current(Keyboard_Keys::W) * speed;
r32 back = input::get_current(Keyboard_Keys::S) * speed;
r32 down = input::get_current(Keyboard_Keys::Q) * speed;
r32 up = input::get_current(Keyboard_Keys::E) * speed;
Vector3 offset = {right - left, up - down, forward - back};
offset = quaternion_rotate_vector(game_data->camera_rotation, offset);
game_data->camera_position = game_data->camera_position + offset * delta_time;
}
}
|
EXTH-23. PRECLINICAL EFFICACY OF A TARGETED, BRAIN PENETRANT INHIBITOR OF FATTY ACID DESATURATION IN GLIOBLASTOMA There is increasing evidence that targeting de novo fatty acid synthesis could be effective for the treatment of primary and metastatic brain tumors, but brain penetrant inhibitors of this pathway are still lacking. We have previously reported that Stearoyl CoA Desaturase (SCD), a desaturase enzyme which converts saturated fatty acids into their unsaturated counterparts is a therapeutic target in glioblastoma (GBM). We showed that SCD exerts a cytoprotective role by protecting GBM cells against lipotoxicity and is essential for maintaining self-renewal and tumor initiating properties in GBM stem cells (GSCs). Using a panel of patient derived GSCs, mouse orthotopic GSC models, isogenic astrocytes models of gliomagenesis as well as in silico analysis, we report that in addition to SCD1, a second SCD isoform is also essential for GSCs maintenance. Further we demonstrate that while EGFR/PI3K/AKT signaling promotes lipogenesis, the activation of RAS/MEK/ERK signaling creates a metabolic vulnerability and sensitizes to SCD inhibitors. We tested a first-in-class, clinically relevant SCD inhibitor and showed that this compound effectively inhibits fatty acid desaturation in GSCs in a mouse orthotopic GSC model. Importantly, using different GSCs mouse models, we demonstrate that this SCD inhibitor can effectively increase overall survival as a monotherapy. Further, SCD inhibition impairs DNA-damage repair via homologous recombination thereby sensitizing to the standard of care therapeutics for GBM, ionizing radiation and temozolomide (TMZ). Consequently, combining this inhibitor with TMZ in mouse orthotopic GSC models leads to a significantly increased overall survival. Altogether, our results provide a deeper understanding of de novo fatty acid dynamics in GBM and support the clinical testing of a new SCD inhibitor, with favorable pharmacokinetic and pharmacodynamic properties, in patients diagnosed with GBM. |
package daemons
import (
"fmt"
"log"
"os"
"time"
gostatgrab "../gostatgrab"
)
var (
AppLog *log.Logger
SystemLog *log.Logger
DebugLog *log.Logger
ExpLog *log.Logger
AppLogFileHandler *os.File
SystemLogFileHandler *os.File
DebugLogFileHandler *os.File
ExpLogFileHandler *os.File
)
// SetupLog creates a log file as processname/filename
func SetupLog(filename string) (*log.Logger, *os.File) {
var logpath string
logpath = MakeAndGetLogFilepath(filename)
var file, err1 = os.OpenFile(logpath, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
if err1 != nil {
panic(err1)
} else {
fmt.Println("error free logpath set up ", filename, logpath)
}
return log.New(file, "", 0), file
}
func MakeAndGetLogFilepath(filename string) string {
folder := DEFAULT_FOLDER_TO_STORE
var LogFile string = ""
if processParams.remoteConfiguration {
err := os.MkdirAll("/tmp/logs/", 0777)
if err != nil {
AppLog.Panicln("ERROR : Cannot create process folder : " + "/tmp/logs/")
}
LogFile = "/tmp/logs/" + filename
} else {
fmt.Println("I am here")
err := os.MkdirAll(folder+"/"+processParams.name+"/logs/", 0777)
if err != nil {
AppLog.Panicln("ERROR : Cannot create process folder : " + folder + "/" + processParams.name + "/logs/")
}
LogFile = folder + "/" + processParams.name + "/logs/" + filename
}
return LogFile
}
func GetLogFilepath(filename string) string {
folder := DEFAULT_FOLDER_TO_STORE
var LogFile string = ""
if processParams.remoteConfiguration {
LogFile = "/tmp/logs/" + filename
} else {
LogFile = folder + "/" + processParams.name + "/logs/" + filename
}
return LogFile
}
// LogStats logs system statistics (CPU, memory, network I/O) at regular intervals
func LogStats() {
var networkInBytes uint64 = 0
var networkOutBytes uint64 = 0
var networkInPackets uint64 = 0
var networkOutPackets uint64 = 0
var pnetworkInBytes uint64 = 0
var pnetworkOutBytes uint64 = 0
var pnetworkInPackets uint64 = 0
var pnetworkOutPackets uint64 = 0
var ready bool = false
var interval time.Duration = time.Duration(appParams.LogInterval)
for {
memstats, err := gostatgrab.GetMemStats()
cpupercent, err1 := gostatgrab.GetCpuPercents()
networkstats, err2 := gostatgrab.GetNetworkIoStats()
if err != nil || err1 != nil || err2 != nil {
continue
}
networkInBytes = 0
networkOutBytes = 0
networkInPackets = 0
networkOutPackets = 0
for _, networkstat := range networkstats {
networkInBytes += networkstat.ReadBytes
networkOutBytes += networkstat.WriteBytes
networkInPackets += networkstat.ReadPackets
networkOutPackets += networkstat.WritePackets
}
instant := time.Now()
/*
fmt.Printf("Time: %v\n", int64(instant.UnixNano()/1e9) - pinstant)
fmt.Printf("CPUPercent: %.2f\n", cpupercent.User)
fmt.Printf("MemTotal (MB): %d\n", memstats.Total/(1024*1024))
fmt.Printf("MemUsed (MB): %d\n", memstats.Used/(1024*1024))
fmt.Printf("NetInBytes (KB): %d\n", (networkInBytes - pnetworkInBytes)/1024)
fmt.Printf("NetOoutbytes (KB): %d\n", (networkOutBytes -pnetworkOutBytes)/1024)
fmt.Printf("NetInPackets: %d\n", networkInPackets - pnetworkInPackets)
fmt.Printf("NetOutPackets: %d\n", networkOutPackets - pnetworkOutPackets)
fmt.Printf("\n")
*/
if ready {
SystemLog.Printf("%v %.2f %d %d %d %d %d %d\n",
(int64(instant.UnixNano() / 1e3)),
cpupercent.User,
memstats.Total/(1024*1024),
memstats.Used/(1024*1024),
((networkInBytes - pnetworkInBytes) / 1024),
((networkOutBytes - pnetworkOutBytes) / 1024),
(networkInPackets - pnetworkInPackets),
(networkOutPackets - pnetworkOutPackets))
}
ready = true
time.Sleep(interval * time.Second)
pnetworkInBytes = networkInBytes
pnetworkOutBytes = networkOutBytes
pnetworkInPackets = networkInPackets
pnetworkOutPackets = networkOutPackets
}
}
//DoEvery function repeats itself every d time Duration
func DoEvery(d time.Duration, f func()) {
	for range time.Tick(d) {
f()
}
}
func initLogHandlers() {
AppLog, AppLogFileHandler = SetupLog(APPLOGFILE)
SystemLog, SystemLogFileHandler = SetupLog(SYSTEMLOGFILE)
DebugLog, DebugLogFileHandler = SetupLog(DEBUGLOGFILE)
ExpLog, ExpLogFileHandler = SetupLog(EXPLOGFILE)
}
/*
func initUpLogHandlers_new(processName string) {
AppLog = SetupLog_new(APPLOGFILE, processName)
SystemLog = SetupLog_new(SYSTEMLOGFILE, processName)
DebugLog = SetupLog_new(DEBUGLOGFILE, processName)
ExpLog = SetupLog_new(EXPLOGFILE, processName)
}
*/
|
The Dynamic Diversity of Latin American Party Systems Most Latin American party systems change so often and in so many respects that the `typical' party system of each country can be described only in imprecise terms, if at all. However, the nature of party systems as they are defined in individual elections can be described in rich and fairly reliable detail. This article compares the party systems of 20th-century Latin America election by election through indicators of fragmentation, volatility, personalism, ideological clarity, mean left-right tendency and polarization. The data cover approximately 150 lower or single-house legislative elections in 20th-century Argentina, Bolivia, Brazil, Chile, Colombia, Costa Rica, Ecuador, Mexico, Peru, Uruguay and Venezuela. |
Evidence on hereditary occurrence of placental anastomoses in heterosexual twins in sheep. Pedigree analysis of 22 heterosexual litters demonstrating the leukocyte chimerism (XX/XY) was carried out. The analysis revealed the familial occurrence of the chimerism in 15 litters. Eight litters were obtained from four ewes and the other seven ones were sired by three rams. Moreover, it was shown that ten chimeric litters had a common male ancestor. The coefficients of inbreeding ranged from 0.0 to 0.0625, with a mean of 0.019. These results suggest that the development of placental anastomoses in sheep is genetically controlled. A possible mode of the genetic control is discussed. |
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch.nn.functional as F
import pyro
import pyro.distributions as dist
from pyro.contrib.gp.likelihoods.likelihood import Likelihood
def _softmax(x):
return F.softmax(x, dim=-1)
class MultiClass(Likelihood):
"""
Implementation of MultiClass likelihood, which is used for multi-class
classification problems.
MultiClass likelihood uses :class:`~pyro.distributions.Categorical`
distribution, so ``response_function`` should normalize its input's rightmost axis.
By default, we use `softmax` function.
:param int num_classes: Number of classes for prediction.
:param callable response_function: A mapping to correct domain for MultiClass
likelihood.
"""
def __init__(self, num_classes, response_function=None):
super().__init__()
self.num_classes = num_classes
self.response_function = _softmax if response_function is None else response_function
def forward(self, f_loc, f_var, y=None):
r"""
Samples :math:`y` given :math:`f_{loc}`, :math:`f_{var}` according to
.. math:: f & \sim \mathbb{Normal}(f_{loc}, f_{var}),\\
y & \sim \mathbb{Categorical}(f).
.. note:: The log likelihood is estimated using Monte Carlo with 1 sample of
:math:`f`.
:param torch.Tensor f_loc: Mean of latent function output.
:param torch.Tensor f_var: Variance of latent function output.
:param torch.Tensor y: Training output tensor.
:returns: a tensor sampled from likelihood
:rtype: torch.Tensor
"""
# calculates Monte Carlo estimate for E_q(f) [logp(y | f)]
f = dist.Normal(f_loc, f_var.sqrt())()
if f.dim() < 2:
raise ValueError("Latent function output should have at least 2 "
"dimensions: one for number of classes and one for "
"number of data.")
# swap class dimension and data dimension
f_swap = f.transpose(-2, -1) # -> num_data x num_classes
if f_swap.size(-1) != self.num_classes:
raise ValueError("Number of Gaussian processes should be equal to the "
"number of classes. Expected {} but got {}."
.format(self.num_classes, f_swap.size(-1)))
if self.response_function is _softmax:
y_dist = dist.Categorical(logits=f_swap)
else:
f_res = self.response_function(f_swap)
y_dist = dist.Categorical(f_res)
if y is not None:
y_dist = y_dist.expand_by(y.shape[:-f.dim() + 1]).to_event(y.dim())
return pyro.sample(self._pyro_get_fullname("y"), y_dist, obs=y)
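
# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original Pyro module); the shapes and
# the class count below are illustrative assumptions only.
if __name__ == "__main__":
    import torch

    f_loc = torch.zeros(3, 5)        # 3 latent GPs (one per class), 5 data points
    f_var = torch.ones(3, 5)
    likelihood = MultiClass(num_classes=3)
    y = likelihood(f_loc, f_var)     # draws labels from Categorical(softmax(f))
    print(y.shape)                   # torch.Size([5])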
|
Bayesian learning of biological pathways on genomic data assimilation MOTIVATION Mathematical modeling and simulation, based on biochemical rate equations, provide us a rigorous tool for unraveling complex mechanisms of biological pathways. To proceed to simulation experiments, it is an essential first step to find effective values of model parameters, which are difficult to measure from in vivo and in vitro experiments. Furthermore, once a set of hypothetical models has been created, any statistical criterion is needed to test the ability of the constructed models and to proceed to model revision. RESULTS The aim of our research is to present a new statistical technology towards data-driven construction of in silico biological pathways. The method starts with a knowledge-based modeling with hybrid functional Petri net. It then proceeds to the Bayesian learning of model parameters for which experimental data are available. This process exploits quantitative measurements of evolving biochemical reactions, e.g. gene expression data. Another important issue that we consider is statistical evaluation and comparison of the constructed hypothetical pathways. For this purpose, we have developed a new Bayesian information-theoretic measure that assesses the predictability and the biological robustness of in silico pathways. AVAILABILITY The FORTRAN source codes are available at the URL http://daweb.ism.ac.jpyoshidar/GDA/ SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online. |
Evaluation of the PROMIS pediatric global health scale (PGH-7) in children with asthma ABSTRACT Objective: To evaluate the reliability and validity of the PROMIS Pediatric Global Health scale, a 7-item measure of perceived physical, mental, and social health, in children with asthma. Methods: From February 2014 to February 2015, convenience samples of 817 year-old children (n = 182) and parents of 517 year-old children (n = 328) visiting an emergency department for treatment of asthma were enrolled. The Asthma Control Test was used to characterize children as controlled versus not controlled, and the PROMIS Asthma Impact Scale was used to assess asthma symptoms' impact on functional status. We conducted longitudinal analyses among 92 children and 218 parents at 3 weeks, and 74 children and 171 parents at 8 weeks after enrollment. Results: The PGH-7 reliability ranged from 0.66 to 0.81 for child-report and 0.76 to 0.82 for parent-proxy. In cross-sectional analyses, children with controlled asthma had PGH-7 scores 0.400.95 standard deviation units higher than those who were uncontrolled. The PGH-7 was responsive to changes in overall general health between time points, with moderate effect sizes (0.50.6 standard deviation units). In longitudinal analyses, PGH-7 scores were no different between those who stayed uncontrolled versus became controlled at 3 weeks of follow-up; however, by 8 weeks of follow-up, the differences between these groups were 0.70.8 standard deviation units, indicative of large effects. Conclusions: The PGH-7 is a reliable and valid patient-reported outcome for assessing general health among children with asthma. It is a useful complement to other asthma-specific outcome measures. |
#include "FadalightAdaptiveMesh/basicadaptivemesh2d.hpp"
#include "FadalightAdaptiveMesh/refiner2d.hpp"
// #include "FadalightMesh/quadtotri.hpp"
#include "FadalightMesh/refineinfo.hpp"
#include <limits>
using namespace FadalightAdaptiveMesh;
using namespace std;
/*--------------------------------------------------------------------------*/
void Refiner2d::globalRefine(int nref)
{
// std::cerr<<"Refiner2d::globalRefine "<<nref<<'\n';
// if(nref == 0)
// {
// return;
// }
for(int i = 0; i < nref; i++)
{
int comp=0;
for(face_leafpointer f = _getFaces().begin_leaf(); f != _getFaces().end_leaf(); f++)
{
_markCellToRefine( f );
}
_refine();
_constructAdaptInfo();
_getMesh()->reInitFadalightMesh();
}
// if( _getMesh()->quadToTri() )
// {
// _refineQuadToTri();
// }
}
/*--------------------------------------------------------------------------*/
void Refiner2d::_updateCopyEdge()
{
int ie = 0;
for(face_leafpointer f = _getFaces().begin_leaf(); f != _getFaces().end_leaf(); f++)
{
FaceInterface* F = *f;
for(int ii = 0; ii < F->NumberOfEdges(); ii++)
{
edge_pointer E = F->edge(ii);
const face_pointer N = _neighbour(f, E);
if(N == NULL)
{
if( ( *E )->copy()>0)
{
std::cerr<<"updatCopy ref"<<( *E )->boundaryid()<<'\n';
}
( *E )->copy() = 0;
continue;
}
int nchild = _getFaces().number_of_children(N);
if(nchild == 0)
{
( *E )->copy() = 0;
}
else
{
( *E )->copy() = 1;
ie++;
}
}
}
}
/*--------------------------------------------------------------------------*/
void Refiner2d::adaptMesh(std::string filename)
{
alat::armaivec marked_fadalightmesh_cells;
std::ifstream file( filename.c_str() );
if( !file.is_open() )
{
std::cerr<<"*** ERROR BasicRefiner2d::refine() cannot open file \""<<filename<<"\"\n";
exit(5);
}
marked_fadalightmesh_cells.load(file);
file.close();
for(int i = 0; i < marked_fadalightmesh_cells.size(); i++)
{
int imarked = marked_fadalightmesh_cells[i];
face_pointer f = _getCellMap2d()[imarked];
if(not *f)
{
std::cerr << "*** ERROR in Refiner2d::adaptMesh(): face_pointer NULL\n";
assert(0);
exit(1);
}
_markCellToRefine( f );
}
_makeRegular();
_refine();
_updateCopyEdge();
_constructAdaptInfo();
_getMesh()->updateHangingInfo();
_getMesh()->reInitFadalightMesh();
}
/*--------------------------------------------------------------------------*/
void Refiner2d::_refine()
{
// const FadalightMesh::CurvedBoundaryInformation* curvedboundaryinformation = _getCurvedBoundaries();
_getCellMapOk() = false;
_getNumberingOk() = false;
// put nodes on boundary
// if(curvedboundaryinformation)
// {
// for(edge_leafpointer e = _getEdges().begin_leaf(); e != _getEdges().end_leaf(); e++)
// {
// int color = ( *e )->boundaryid();
// if( curvedboundaryinformation->boundaryColorIsCurved(color) )
// {
// const FadalightMesh::CurvedBoundaryDescriptionInterface* BDI = curvedboundaryinformation->get(color);
// for(int ii = 0; ii < 2; ii++)
// {
// BDI->newton( ( *e )->node(ii)->getNode() );
// }
// }
// }
// }
//edge refinement
for(edge_leafpointer e = _getEdges().begin_leaf(); e != _getEdges().end_leaf(); e++)
{
if( !( ( *e )->to_refine() ) )
{
continue;
}
_edgerefiner.refine( e, _getLastNodeId(), _getLastEdgeId() );
std::vector<Node*>& newnodes = _edgerefiner.getNewNodes();
std::vector<Edge*>& newedges = _edgerefiner.getNewEdges();
// if(curvedboundaryinformation)
// {
// int color = ( *e )->boundaryid();
// if( curvedboundaryinformation->boundaryColorIsCurved(color) )
// {
// const FadalightMesh::CurvedBoundaryDescriptionInterface* BDI = curvedboundaryinformation->get(color);
// for(int ii = 0; ii < newnodes.size(); ii++)
// {
// BDI->newton( newnodes[ii]->getNode() );
// }
// }
// }
for(std::vector<Node*>::iterator it = newnodes.begin(); it != newnodes.end(); it++)
{
_getNodes().insert(*it);
}
for(std::vector<Edge*>::iterator it = newedges.begin(); it != newedges.end(); it++)
{
_getEdges().append_child(e, *it);
}
}
// face refinement
face_leafpointer it_begin = _getFaces().begin_leaf();
face_leafpointer it_end = _getFaces().end_leaf();
for(face_leafpointer f = it_begin; f != it_end; f++)
{
if( !( ( *f )->to_refine() ) )
{
continue;
}
if( !( _facerefiner->refine( *f, _getEdges(), _getLastEdgeId() ) ) )
{
continue;
}
std::vector<Node*>& newnodes = _facerefiner->getNewNodes();
std::vector<FaceInterface*>& newfaces = _facerefiner->getNewFaces();
// for(int i=0;i<newnodes.size();i++)
// {
// std::cerr << "???? newnodes " << newnodes[i]->getNode() << "\n";
// }
for(std::vector<Node*>::iterator it = newnodes.begin(); it != newnodes.end(); it++)
{
( *it )->id() = ++_getLastNodeId();
_getNodes().insert(*it);
}
for(std::vector<FaceInterface*>::iterator it = newfaces.begin(); it != newfaces.end(); it++)
{
face_pointer newf = _getFaces().append_child(f, *it);
for(int ii = 0; ii < ( *newf )->NumberOfEdges(); ii++)
{
edge_pointer enewf = ( *newf )->edge(ii);
( *enewf )->face( ( *enewf )->nfaces ) = newf;
( *enewf )->nfaces++;
}
( *it )->id() = ++_getLastFaceId();
( *it )->depth() = ( *f )->depth()+1;
}
}
}
/*--------------------------------------------------------------------------*/
void Refiner2d::_constructAdaptInfo()
{
_getMesh()->createGeometryObject("RefineInfo");
_getMesh()->constructNumbering();
int n = _getCellMap2d().size();
alat::Vector<alat::armaivec> SPC(n), SPN(n), SPE(n);
for(int i = 0; i < n; i++)
{
face_pointer p = _getCellMap2d()[i];
alat::armaivec& cells = SPC[i];
alat::armaivec& nodes = SPN[i];
alat::armaivec& edges = SPE[i];
int nchild = _getFaces().number_of_children(p);
if(nchild == 0)
{
assert( _getFaceId2Id().find( ( *p )->id() ) != _getFaceId2Id().end() );
cells.set_size(1);
cells[0] = _getFaceId2Id()[( *p )->id()];
int pnnodes = ( *p )->NumberOfNodes();
assert(pnnodes==4);
nodes.set_size( pnnodes );
for(int in = 0; in < pnnodes; in++)
{
nodes[in] = _getNodeId2Id()[( *p )->node(in)->id()];
}
int pnedges = ( *p )->NumberOfEdges();
assert(pnedges==4);
edges.set_size( pnedges );
for(int is = 0; is < pnedges; is++)
{
assert( _getEdgeId2Id().find( ( *( *p )->edge(is) )->id() ) != _getEdgeId2Id().end() );
edges[is] = _getEdgeId2Id()[( *( *p )->edge(is) )->id()];
}
}
else
{
cells.set_size(nchild);
int countnode=0, countedge=0;
alat::IntMap newnodstoid, newedgetoid;
for(int ic = 0; ic < nchild; ic++)
{
face_pointer f = _getFaces().child(p, ic);
assert( _getFaceId2Id().find( ( *f )->id() ) != _getFaceId2Id().end() );
cells[ic] = _getFaceId2Id()[( *f )->id()];
for(int in = 0; in < ( *f )->NumberOfNodes(); in++)
{
int icand = _getNodeId2Id()[( ( *f )->node(in) )->id()];
if(newnodstoid.find(icand)==newnodstoid.end())
{
newnodstoid[icand] = countnode++;
}
// if( std::find( nodes.begin(), nodes.end(), _getNodeId2Id()[( *f )->node(in)->id()] ) == nodes.end() )
// {
// countnode++;
// }
}
for(int is = 0; is < ( *f )->NumberOfEdges(); is++)
{
assert( _getEdgeId2Id().find( ( *( *f )->edge(is) )->id() ) != _getEdgeId2Id().end() );
int icand = _getEdgeId2Id()[( *( *f )->edge(is) )->id()];
if(newedgetoid.find(icand)==newedgetoid.end())
{
newedgetoid[icand] = countedge++;
}
// if( std::find(edges.begin(), edges.end(), icand) == edges.end() )
// {
// countedge++;
// }
}
}
// std::cerr << "Refiner2d::_constructAdaptInfo() countnode="<<countnode<<" countedge="<<countedge<<"\n";
nodes.set_size(countnode);
edges.set_size(countedge);
for(alat::IntMap::const_iterator p = newnodstoid.begin(); p!=newnodstoid.end(); p++)
{
nodes[p->second] = p->first;
}
for(alat::IntMap::const_iterator p = newedgetoid.begin(); p!=newedgetoid.end(); p++)
{
edges[p->second] = p->first;
}
// countnode = countedge = 0;
// for(int ic = 0; ic < nchild; ic++)
// {
// face_pointer f = _getFaces().child(p, ic);
// for(int in = 0; in < ( *f )->NumberOfNodes(); in++)
// {
// int icand = _getNodeId2Id()[( ( *f )->node(in) )->id()];
// if( std::find( nodes.begin(), nodes.end(), icand ) == nodes.end() )
// {
// nodes[countnode++] = icand;
// // nodes.push_back( _getNodeId2Id()[( *f )->node(in)->id()] );
// }
// }
// for(int is = 0; is < ( *f )->NumberOfEdges(); is++)
// {
// int icand = _getEdgeId2Id()[( *( *f )->edge(is) )->id()];
// if( std::find(edges.begin(), edges.end(), icand) == edges.end() )
// {
// edges[countedge++] = icand;
// // edges.push_back(icand);
// }
// }
// }
}
// std::cerr << "nodes " << nodes << "\n";
// SPC[i] = cells;
// SPN[i] = nodes;
// SPE[i] = edges;
}
FadalightMesh::RefineInfo* _refineinfo = dynamic_cast<FadalightMesh::RefineInfo*>( _getMesh()->getGeometryObject("RefineInfo") );
alat::SparsityPattern& coarsenodeids = _refineinfo->getCoarseNodes();
alat::SparsityPattern& coarsecellids = _refineinfo->getCoarseCells();
alat::SparsityPattern& coarsesideids = _refineinfo->getCoarseSides();
// std::cerr << "##################### _getNodeId2Id()\n" << _getNodeId2Id() << "\n";
//
alat::armaivec& nodeids = _refineinfo->getNodeIds();
nodeids.set_size(_getNodeId2Id().size());
for(alat::IntMap::const_iterator p = _getNodeId2Id().begin(); p!= _getNodeId2Id().end(); p++)
{
nodeids[p->first] = p->second;
}
coarsenodeids.set_size(SPN);
coarsecellids.set_size(SPC);
coarsesideids.set_size(SPE);
//
// std::cerr << "#####################\n";
// coarsenodeids.print(std::cerr);
// coarsenodeids.write("toto");
//
// alat::SparsityPattern coarsenodeids2;
// coarsenodeids2.read("toto");
// std::cerr << "#####-----------######\n";
// coarsenodeids2.print(std::cerr);
//
_refineinfo->refinfoinfonode[0] = 6;
_refineinfo->refinfoinfonode[1] = 1;
_refineinfo->refinfoinfonode[2] = 4;
_refineinfo->refinfoinfonode[3] = 8;
_refineinfo->refinfoinfoside[0] = 0;
_refineinfo->refinfoinfoside[1] = 2;
_refineinfo->refinfoinfoside[2] = 5;
_refineinfo->refinfoinfoside[3] = 7;
_refineinfo->refinfoinfocell[0] = 1;
_refineinfo->refinfoinfocell[1] = 2;
_refineinfo->refinfoinfocell[2] = 0;
_refineinfo->refinfoinfocell[3] = 3;
}
/*--------------------------------------------------------------------------*/
void Refiner2d::_markCellToRefine(const face_pointer f)
{
int ne = ( *f )->NumberOfEdges();
( *f )->to_refine() = 1;
if(_getMesh()->getClassName() == "FadalightMesh::TriangleMesh")
{
face_pointer pf = _getFaces().parent(f);
if(pf != NULL)
{
int nchild = _getFaces().number_of_children(pf);
if(nchild == 4)
{
if( f == ( _getFaces().child(pf, 3) ) )
{
for(int i = 0; i < 3; i++)
{
face_pointer childpf = _getFaces().child(pf, i);
( *childpf )->to_refine() = 1;
int nepf = ( *childpf )->NumberOfEdges();
for(int i = 0; i < nepf; i++)
{
( *( ( *childpf )->edge(i) ) )->nref = 2;
( *( ( *childpf )->edge(i) ) )->to_refine() = 1;
}
}
}
}
}
}
for(int i = 0; i < ne; i++)
{
( *( ( *f )->edge(i) ) )->nref = 2;
( *( ( *f )->edge(i) ) )->to_refine() = 1;
}
}
/*--------------------------------------------------------------------------*/
void Refiner2d::_makeRegular()
{
std::map<int, FaceSet> _nodeid2cell;
_nodeid2cell.clear();
for(face_leafpointer fp = _getFaces().begin_leaf(); fp != _getFaces().end_leaf(); fp++)
{
for(int ii = 0; ii < ( *fp )->NumberOfNodes(); ii++)
{
_nodeid2cell[( ( *fp )->node(ii) )->id()].insert(fp);
}
}
while(1)
{
int additional_refine = 0;
for(std::map<int, FaceSet>::iterator np = _nodeid2cell.begin(); np != _nodeid2cell.end(); np++)
{
const FaceSet& faces_of_node = np->second;
// int nlevelsignore = INT_MAX;
// int maxlevel = INT_MIN;
int nlevelsignore = numeric_limits<int>::max();
int maxlevel = numeric_limits<int>::min();
for(FaceSet::const_iterator q = faces_of_node.begin(); q != faces_of_node.end(); q++)
{
int newdepth = _getFaces().depth(*q);
if( ( *( *q ) )->to_refine() == 1 )
{
newdepth += 1;
}
nlevelsignore = std::min(nlevelsignore, newdepth);
maxlevel = std::max(maxlevel, newdepth);
}
if(maxlevel > nlevelsignore+2)
{
for(FaceSet::const_iterator q = faces_of_node.begin(); q != faces_of_node.end(); q++)
{
int newdepth = _getFaces().depth(*q);
std::cerr<<"--- "<<newdepth<<" "<<( *( *q ) )->to_refine()<<std::endl;
}
assert(0);
}
else if(maxlevel == nlevelsignore+2)
{
for(FaceSet::const_iterator q = faces_of_node.begin(); q != faces_of_node.end(); q++)
{
int newdepth = _getFaces().depth(*q);
if( ( *( *q ) )->to_refine() == 1 )
{
newdepth += 1;
}
if(newdepth == nlevelsignore)
{
if( ( *( *q ) )->to_refine() != 0 )
{
for(FaceSet::const_iterator q = faces_of_node.begin(); q != faces_of_node.end(); q++)
{
int newdepth = _getFaces().depth(*q);
std::cerr<<"--- "<<newdepth<<" "<<( *( *q ) )->to_refine()<<std::endl;
}
assert(0);
}
additional_refine++;
_markCellToRefine( ( *q ) );
}
}
}
}
if(additional_refine == 0)
{
break;
}
}
}
|
At CinemaCon last week, Michael Bay was asked if he was actually going to be leaving the Transformers franchise this time. He not only said that he’d like to do one more, but that there were already 14 stories ready to go in that universe.
Speaking with MTV’s Josh Horowitz, Bay answered the question about whether or not he was leaving by waffling a bit. He then said that there would be Easter eggs in The Last Knight that would connect to “different things to come.”
Bay’s most alarming response was to questions about the spinoffs planned to turn Transformers into a bona fide cinematic universe. The director said, “There are fourteen stories written. And there’s good stuff. So, I would like to do one of them, though.”
That is, to put it mildly, a lot of Transformers. It does make a little bit of sense, though. Transformers put together a writers room in order to churn out a bunch of new ideas. Presumably, they are going to pick the “best” ones to go forward and not just make, oh holy God, all fourteen. But who knows? They could all be gold! Or, at least, as good as the rest of this franchise.
Plus, at least one of them seems to be tempting Bay to stick around. Not that anyone believes he’s leaving for real anymore. Not since he goes around before every single movie saying it’s his last before jumping right back into the director’s chair.
Fourteen Transformers movies. My retinas won’t be able to handle that many explosions. |
Probabilistic analysis of voltage bands stressed by electric mobility This contribution proposes a method to analyze the capabilities of low voltage grids to meet the demands of additional loads due to electric mobility. Probabilistic load models for both the domestic and electric vehicle loads are developed to give insight into the stochastic nature of load distribution and voltage bands. The results not only provide the maximum voltage deviations, but their probability of occurrence during a given period of time. With this information, a recommendation for future grid planning can be developed, which takes into account the increasing load caused by electric mobility. Furthermore, a load management system is proposed to reduce maximum load thus avoiding costs for increasing feeder capacities. |
Who would be in your top ten?
Like a fine wine, some women just get better with age. We’ve compiled a list of the hottest women over 40 who still give their younger counterparts a run for their money.
10. Elle MacPherson, 44.
Nicknamed "the body" in her modeling days, now a businesswoman and director of a surfwear company called Hot Tuna. The first supermodel to have postage stamps made using her image. Divorced, recently single and looking for Mr Right.
9. Teri Hatcher, 43.
Rose to fame in The New Adventures of Superman, playing Lois Lane. Now plays Susan Mayer in Desperate Housewives and won a Golden Globe for that role. Also divorced and single.
8. Pamela Anderson, 41.
Found fame playing the part of CJ in Baywatch, being a Playboy model and, of course, that video with Tommy Lee. Married three times; may be single when this reaches the press, or could be back with any number of rocker ex-boyfriends or husbands.
7. Kim Cattrall, 52.
British-born actress who has been in many movies, most famous for her role as Samantha in Sex and the City and appearances in the Police Academy series. Married three times and wrote a book about orgasms with an ex-husband. Currently single.
6. Elizabeth Hurley, 43.
More famous for dating Hugh Grant and wearing THAT dress. Just make sure you don’t watch her movies or it might detract from this foxy English rose. Married to Arun Nayar, still.
5. Sharon Stone, 50.
An actress who has shown more than most. Basic Instinct was the film that made her an A-list celebrity, not forgetting the scene with the uncrossing of legs. Has hinted at being bisexual on many occasions; is now divorced and single.
4. Michelle Pfeiffer, 50.
Perhaps her sexiest role was that of Catwoman in the film Batman Returns; her least remembered role was in Grease 2. Married twice, and still married to the creator of Ally McBeal, David Kelley.
3. Kylie Minogue, 40.
The singer and actress first shot to fame in Neighbours and reached global fame with the song "Can't Get You Out of My Head". Had breast cancer and took a year off to recover. Famously dated Michael Hutchence; is now, however, single and looking for Mr Right to have babies with.
2. Halle Berry, 42.
Been gracing our screens in a wide range of movies, a firm favourite and too many sexy roles to mention here, apart from maybe that Bond scene on the beach. Married twice, divorced twice, now happy with a guy ten years her junior and the father of her child. Said to be happy and planning a second child.
1. Monica Belucci, 44.
Many films in many languages; she was in two of the Matrix films, and recently said, "I love the idea that when a man pays to see one of my films, he's paying me to feel pleasure." That's good enough for us to get her to the number 1 spot. She is married.
Did we miss anyone in your opinion?? If so, let us know in the comments.
In recent years, with the great progress of mobile telecommunication, digital mobile products of all kinds, such as mobile phones, laptops, PDAs and so on, have become an important part of daily life. Therefore, the demands on the wireless Internet, such as sending and receiving e-mail immediately and obtaining real-time information, are increasing rapidly, and how to connect wireless telecommunication with the Internet has become an important research topic. Please refer to FIG. 1, which is a schematic diagram showing the wireless Internet scheme. To access resources on the Internet, the user needs a point-to-point link to download and upload information. In fact, from the viewpoint of the wireless Internet, a point-to-point link can be divided into two parts: the wireless telecommunication from the mobile unit to the base station, and the network communication from the base station to the Internet. In the wireless telecommunication part, the data are transmitted from the mobile product to the base station and received from the base station wirelessly; it is a direct point-to-point connection. However, in the wired network part, the data are transmitted to the Internet via the existing infrastructure, such as the telephone network and optical fiber communication, and then sent to remote terminals such as servers, personal computers, workstations and so on. The network connection needs additional network control information for each of the connections.
In fact, the most common point-to-point transmission protocol on the Internet is the Transmission Control Protocol/Internet Protocol (TCP/IP), which is built into the traditional point-to-point transmission protocol of the network connection. Owing to its excellent reliability and robustness, and to the growth of the World Wide Web (WWW), TCP/IP is now utilized extensively throughout the Internet.
In a traditional network connection, if the transmitted data are large, they will be divided into several smaller portions to avoid occupying too much bandwidth in each transmission, and then transmitted successively in packet format; finally, all the packets are received and recombined by the remote receiver. However, because the networks extend in all directions and network conditions vary from moment to moment, the paths of the packets may differ, and thus the received sequence may be different from the original transmitting sequence. Therefore, to avoid confusing the packet sequence and to recombine all the packets correctly, certain labels must be defined on such packets, the so-called "headers".
The IP header comprises a Version field, which has 4 bits indicating the version of IP in use; an Internet Header Length (IHL) field with 4 bits for specifying the length of the IP header; a Type of Service field with 8 bits, which relates to quality-of-service types such as minimum delay, maximum throughput, maximum reliability and minimum cost; a Total Length field with 16 bits for indicating the total length of the packet; and an Identification field with 16 bits, which gives each packet a unique serial number so that the receiver can conveniently recombine the packets. Next, the header provides a set of 3 flag bits: an Indicate flag with 1 bit, shown as symbol A in FIG. 2, used to indicate whether the flag set is triggered; a Don't Fragment (DF) flag with 1 bit, shown as symbol B in FIG. 2, used to indicate whether the packet may be fragmented; and a More Fragment (MF) flag with 1 bit, shown as symbol C in FIG. 2, used to indicate whether the packet is the last one. The next field is a Fragment Offset field with 13 bits for indicating the offset of the fragment relative to the beginning of the original data. The IP header also comprises a Time to Live field with 8 bits, which is used to indicate the maximum time the packet is allowed to remain in the Internet system; a Protocol field with 8 bits for indicating the Internet protocol type the packet is using; a Header Checksum field with 16 bits for detecting whether the packet is transmitted correctly; a Source Address field with 32 bits for storing the address of the transmitting terminal; and a Destination Address field with 32 bits for storing the address of the receiver. In summary, the total length of the IP header is 20 bytes.
The TCP header comprises a Source Port field with 16 bits for indicating the working port of the transmitting terminal; a Destination Port field with 16 bits for indicating the working port of the receiver; a Sequence Number field with 32 bits, which corresponds to the sequence number of the packet; an Acknowledgment Number field with 32 bits for indicating the sequence number of the packet that has been received; a TCP Header Length field with 4 bits for specifying the length of the TCP header; and a Reserved field with 6 bits, which is not used yet. Following the reserved field are 6 control bits, including an Urgent Flag (URG) field, shown as symbol D in FIG. 2, with 1 bit for indicating whether the packet carries urgent data; an Acknowledgment Flag (ACK) field, shown as symbol E in FIG. 2, with 1 bit for indicating whether the packet asks for a response from the receiver; a Push Flag (PSH) field, shown as symbol F in FIG. 2, with 1 bit for indicating whether the packet pushes the receiver to deliver the data to the application program; a Reset the Connection Flag (RST) field, shown as symbol G in FIG. 2, with 1 bit for indicating whether the connection needs to be reset; a Synchronous Flag (SYN) field, shown as symbol H in FIG. 2, with 1 bit for indicating whether synchronization needs to be carried out; and a Finish Flag (FIN) field, shown as symbol I in FIG. 2, with 1 bit for indicating whether the transmission is over. After these control bits is a Buffer Length field with 16 bits, which is used to indicate how much free space remains, so as to avoid overflow errors. The TCP header also has a Checksum field with 16 bits for detecting whether the packet is transmitted correctly and an Urgent Pointer field with 16 bits for storing the urgent data pointer when the URG control bit is set. In summary, the total length of the TCP header is 20 bytes, and the length of the TCP header is basically a multiple of a word.
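To make the byte layout described above concrete, the following short sketch (illustrative only, not part of the patent) packs a minimal 20-byte IP header and a minimal 20-byte TCP header with Python's struct module; all field values are arbitrary placeholders.

import struct

# IPv4 header: version/IHL, type of service, total length, identification,
# flags/fragment offset, time to live, protocol, header checksum, source, destination.
ip_header = struct.pack(
    "!BBHHHBBH4s4s",
    (4 << 4) | 5,              # version 4, header length 5 words (20 bytes)
    0,                         # type of service
    40,                        # total length: 20-byte IP header + 20-byte TCP header
    0x1234,                    # identification
    0,                         # flags and fragment offset
    64,                        # time to live
    6,                         # protocol (6 = TCP)
    0,                         # header checksum (left as 0 in this sketch)
    bytes([192, 168, 0, 1]),   # source address
    bytes([192, 168, 0, 2]),   # destination address
)

# TCP header: ports, sequence/acknowledgment numbers, header length and control bits,
# buffer length (window), checksum, urgent pointer.
tcp_header = struct.pack(
    "!HHIIBBHHH",
    12345,                     # source port
    80,                        # destination port
    0,                         # sequence number
    0,                         # acknowledgment number
    5 << 4,                    # TCP header length = 5 words (20 bytes), reserved bits 0
    0x02,                      # control bits (SYN set)
    65535,                     # buffer length / window
    0,                         # checksum (left as 0 in this sketch)
    0,                         # urgent pointer
)

print(len(ip_header), len(tcp_header))   # 20 20, i.e. 40 bytes of header per packet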
From the foregoing, the total length of a typical TCP/IP header is 40 bytes. That is, each packet carries an additional 40 bytes of overhead, which is bulky and wasteful, and several new header compression formats have therefore been proposed to solve this problem. In the traditional network connection, the most common header compression method is Van Jacobson TCP/IP header compression, the so-called RFC 1144 compression. RFC 1144 can efficiently compress the header by eliminating fields that do not vary during the lifetime of a connection, such as the Version field, the Destination Address field and the Destination Port field.
In wireless communication, RFC 1144 is also the most common method used to compress headers at the present day. However, it was designed for the traditional wired network structure rather than for wireless telecommunication, so it does not take into account the limitations of wireless transmission, such as the narrower bandwidth. In fact, the major bottleneck of the wireless Internet is the wireless communication part, so increasing the efficiency of data transmission has become a very important topic. In wireless communication, every unnecessary transmission reduces the effective transmission rate; therefore, if some characteristics of wireless telecommunication can be exploited to carry out further data compression, the data transmission rate can be improved substantially.
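The basic idea behind such header compression can be illustrated with a toy sketch (this shows only the principle, not the actual RFC 1144 algorithm): most header fields are identical from one packet of a connection to the next, so the compressor only needs to send the few fields that changed.

# Toy illustration of the delta idea behind header compression (not RFC 1144 itself).
prev_header = {"version": 4, "src": "10.0.0.1", "dst": "10.0.0.2",
               "src_port": 12345, "dst_port": 80, "seq": 1000, "ack": 500}
next_header = dict(prev_header, seq=1040, ack=520)   # only sequence and ack advance

# Transmit only the fields whose values differ from the previous packet.
delta = {k: v for k, v in next_header.items() if prev_header[k] != v}
print(delta)   # {'seq': 1040, 'ack': 520} -- a few bytes instead of a full 40-byte header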
Human type-alpha transforming growth factor undergoes slow conformational exchange between multiple backbone conformations as characterized by nitrogen-15 relaxation measurements. Human type-alpha transforming growth factor (hTGF alpha) is a small mitogenic protein containing 50 amino acids and three disulfide bonds. It has both sequence and structural homology with epidermal growth factor (EGF). While the three-dimensional structures of hTGF alpha and other EGF-like proteins have been studied extensively, relatively little is known about conformational dynamics of these molecules. In this paper we describe nuclear relaxation measurements which probe the molecular dynamics of hTGF alpha in aqueous solution at neutral pH. In order to characterize conformational dynamics of hTGF alpha on both the fast (i.e., sub-nanosecond) and intermediate nitrogen-15 chemical-exchange (i.e., microsecond) time scales, we measured nitrogen-15 relaxation parameters at pH 7.1 +/- 0.1 and a temperature of 30 +/- 0.5 degrees C. Measurements of nitrogen-15 longitudinal (R1) and transverse (R2) relaxation rates, and 1H-15N heteronuclear NOE effects, were then interpreted using an extended Lipari-Szabo analysis to provide estimates of the locations and amplitudes of fast internal motions and the locations of nitrogen-15 chemical-exchange line broadening. These results demonstrate that, under conditions of pH and temperature at which it is tightly bound by the EGF receptor, hTGF alpha is a highly dynamic molecule. Indeed, some 40% of the backbone amide groups of hTGF alpha, including many at the interface between the two subdomains, exhibit significant nitrogen-15 chemical-exchange line broadening indicative of interconversions between multiple protein conformations on the microsecond time scale. The distribution of these sites on the three-dimensional protein structure suggests that these dynamic fluctuations are due to (i) partial unfolding of the core beta-sheet, (ii) hinge-bending motions between the N- and C-terminal subdomains, and/or (iii) disulfide bond isomerization in the solution structure of hTGF alpha at neutral pH. |
After nearly three months of renovation, the Burke Baker Planetarium reopens Friday in time for Spring Break.
When the newly renovated Burke Baker Planetarium opens Friday, the Houston Museum of Natural Science will have gone where no other museum has gone before.
“We wanted to be the first of something and we thought being the highest resolution planetarium in the world was awfully cool,” says Carolyn Sumners, Phd, the museum’s vice president for Astronomy and Physical Sciences.
Sumners played a key role in the $2.5 million project. She adds that the American Museum of Natural History in New York wanted first dibs on the system, but their timing was off.
Some of the highlights include 10 projectors that deliver more than 50 million pixels onto the new dome. The contrast ratio of the display – which means the ratio between the brightest and darkest colors – is huge.
“It looks so black in there,” Sumners says. “The star field looks like West Texas on a great, great night.”
Board member Stephen Brown, MD, says they’ve also changed the seating, which used to be on a flat surface.
“Now we have a tilt of approximately 20-plus degrees to the seating, as well as to the dome,” Brown says. “Which enables the viewers to really feel like they’re immersed in the dome experience.” |
Q:
How broken is LCG in the case of partial output?
Suppose we have a linear congruential generator defined by $X_{i+1} = (a X_i + c) \bmod 2^n$ where $a$, $c$ and $n$ are all known and we would like to determine the initial value $X_0$. However, if we can only see the $k$ high-order bits of each of the $X_i$ for $i \geq 0$, the best algorithm I know of is a brute force which tries $O(2^{n-2k})$ possibilities, and this is exponential in $n$.
Compare this to the Mersenne Twister, whose initial state can be computed in polynomial time given only the high-order bits of each output (it reduces to solving a system of $n$ equations in $n$ unknowns). Is there a better algorithm out there which can solve for the initial state of an LCG given only the high-order bits of each output, and if not, is LCG actually broken?
A:
Yes, there are techniques based on lattice reduction that are faster than brute force. See, e.g., https://crypto.stackexchange.com/a/20714/351, especially the first 3 papers cited there.
One can also use meet-in-the-middle techniques to get the running time down to $O(2^{(n - k)/2})$ if $k \ge n/2$: see https://crypto.stackexchange.com/a/10609/351. |
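As a concrete toy illustration of the truncated-LCG setting in the question (small, arbitrary parameters; this shows only the naive search, not the lattice or meet-in-the-middle attacks mentioned above):

# Truncated LCG with assumed toy parameters: 16-bit state, top 6 bits visible per output.
n, k = 16, 6
a, c = 4005, 10605          # arbitrary example constants
mod = 1 << n

def lcg_states(x0, count):
    xs = [x0]
    for _ in range(count - 1):
        xs.append((a * xs[-1] + c) % mod)
    return xs

secret_x0 = 0xBEEF % mod
observed = [x >> (n - k) for x in lcg_states(secret_x0, 8)]   # high k bits of each state

# Naive recovery: try every possible value of the unseen low (n - k) bits of X_0 and
# keep the candidates whose simulated high bits match all observed outputs.
candidates = []
for low in range(1 << (n - k)):
    x0 = (observed[0] << (n - k)) | low
    if [x >> (n - k) for x in lcg_states(x0, 8)] == observed:
        candidates.append(x0)

print(hex(secret_x0), [hex(x) for x in candidates])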
package com.servitec.modelo.dao.interfaz;
import org.springframework.data.repository.CrudRepository;
import com.servitec.modelo.entidad.PaqueteClienteSerial;
public interface IPaqueteClienteSerialDao extends CrudRepository<PaqueteClienteSerial, Long> {
}
|
Polycystic Ovary Syndrome: An Evolutionary Adaptation to Lifestyle and the Environment

Polycystic ovary syndrome (PCOS) is increasingly recognized as a complex metabolic disorder that manifests in genetically susceptible women following a range of negative exposures to nutritional and environmental factors related to contemporary lifestyle. The hypothesis that PCOS phenotypes are derived from a mismatch between ancient genetic survival mechanisms and modern lifestyle practices is supported by a diversity of research findings. The proposed evolutionary model of the pathogenesis of PCOS incorporates evidence related to evolutionary theory, genetic studies, in utero developmental epigenetic programming, transgenerational inheritance, metabolic features including insulin resistance, obesity and the apparent paradox of lean phenotypes, reproductive effects and subfertility, the impact of the microbiome and dysbiosis, endocrine-disrupting chemical exposure, and the influence of lifestyle factors such as poor-quality diet and physical inactivity. Based on these premises, the diverse lines of research are synthesized into a composite evolutionary model of the pathogenesis of PCOS. It is hoped that this model will assist clinicians and patients to understand the importance of lifestyle interventions in the prevention and management of PCOS and provide a conceptual framework for future research. It is appreciated that this theory represents a synthesis of the current evidence and that it is expected to evolve and change over time.

Introduction
Polycystic ovary syndrome is a reversible metabolic condition that makes a significant contribution to the global epidemic of lifestyle-related chronic disease. Many of these chronic diseases share a similar pathogenesis involving the interaction of genetic and environmental factors. The revised International Guidelines for the assessment and management of women with PCOS emphasize that the associated metabolic dysfunction and symptoms should initially be addressed via lifestyle interventions. A unified evolutionary model proposes that PCOS represents a mismatch between our ancient biology and modern lifestyle. Evolutionary medicine is an emerging discipline involving the study of evolutionary processes that relate to human traits and diseases and the incorporation of these findings into the practice of medicine. Evolutionary medicine brings together interdisciplinary research to inform clinical medicine based on the influence of evolutionary history on human health and disease. Previous utilization of the principles of evolutionary medicine has been limited to monogenetic diseases (cystic fibrosis, sickle cell anemia, phenylketonuria and many others), drug resistance of microorganisms, tumor growth and chemoresistance. Future insights from the application of evolutionary research offer further potential. In humans, the origin of excess androgens may be from maternal, fetal or placental sources. In addition, emerging and concerning evidence suggests that EDC may contribute to altered fetal programming and play a role in the pathogenesis of PCOS. In utero genomic programming of metabolic and endocrine pathways can increase the susceptibility of offspring to develop PCOS following exposure to specific nutritional and environmental conditions. This view of the pathogenesis of PCOS is consistent with the Developmental Origins of Health and Disease (DOHaD) model proposed by Neel.
Postnatal exposure to lifestyle and environmental factors, such as poor-quality diet and EDC, may activate epigenetically programmed pathways that further promote the observed features of PCOS. Dietary and lifestyle interventions have demonstrated that many of the clinical, metabolic and endocrine features of PCOS can be reversed. Lifestyle-induced changes in the gastrointestinal tract microbiome are another significant factor in the etiology of PCOS. Dysbiosis of the gut microbiota has been hypothesized to play a role in increased gastrointestinal permeability, initiating chronic inflammation, insulin resistance (IR) and hyperandrogenism. Numerous studies have reported reduced alpha diversity of the microbiome that has been associated with the metabolic, endocrine and clinical features observed in women with PCOS. The resulting dysbiosis has been shown to be reversible after interventions aimed at improving diet quality or treatment with probiotics or synbiotics. When these same genetic variants are exposed to modern lifestyle and environmental influences, maladaptive physiological responses occur. The prior advantages of insulin resistance, hyperandrogenism, enhanced energy storage and reduced fertility in ancestral populations become pathological and result in the observed features of PCOS in contemporary women (Figure 1).

Materials and Methods
The literature search focused on research publications related to the pathogenesis of PCOS using the keywords listed above and related mesh terms for data on the evolutionary aspects of PCOS, genetic studies, in utero developmental epigenetic programming, transgenerational inheritance, metabolic features including insulin resistance, obese and lean PCOS phenotypes, reproductive changes and subfertility, impact of the microbiome and dysbiosis, possible effects of endocrine-disrupting chemical exposure and the influence of lifestyle factors such as diet and physical activity. The databases searched included PubMed, Scopus, Cochrane and Google Scholar. Relevant papers were selected, and citation searches were performed. The present manuscript synthesizes the findings into a unified evolutionary model. The following text is presented as a narrative review of factors involved in the pathogenesis of PCOS and is discussed in ten main subject areas that provide the rationale for the development of a unified model.
1. Evolution
2. Genetics
3. Developmental Epigenetic Programming
4. Microbiome and Dysbiosis
5. Insulin resistance
6. Obesity and the lean paradox
7. Endocrine-Disrupting Chemical Exposure
8. Lifestyle contributors to the pathogenesis of PCOS
9. Circadian Rhythm Disruption and PCOS
10. Conceptual Framework and Summary of the Unified Evolutionary Model

Evolution
The description of PCOS phenotypes can be found in medical records from antiquity and the modern syndrome was described over 80 years ago. Nevertheless, there is ongoing debate regarding the evolutionary origins of PCOS. PCOS susceptibility alleles may have arisen in our phylogenetic ancestors, in the hunter-gatherer Paleolithic period of the Stone Age, after the Neolithic Agricultural Revolution or following the Industrial Revolution.
From an evolutionary perspective, nearly all genetic variants that influence disease risk have human-specific origins, but the systems they relate to have ancient roots in our evolutionary ancestors. Regardless of the precise timing of the origin of PCOS in humans, the complex metabolic and reproductive gene variants identified in women with PCOS relate to ancient, evolutionarily conserved metabolic and reproductive survival pathways. Although evolutionary hypotheses about disease vulnerability are impossible to prove, they have the potential to frame medical thinking and direct scientific research towards the proximate causes of disease. Multiple hypotheses have been proposed regarding the evolutionary origins of PCOS and related metabolic diseases. These hypotheses are focused on the relative importance of metabolic survival adaptations versus improved reproductive success, or a combination of both. A detailed analysis of these hypotheses, and of the complexities of the evolutionary considerations, has been reviewed elsewhere and is beyond the scope of the present review. One common theme is that PCOS may be viewed as a "conditional phenotype" where a specific set of conditions has unmasked normally unexpressed or partly expressed genetic pathways, which then provide a survival advantage under certain environmental conditions. All organisms have physiological adaptive responses to deal with changing environmental conditions (starvation, fasting, physical threat, stress and infection) and the varying demands of internal physiological states (pregnancy, lactation and adolescence). It has been proposed that the PCOS phenotype may have been invoked in specific environmental conditions in ancestral populations as a short-, medium- or even long-term adaptive survival mechanism. The view of PCOS as a conditional phenotype proposes that these physiological responses become pathological in our modern environment due to factors such as food abundance, reduced physical activity, circadian disruption, stress and environmental chemical exposure. The transgenerational evolutionary theory of the pathogenesis of PCOS encompasses all of the above ideas to explain the observed pathophysiological and clinical features of PCOS. It is generally accepted that almost all pre-industrial societies and animal populations experienced seasonal or unpredictable episodes of food shortage that applied evolutionary pressure to develop metabolic and reproductive adaptive survival responses. It is also appreciated that metabolic and reproductive pathways are interconnected and involve reciprocal feedback control mechanisms. During periods of starvation, anorexia or excessive weight gain, reproduction is down-regulated and ovulation becomes irregular or ceases. Similarly, metabolic function is coordinated with the menstrual cycle to ensure optimal physiological conditions for fertilization, implantation, pregnancy, parturition and lactation. Recent research has elaborated on the details of how some of these complex regulatory mechanisms interact using specific hormonal, nutrient-sensing and intracellular signaling networks. Details of the mechanisms underlying the proposed adaptive survival advantages of IR, hyperandrogenism, enhanced energy storage and subfertility have been obtained from paleolithic records, animal models and human populations exposed to adverse environmental conditions such as war and famine-inflicted starvation.
Multiple lines of evidence support the maladaptive response of human populations to rapidly changing nutritional, physical, psychological and cultural environments, in the modern world. These "adaptations" result in pathological responses to IR, hyperandrogenism, enhanced energy storage and ovulation ( Figure 1). Theories of evolutionary mismatch have also been advanced to explain all of the cluster of metabolic diseases associated with PCOS (type 2 diabetes, metabolic syndrome, NAFLD and cardiovascular disease) and follow the same set of basic principles and explanations. This common body of evolutionary evidence is supported by the increasing incidence of metabolic-related disease, such as diabetes and obesity, in developed countries and in developing nations adopting a Western diet and lifestyle. In addition, the demonstrated reversibility of PCOS and related metabolic and biochemical features following changes in diet, increased physical activity and other lifestyle interventions, adds further support to a transgenerational evolutionary model. Genetics The heritable nature of PCOS has been proposed since the 1960 s following a range of familial, twin and chromosomal studies. Cytogenetic studies failed to identify karyotypic abnormalities and genetic studies did not show a monogenic inheritance pattern following examination of candidate genes. In addition, two or more phenotypes can be present in the same family suggesting that some of the phenotypic differences could be accounted for by variable expression of the same shared genes. The mapping of the human genome in 2003 and the publication of the human haplotype map (more than one million single nucleotide polymorphisms of common genetic variants) in 2005, lead to the realization that most DNA variation is shared by all humans and is inherited as blocks of linked genes (linkage disequilibrium). These advances enabled a revolution in case-control studies and the development of GWAS which map the entire human genome looking for susceptibility genes for complex traits such as obesity, type 2 diabetes and PCOS. The first PCOS GWAS was published in 2010 and demonstrated 11 gene loci associated with PCOS. Additional loci have subsequently been found in several different ethnic groups. The first GWAS analysis of quantitative traits was published in 2015 and showed that a variant (rs11031006) was associated with luteinizing hormone levels. The largest GWAS included a meta-analysis of 10,074 PCOS cases and 103,164 controls and identified 19 loci that confer risk for PCOS. The genes associated with these loci involve gonadotrophin action, ovarian steroidogenesis, insulin resistance and type 2 diabetes susceptibility genes. The first GWAS using electronic health record-linked biobanks has introduced greater investigative power and identified 2 additional loci. These variants were associated with polycystic ovaries and hyperandrogenism (rs17186366 near SOD2) and oligomenorrhoea and infertility (rs144248326 near WWTR1). In addition to identifying common gene variants for PCOS phenotypes, finding the same signals (THADA, YAP1 and c9orf3) in Chinese and European populations suggests that PCOS is an ancient trait that was present before humans migrated out of Africa. More recently Mendelian randomization (MR) studies have been used to explore the potential causative association between gene variants identified in GWAS and PCOS. Many of the gene variants identified in GWAS are located in non-coding regions of DNA. 
The genes or functional DNA elements through which these variants exert their effects are often unknown. Mendelian randomization is a statistical methodology used to jointly analyze GWAS and quantitative gene loci to test for association between gene expression and a trait, due to a shared or potentially causal variant at a specific locus. A detailed analysis of MR methodology and the limitations of this statistical tool is beyond the scope of the present review. Although MR studies have the potential to infer causation it is recognized that they also have limitations in PCOS research. Nevertheless, preliminary evidence suggests that several genes related to obesity, metabolic and reproductive function, may play a causal role in the pathogenesis of PCOS. Decades of genetic research has therefore characterized PCOS as a polygenic trait that results from interactions between the environment and susceptible genomic traits. The failure to identify a qualitative or monogenic inheritance pattern and the findings from GWAS, MR, familial and twin studies, suggests that the heritability of PCOS is likely to be due to the combination of multiple genes with small effect size, as has been found with obesity and type 2 diabetes [79,80,. Polygenic traits are the result of gene variants that represent one end of the bell-shaped normal distribution curve of continuous variation in a population. From an evolutionary perspective, women with PCOS may represent the "metabolic elite" end of the normal distribution curve, being able to efficiently store energy in periods of food abundance and down-regulate fertility in times of food scarcity, or even in anticipation of reduced seasonal food availability as a predictive adaptive response. The realization that PCOS is a quantitative trait (phenotype determined by multiple genes and environmental factors) has far-reaching implications for the diagnosis, treatment and prevention of symptoms and pathology associated with PCOS. The implications require a shift in thinking about PCOS as a "disease" to a variation of normal metabolic and reproductive function. This shift invites a change in vocabulary from talking about "disorder" and "risk" to talking about "expression" and "variability". This new understanding supports and reinforces an evolutionary model of the pathogenesis of PCOS. In keeping with this model, multiple lines of evidence suggest that inherited PCOS gene variants are developmentally programmed in a way that primes them for activation by nutritional and environmental factors in postnatal life. Developmental Epigenetic Programming The developmental programming of PCOS represents changes in gene expression that occur during critical periods of fetal development. Following fertilization, most parental epigenetic programming is erased and dramatic epigenomic reprogramming occurs. This results in transformation of the parental epigenome to the zygote epigenome and determines personalized gene function. Compelling evidence shows that a wide range of maternal, nutritional and environmental factors can effect fetal development during these critical periods of programming. These include hormones, vitamins, diet-derived metabolites and environmental chemicals. In addition, epigenetic reprogramming of germ-line cells can lead to transgenerational inheritance resulting in phenotypic variation or pathology in the absence of continued direct exposure. 
Experimental studies in primates, sheep, rats and mice show that PCOS-like syndromes can be induced by a range of treatments including androgens, anti-Mullerian hormone and letrozole. Nevertheless, there is significant debate regarding when an animal model qualifies as PCOS-like. The model used and the method of induction of PCOS phenotypes therefore needs to be carefully scrutinized when generalizing findings from animal research to women with PCOS. Most of the animal and human research on the developmental origins of PCOS has focused on the role of prenatal androgen exposure. This has been extensively reviewed in numerous previous publications. This research has resulted in a proposed "two hit" hypothesis for the development of PCOS phenotypes. The "first hit" involves developmental programming of inherited susceptibility genes and the "second hit" arises due to lifestyle and environmental influences in childhood, adolescence and adulthood. If PCOS is a quantitative trait involving normal gene variants, as suggested by the evolutionary considerations and findings from genetic research, then the "first hit" may result from normal developmental programming events as occurs with other gene variants. According to this hypothesis, the polygenic susceptibility genes would be normally "activated" and "primed" to respond to future maternal and environmental conditions and exposures, as would be the case with many other normal genes. In addition, the susceptibility alleles may be "activated" or "functionally enhanced" by a range of maternal and environmental factors, as is usually presumed to be the case in PCOS. This developmental plasticity would provide a mechanism for a predictive adaptive response, based on inputs from the maternal environment that could be used to program metabolic and reproductive survival pathways, to better prepare the offspring for the future world in which they may be expected to live. Parental lifestyle factors including diet, obesity, smoking and endocrine-disrupting chemicals, have all been shown to modulate disease risk later in life. The original description of the fetal origin's hypothesis proposed that poor maternal nutrition would increase fetal susceptibility to the effects of a Western-style diet later in life. Subsequent studies have confirmed that maternal exposure to either nutrient excess or deficit, can have long-term consequences for the health of the progeny. Evidence from human and animal studies suggests that maternal obesity programs the offspring for increased risk of developing obesity, hyperglycemia, diabetes, hypertension and metabolic syndrome. The developmental origins of PCOS may have been due to different factors in ancestral and modern populations. It has been hypothesized that environmental stress, infection, nutrient deprivation, fetal growth restriction and stress hormone responses may have resulted in maternally mediated modulation of gene expression in ancestral offspring. Some of these factors have been investigated and confirmed in modern populations subject to starvation and extreme environmental conditions. In contrast, altered fetal programming in modern societies may be secondary to maternal overnutrition, sedentary behavior, obesity, emotional stress, circadian rhythm disruption, poor gut health or environmental chemical exposure. The preconception and pregnancy periods therefore provide a unique opportunity for lifestyle interventions that promote optimal future health for both the mother and the offspring (Figure 2). Figure 2. 
Nutritional and environmental influences throughout the life course and the perpetuation of the transgenerational inheritance of polycystic ovary syndrome. Adapted with permission from Ref. 2020, Journal of ACNEM.

Microbiome and Dysbiosis
The gastrointestinal microbiome is now appreciated to play a central role in human health and disease. The microbiome is known to co-regulate many physiological functions involving the immune, neuroendocrine and metabolic systems via complex reciprocal feedback mechanisms that operate between the microbial ecosystem and the host. Evidence from studies in Western populations, hunter-gatherer societies and phylogenetic studies in other species has attempted to place the human microbiome into an evolutionary context. Although microbes clearly impact host physiology and have changed along branches of the evolutionary tree, there is ongoing debate regarding whether the microbiome can evolve according to the usual evolutionary forces. Nevertheless, it has been argued that focusing on functional pathways and metabolic roles of microbial communities, rather than on specific microbes, provides a better model for understanding evolutionary fitness. The co-evolution of the microbiome and human physiology may therefore be important in understanding the differences between ancient adaptive physiological survival mechanisms and modern lifestyle-related pathological responses, in women with PCOS (Figure 1). Twin studies and GWAS show that host genetics can influence the microbiome composition, and microbes can exert effects on the host genome, although the environment has an important role. Humans are constantly adapting to the gut microbiome to try to determine which microorganisms are beneficial or harmful. Immune genes involved in this process are the most rapidly evolving protein-encoding genes in the mammalian genome. Diversification of microbes allows humans to access dietary niches and nutritional components they otherwise would not be able to access, which may be beneficial and ultimately lead to the integration of specific microbes into the ecosystem. Although no living population today carries an ancestral microbiome, comparison studies of non-Western and Western populations show significant differences in the relative abundances of common phyla and a much greater species diversity in non-Western populations. A review of non-human primate and human gut microbiome datasets revealed a changing microbiome in response to host habitat, season and diet, although there appear to be common species-specific symbiotic communities. Rapid human cultural changes have resulted in significant dietary modifications in urban-industrialized communities and shifted the microbiome at an unprecedented rate. The result has been the development of a mismatch between human metabolic genes and bacteria that enhance fat storage. In our evolutionary past, when nutrients were scarce, it has been theorized that host selection led to the maintenance of microbes that enhance nutrient uptake or host energy storage. However, in the modern environment, where a high-fat, high-sugar, low-fiber diet has become common and easily accessible, integration of these microbes leads to maladaptive physiological responses.
For metabolically thrifty individuals with PCOS, harboring microbes that enhance energy storage escalates the evolutionary conflict, furthering the development of insulin resistance and therefore progression to obesity and type 2 diabetes. Further compounding this maladaptive response is the loss of microbes that are required to access other dietary niches. One example is the loss of symbiotic species of Treponema in individuals living in urbanindustrialized communities. A change from the ancestral hunter-gatherer diet, where foods consumed changed seasonally and a wide variety of food components were eaten, to a diet that is similar across seasons and significantly less varied, is another likely contributor to reduced diversity of the microbiomes of individuals living in urbanized-industrialized communities. The majority of women with PCOS are overweight or obese and evidence indicates that the microbiome of obese individuals is capable of extracting more energy from the host diet compared with the microbiome of lean individuals. This is thought to be driven by an expansion in pro-inflammatory species of bacteria, such as E. coli, and a depletion of anti-inflammatory bacteria such as Faecalibacterium prausnitzii. Chronic low-grade 'metabolic' inflammation, or meta-inflammation, is a result of an imbalanced gut microbiome that promotes the development of insulin resistance and type 2 diabetes. The dysbiosis of gut microbiota theory of PCOS, proposed by Tremellen in 2012, accounts for the development of all of the components of PCOS (multiple ovarian follicles, anovulation or menstrual irregularity and hyperandrogenism). The theory proposes that a poor-quality diet and resulting imbalanced microbiome, induces intestinal permeability and endotoxemia, exacerbating hyperinsulinemia. Increased insulin levels promote higher androgen production by the ovaries and disrupts normal follicle development. Metabolic, endocrine and environmental factors associated with PCOS are not mutually exclusive, and therefore their relative contributions to dysbiosis in PCOS remains uncertain. Consuming a balanced diet that is low in fat and high in fiber, can also restore balance to the ecosystem (termed eubiosis). A recent study showed that dietary intake of fiber and vitamin D was significantly decreased in both lean and obese women with PCOS, compared to healthy controls, and correlated with lower diversity of the gut microbiome. Dysbiosis is reversible with improvement in diet quality augmented by the addition of probiotics or synbiotics [51,. Dysbiosis is a consistent finding when looking at the microbiome of women with PCOS. Although most studies are small, dysbiosis has consistently been found to correlate with different physiological parameters, such as obesity, sex hormones and metabolic defects. Similar to microbiomes associated with obesity, the microbiomes of individuals with PCOS have generally been found to have lower alpha diversity (lower numbers of bacterial taxa) than controls, and most studies describe an altered composition of taxa relative to controls. However, the bacterial taxa observed to be either increased, depleted or absent in PCOS differs from study to study. This is likely due to both the immense inter-individual variation in microbiotas, as well the fact that PCOS is a quantitative trait with women with various degrees and levels of obesity and sex hormones. 
In keeping with the developmental origins hypothesis previously discussed, maternal androgens may alter the composition and function of the microbiome, therefore facilitating the pathogenesis of PCOS. One study showed that beta diversity, which is used to measure differences between groups, was negatively correlated with hyperandrogenism, suggesting that androgens play a significant role in dysbiosis. The 'first hit' in utero may therefore combine with vertical transmission of a dysbiotic microbiome from a mother with PCOS, resulting in dysbiosis in the offspring. Preconception and pregnancy provide a unique opportunities for lifestyle and dietary interventions aimed at restoring eubiosis, to enable the transference of a balanced ecosystem to the offspring, via vertical transmission. The accumulating scientific evidence strongly supports the significant role played by the microbiome in the pathogenesis and maintenance of PCOS, consistent with research in other related metabolic conditions. The role of dysbiosis is supported by over 30 proof-ofconcept studies that have recently been reviewed. Dysbiosis is therefore a significant factor in the pathogenesis of PCOS and an important component of a unified evolutionary model. Dysbiosis represents a maladaptive response of the microbiome to modern lifestyle influences and is a modifiable factor in the treatment of women with PCOS. Insulin Resistance There are several dilemmas when assessing the role of IR in women with PCOS. There is no consensus on the definition of IR, measurement is difficult, whole-body IR is usually measured although it is recognized that IR can be selective being either tissue-specific or pathway-specific within cells, normal values are categorical and determined by arbitrary cut-offs (4.45 mg/kg/min), testing is not recommended in clinical practice, reported prevalence rates in obese and lean women vary widely, and the significance of IR as a pathognomonic component of PCOS is an area of debate. Despite these limitations, it is hypothesized that IR is a significant proximate cause of PCOS and is intrinsic to the underlying pathophysiology. In addition, it is recognized that IR plays a major role in the pathophysiology of all of the metabolic diseases, cardiovascular disease, some neurodegenerative diseases, and selected cancers. Insulin resistance is therefore considered to be the main driver for many diseases and makes a significant contribution to the chronic disease epidemic. Nevertheless, being able to vary the sensitivity and physiological action of insulin is thought to have conferred a significant adaptive survival role in many animals throughout evolutionary history. It has been proposed that IR may have evolved as a switch in reproductive and metabolic strategies, since the development of IR can result in anovulation and reduced fertility, in addition to differential energy repartitioning to specific tissues. Insulin receptors are located on the cell membranes of most tissues in the body. Ligand binding to the alpha-subunit induces autophosphorylation of specific tyrosine residues on the cytoplasmic side of the membrane. The activated insulin receptor initiates signal transduction via the phosphatidylinositol-3 kinase (PI-3K) metabolic pathway and the mitogen-activated protein kinase pathway (MAPK) which is involved in cell growth and proliferation. 
Insulin is an anabolic hormone that facilitates glucose removal from the blood, enhances fat storage and inhibits lipolysis in adipose tissue, stimulates glycogen synthesis in muscle and liver and inhibits hepatic glucose output. IR can be defined as a state where higher circulating insulin levels are necessary to achieve an integrated glucose-lowering response. IR results from alterations to cellular membrane insulin-receptor function or intracellular signaling, enzyme, metabolic or gene function. Insulin resistance can be caused by a wide variety of mechanisms that have the ability to disrupt any part of this metabolic signaling system. These include autoantibodies, receptor agonists and antagonists, hormones, inflammatory cytokines, oxidative stress, nutrient sensors and metabolic intermediates. Physiological regulation of insulin function can be viewed as an adaptive mechanism to regulate the metabolic pathway of insulin signaling (PI-3K), in response to changing environmental conditions or during normal alterations of internal states (pregnancy, lactation, adolescence). The physiological activation of IR allows the organism to switch from an anabolic energy storage state to a catabolic or energy mobilizing state. This allows free fatty acids to be mobilized from adipose tissue, which are then converted to glucose in the liver and released into the circulation. As a result of this metabolic change, blood sugar levels are maintained for vital metabolic processes and brain function. This adaptive protective mechanism can be pathway-specific during periods of growth, such as pregnancy, lactation and adolescence, so that only the metabolic signaling (PI-3K) is inhibited and not the mitogenic pathway (MAPK), which may even be up-regulated. When the physiology of insulin function is considered to be a quantitative or continuous variable from an evolutionary perspective, it is likely that all women with PCOS, whether obese or lean, have reduced insulin sensitivity. A systematic review and meta-analysis of euglycemic-hyperinsulinemic clamp studies found that women with PCOS have a 27% reduction in insulin sensitivity compared to body mass index (BMI) and age-matched controls. In evolutionary terms, women with a PCOS metabolic phenotype would have increased survival chances during times of environmental or physiological demand for altered energy metabolism, but be more vulnerable to the pathological effects of IR when exposed to modern lifestyle factors. In particular, a poor-quality, high-glycemic, high-fat, low-fiber diet has been shown to cause IR. As discussed in the dysbiosis section, diet-related changes in the gastrointestinal microbiome have also been shown to cause IR in women with PCOS. Numerous studies have shown that dietary modification, or treatment with probiotics or synbiotics, has the potential to restore normal insulin function. Consumption of a high-glycemic-load diet results in rapid increases in blood sugar levels that cause compensatory hyperinsulinemia. Excessive dietary intake of glucose and fructose are converted to fatty acids by de novo lipogenesis in the liver, transported to adipocytes via lipoproteins, released as fatty acids to adipocytes and stored in fat globules as triglycerides. As a result of nutrient overload, diacylglycerol, the penultimate molecule in the synthesis of triglyceride, accumulates in the cytoplasm and binds with the threonine amino acid in the 1160 position of the insulin receptor. 
This inhibits autophosphorylation and down-regulates the metabolic PI-3K pathway and causes IR. This process has the potential to be reversible following changes in diet quantity and quality, as has been shown to occur with calorie restriction, fasting, time-restricted eating, gastric bypass surgery, low saturated fat and low glycemic diets. Diets high in animal protein or saturated fat can also cause IR independent of BMI. These mechanisms provide the rationale for the principal recommendation of the International Guidelines that women with PCOS should be advised about dietary modification as the first line of management in all symptom presentations.

Obesity and the Lean PCOS Paradox
Insight can be obtained into the role of obesity in women with PCOS by examining the evolutionary history, genetic studies and pathological disorders of adipose tissue. The ability to store energy is a basic function of life beginning with unicellular organisms. In multicellular organisms, from yeast to humans, the largest source of stored energy is as triglycerides in lipid droplets in order to provide energy during periods when energy demands exceed caloric intake. Understanding the biological functions of adipose tissue has progressed from energy storage and thermal insulation to that of a complex endocrine organ with immune and inflammatory effects and important reproductive and metabolic implications. Adipose tissue is organized into brown adipose tissue (BAT) and white adipose tissue (WAT), both with different functions. Although the evolutionary origins of BAT and WAT are the subject of ongoing debate, BAT is located in the supraclavicular and thoracic prevertebral areas and is primarily involved in cold thermogenesis and regulation of basal metabolic rate. WAT is distributed in multiple anatomical areas such as visceral adipose tissue (VAT) and subcutaneous adipose tissue (SAT) and functions as a fat storage depot and an endocrine organ. An additional layer of SAT is thought to have evolved as insulation against cool night temperatures in the Pleistocene open savannah. The lower body distribution of SAT in women is hypothesized to have evolved to provide additional calorie storage for pregnancy and lactation and is unique to human females. Lower body SAT has a metabolic program that makes it less readily available for everyday energy needs, but it can be mobilized during pregnancy and lactation. In addition, excess accumulation of SAT is much less likely to cause IR and metabolic dysfunction and explains why IR is not observed in all obese individuals. Visceral WAT is associated with IR in women with PCOS leading to both metabolic and reproductive problems. Multiple lines of evidence from evolutionary history, genetic and twin studies, support a genetic basis for obesity and differences in obese and lean phenotypes in women with PCOS. The majority of women with PCOS are overweight or obese, with reports ranging from 38-88%. Studies comparing obese and lean women with PCOS have several methodological problems including small sample size, overlap of PCOS characteristics with normal pubertal changes, non-standardized diagnostic criteria, and limited generalizability to the entire population due to a focus on a specific ethnic group. In addition, most of the studies examining body composition in PCOS have relied on anthropometric measurements (BMI, waist circumference, waist-to-hip ratio) which are considered inaccurate compared with the current gold standard of magnetic resonance imaging.
Consequently, there is wide heterogeneity in reports examining the relationship between body composition measures, including extent of VAT, and metabolic changes such as IR. In humans, there is large individual variation in the fat storage capability and expandability of different adipose tissue depots. It has been hypothesized that once the genetically determined limit of expandability of SAT is reached, there is expansion of VAT and excess lipid accumulation in muscle, liver and other organs, resulting in IR, inflammation and metabolic dysregulation. We hypothesize that lean women with PCOS have a genetically determined limited ability to store excess lipid in SAT, but develop increased lipid deposition in VAT and organs such as the liver, resulting in metabolic dysregulation and IR in a similar manner to what occurs in obese women with PCOS. The wide variation in the genetic limitation of SAT expansion is also supported by studies in individuals with lipodystrophy. Lipodystrophies are a heterogeneous group of rare inherited and acquired disorders characterized by a selective loss of adipose tissue. They are classified on the basis of the extent of fat loss as generalized, partial or localized. Patients with congenital generalized lipodystrophy have a generalized deficiency of fat from birth, usually have severe IR and develop diabetes at puberty. As a consequence of a genetically limited ability for SAT lipid storage, lipids can only be stored ectopically in non-adipocytes, resulting in major health consequences including IR, fatty liver, diabetes and PCOS. In contrast to generalized lipodystrophy, patients with familial partial lipodystrophy have normal fat distribution at birth but lose SAT in the limbs, buttocks and hips at puberty. Fifty percent of women develop diabetes and 20-35% develop irregular periods and polycystic ovaries. Despite the rare nature of these syndromes, much has been learned about the underlying genetic variants involved. Elucidation of clinical subtypes and the genetic background of patients with lipodystrophies may pave the way to new insights into the role of fat partitioning and obesity, and has implications for understanding the pathogenesis of insulin resistance, diabetes and PCOS. Lean women with PCOS may have a genetic predisposition for limited SAT fat storage, coupled with underlying metabolic predispositions that result in deposition of excess lipid in VAT and liver and the observed metabolic features of IR, fatty liver and diabetes. If the extent of IR and ectopic fat deposition is excessive, the resulting hormonal changes may be sufficient to cause oligomenorrhoea and subfertility, as occurs with secondary familial partial lipodystrophy type 2. If this underlying mechanism is confirmed in future studies, the main difference between women with lean or obese PCOS may be the combined effects of metabolic programming and the genetically determined extent of SAT fat deposition. This would explain why lean women have all the same clinical, biochemical and endocrine features, although possibly less severe, as overweight and obese women with PCOS.

Endocrine-Disrupting Chemical Exposure
Anthropogenic chemical exposure is ubiquitous in the environment and has possible effects on many aspects related to women's health and PCOS [36,.
The identification of more than 1000 EDC in food, air, water, pesticides, plastics, personal care products, and other consumer goods raises specific concerns for pregnant women and women with increased susceptibility to metabolic diseases such as PCOS [36,172,. Accumulating evidence suggests that EDC may be involved in the pathogenesis of PCOS given their known and potential hormonal and metabolic effects. This includes many of the areas that have been considered in the unified evolutionary model, such as developmental epigenetic programming, microbiome composition and function, metabolic processes such as IR, and regulation of body weight. Many observational studies have demonstrated the presence of EDC in maternal and fetal serum and urine, amniotic fluid, cord blood and breast milk. Six classes of EDC have been shown to cross the placenta, confirming that the fetus is exposed at all stages of development. Although it is impossible to perform experimental studies in humans, evidence from epidemiological, molecular toxicology and animal studies provides compelling evidence of adverse developmental effects and transgenerational toxicity. The realization of the tragic effects of DES in the 1970s was the first example of an in utero exposure causing serious transgenerational health effects. Several estrogenic EDC have been associated with birth outcomes that are thought to be associated with the development of PCOS. These include decreased birthweight (perfluoroalkyl substances, perfluorooctanoic acid) and preterm birth (di-2-ethylhexyl phthalate). Prenatal exposure to androgenic EDC (triclosan, glyphosate, tributyltin, nicotine) is of increasing concern, given the suspected epigenetic role of in utero androgen exposure in the pathogenesis of PCOS. As a result, implementation of the precautionary principle is a high priority in counselling women with PCOS. International professional bodies (The Royal College of Obstetricians and Gynaecologists, Endocrine Society, FIGO) have recommended that all pregnant women should be advised of the possible risks of EDC and that education programs be developed to inform health professionals. An explanation of the pathogenesis of PCOS should include reference to environmental chemical exposure and open the way for more detailed discussion of specific personalized advice and lifestyle recommendations.

Lifestyle Contributors to the Pathogenesis of PCOS
Several lifestyle factors have been investigated for their role in the pathogenesis of PCOS. These include diet, exercise, stress, sleep disturbance, circadian disruption and exposure to environmental chemicals. Recent advances in genomics, epigenetics, metabolomics, nutrigenomics, evolutionary biology, computer technology and artificial intelligence are providing many insights into the mechanisms of how lifestyle factors impact the pathogenesis of PCOS. Nutritional studies based on diet indices, diet composition and metabolomics have identified dietary components that contribute to a healthy eating pattern. Healthy diet patterns, or wholefood diets, have been found to be effective in controlling and reversing many of the symptoms and metabolic alterations associated with PCOS. As previously discussed, the modern Western diet and lifestyle is at odds with our evolutionary background. One dietary component that differs significantly in ancestral and modern populations is dietary fiber intake. Assessment of dietary fiber intake is also a good surrogate marker for a healthy wholefood diet.
In general, our traditional hunter-gatherer ancestors consumed significantly more fiber than modern populations. Studies that have investigated the dietary patterns of remaining contemporary hunter-gatherer societies have found their dietary fiber intake to be around 80-150 g per day. This contrasts with the contemporary Western diet, where the average fiber intake is 18.2 g per day in children and 20.7 g per day in adults. Adequate dietary fiber consumption is important as it has several benefits, such as improved insulin sensitivity, reduced blood glucose levels, decreased systemic inflammation, and lower serum levels of androgens and LPS, all of which have been linked to the pathogenesis of PCOS. Recent systematic reviews of observational studies and randomized controlled trials have found dietary fiber consumption to be inversely related to risk of obesity, type 2 diabetes, and cardiovascular disease. A recent cohort study from Canada found that obese women with PCOS consumed significantly less dietary fiber than normal weight women without PCOS. In addition, fiber intake of women with PCOS was negatively correlated with IR, fasting insulin, glucose tolerance and serum androgens. Hence, the mismatch between the amount of fiber traditionally consumed and the fiber content of Western diets may be an important dietary component contributing to the increased rates of PCOS seen in developed and developing nations.

Circadian Rhythm Disruption and PCOS
The circadian rhythm is a mechanism with which living organisms can synchronize their internal biological processes with the external light and dark pattern of the day. Circadian rhythms have formed a central component of the evolutionary adaptation of all organisms to a variety of environmental conditions, from prokaryotes to complex multicellular organisms. Most organisms experience daily changes in their environment, including light availability, temperature and food. Hundreds of thousands of years of evolution have synchronized the rhythmic daily programming of internal metabolic, endocrine and behavioral systems to the external environmental conditions. Circadian clocks anticipate environmental changes and confer a predictive adaptive survival benefit to organisms. The normal function of the circadian system is based on a hierarchical network of central and peripheral clocks. The central, or master clock, is in the suprachiasmatic nucleus in the anterior hypothalamus. It is strategically placed to communicate with multiple physiological homeostatic control nuclei (body temperature, metabolic rate, appetite, sleep), pituitary hormonal systems (gonadal, thyroid, somatotrophic, adrenal), the autonomic nervous system (digestion, heart rate), and conscious cortical centers (behavior, motivation, reward, reproduction). Humans are programmed for specific day and night-time survival behaviors that are regulated by the availability of temperature, feeding and sunlight. Photons of light stimulate specialized photoreceptors in the retinal ganglion layer which transmit an electrical impulse to the cells of the master clock via the retinohypothalamic tract. The central clock can then convey rhythmic information to peripheral clocks in other tissues and organs throughout the body. Feeding and fasting cycles are the primary time cues for circadian clocks in peripheral tissues. Circadian clocks exist in all cells, including the microbiome, and function as autonomous transcriptional-translational genetic feedback loops.
The changing length of daylight, determined by the rotation of the earth on its axis, requires that the autonomous clocks are reset, or entrained, on a daily basis. The molecular mechanisms of circadian clocks are similar across all species and are regulated by genetic enhancer/repressor elements, epigenetic modulation by methylation and acetylation, posttranslation modification of regulatory proteins, and a variety of hormonal and signaling molecules. This complex interconnected regulatory framework, ensures that the same molecules that regulate metabolism and reproduction, also contribute to a bidirectional feedback system with the autonomous circadian circuits. This results in synchronicity of internal physiology with environmental cues, to optimize both individual and species survival. Evolution has therefore provided a mechanism for humans to adapt and survive under the selective pressures of food scarcity, seasonal changes in sunlight and a range of temperature exposures. The evolutionary adaptive survival benefit of synchronized circadian systems in ancient populations is in marked contrast to the multiple circadian disruptions that are associated with modern lifestyle. These include poor-quality diet, improper meal timing and altered feeding-fasting behavior, sub-optimal exercise timing, disrupted sleep-wake cycles, shift work, EDC, and stress. Changes in all of these parameters are correlated with significant increases in obesity, diabetes, cardiovascular disease, and some cancers. Not surprisingly, lifestyle-related disturbances of circadian rhythms have also been investigated for their role in the pathogenesis of PCOS. The available evidence suggests that circadian disruption has detrimental effects on in utero development, altered metabolism and insulin resistance, body weight and obesity, and fertility. All these influences are relevant to an evolutionary model of the pathogenesis of PCOS. Recognition of the impact of lifestyle behaviors on circadian dysregulation and metabolic and reproductive function, opens the way for targeted intervention strategies to modulate and reverse these effects. These include regular meal timing, time-restricted feeding, restoration of normal sleep cycles, optimal exercise timing, limitation of exposure to bright light at night, and improved diet quality. Recognition of circadian dysfunction and the investigation of lifestyle interventions should be a priority in both clinical management and future research in PCOS. Conceptual Framework and Summary of the Unified Evolutionary Model The evolutionary model proposes that PCOS is a condition that arises from the inheritance of genomic variants derived from the maternal and paternal genome. In utero fetal metabolic, endocrine and environmental factors modulate developmental programming of susceptible genes and predispose the offspring to develop PCOS. Postnatal exposure to poor-quality diet, sedentary behavior, EDC, circadian disruption and other lifestyle factors activate epigenetically programmed pathways, resulting in the observed features. Dietary factors cause gastrointestinal dysbiosis and systemic inflammation, insulin resistance and hyperandrogenism. Continued exposure to adverse lifestyle and environmental factors eventually leads to the development of associated metabolic conditions such as obesity, GDM, diabetes, NAFLD and metabolic syndrome (Figure 1). Balanced evolutionary selection pressures result in transgenerational transmission of susceptible gene variants to PCOS offspring. 
Ongoing exposure to adverse nutritional and environmental factors activates developmentally programmed genes and ensures the perpetuation of the syndrome in subsequent generations. The DOHaD cycle can be interrupted at any point from pregnancy to birth, childhood, adolescence or adulthood by targeted intervention strategies (Figure 2). In summary, we propose that PCOS is an environmental mismatch disorder that manifests after in utero developmental programming of a cluster of normal gene variants. Postnatal exposure to adverse lifestyle and environmental conditions results in the observed metabolic and endocrine features. PCOS therefore represents a maladaptive response of ancient genetic survival mechanisms to modern lifestyle practices. Comprehensive International Guidelines have made 166 recommendations for the assessment and management of PCOS. We believe the current unified evolutionary theory of the pathogenesis of PCOS provides a conceptual framework that may help practitioners and patients understand the development of PCOS symptoms and pathology in the context of our modern lifestyle and environment. It will hopefully contribute to improved communication, result in improved feelings of empowerment over the personal manifestations of PCOS, improve compliance, reduce morbidity, increase quality of life and inform future research (Figure 3).

Conclusions
Substantial evidence and discussion support an evolutionary basis for the pathogenesis of polycystic ovary syndrome, although many of the mechanistic details are yet to be determined. Nevertheless, multiple lines of evidence from evolutionary theory, comparative biology, genetics, epigenetics, metabolism research, and cell biology provide supportive evidence and hypothesis-generating data. The ability of animals to synchronize internal physiology, metabolism and reproductive function with our changing external environment and habitat is a necessary requirement for individual and species survival. The co-operative and sometimes competitive evolution of metabolism and reproduction provided adaptive survival mechanisms in ancestral environments that appear to be maladaptive in modern environments. An evolutionary model therefore provides a framework to enhance practitioner and patient understanding, improve compliance with lifestyle interventions, reduce morbidity, improve quality of life and will evolve and change over time.
package com.eternalcode.core.configuration.implementations;
import com.eternalcode.core.configuration.AbstractConfigWithResource;
import com.google.common.collect.ImmutableMap;
import net.dzikoysk.cdn.entity.Contextual;
import net.dzikoysk.cdn.entity.Description;
import net.dzikoysk.cdn.entity.Exclude;
import java.io.File;
import java.io.Serializable;
import java.util.Map;
public class CommandsConfiguration extends AbstractConfigWithResource {
public CommandsConfiguration(File folder, String child) {
super(folder, child);
}
@Description({ "# ",
"# This is the part of configuration file for EternalCore.",
"# ",
"# if you need help with the configration or have any questions related to EternalCore, join us in our Discord",
"# ",
"# Discord: https://dc.eternalcode.pl/",
"# Website: https://eternalcode.pl/", " " })
@Exclude
public CommandsSection commandsSection = new CommandsSection();
@Contextual
public static class CommandsSection {
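        // Command name/alias overrides; presumably each placeholder key (e.g. "{commands.adminchat}")
        // is replaced by the configured alias ("adminczaat" in the default entry below).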
public Map<String, String> commands = new ImmutableMap.Builder<String, String>()
.put("{commands.adminchat}", "adminczaat")
.build();
}
}
|
package com.arjunalabs.android.spikop.data.remote;
import java.util.List;
/**
* Created by bobbyadiprabowo on 21/02/17.
*/
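// Response DTO: wraps the list of SpikResponseDTO items returned by the remote API.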
public class SpikListResponseDTO {
private List<SpikResponseDTO> spiks;
public List<SpikResponseDTO> getSpiks() {
return spiks;
}
public void setSpiks(List<SpikResponseDTO> spiks) {
this.spiks = spiks;
}
}
|
Request for Reconsideration: Ozone NAAQS Notice of Proposed Rulemaking and Supporting Documents

The authors experienced a 38% loss of sample size among those patients using manual diaries. Accounting for nonresponse bias almost certainly would have made these figures worse.[48] In its response, EPA says only that it "recognizes that PEF measurements have been shown to be more variable than FEV1 in some studies" (U.S. Environmental Protection Agency 2008e, p. 48).[49] The relative variability of PEF to FEV1 measurements is a non sequitur, but it turns out to be a revealing one nevertheless. We deal with this in the following subsection.

In their study of asthmatic adults, Ross et al. acknowledge that they had data quality problems that are inherent to the research design: "Our study also had shortcomings that are shared by most panel studies, such as the possibility of incorrect data recording by study participants. Previous surveys have reported that diary cards with self-reported PEFR and symptom data may contain a high number of invented or retrospective entries."[50] EPA appears to have been well aware of the problems posed by diary recordation of pulmonary function data. The lead author of Ross et al. is an employee of EPA's Office of Air Quality Planning and Standards.

(iv) Information quality defects associated with inter-maneuver variability

One of the two studies EPA cites for the observation that PEF measurements are more variable than FEV1 is the study comparing alternative devices by Vaughan et al., a study with which we previously had been

Footnotes:
48 Electronic data collection assures that the data collected are accurate, but it does not assure that data will be collected. Medical researchers have concluded that both electronic data collection and sufficient motivation to adhere to the prescribed data collection regimen are essential. See Reddel et al.
49 See (U.S. Environmental Protection Agency 2006a, pp. 7-27 to 27-47). EPA also tries to rebut Kamps with a paper by Lippmann and Spektor; part of the appeal may be that Lippmann is a longtime CASAC member. The rebuttal paper is off target; it is a comparison of the performance of alternative devices and has nothing to do with the reliability of data recorded in diaries.
50 See Ross et al. (2002, p. 577, internal citations omitted). The authors add: "We would, however, expect these limitations to bias the study results in the direction of nonsignificance." They do explain why
import * as React from "react";
import { useDispatch } from "react-redux";
import { exec, getCompletionOptions } from "../../../renderer-shared/commands";
import { useEffect, useRef, useState } from "react";
import { showRunFieldAction } from "../../../renderer-shared/redux/taskbarSlice";
export function RunField() {
const field = useRef<HTMLInputElement>(null);
const [text, setText] = useState("");
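  // While a completion request is in flight, freezeTyping blocks further edits and
  // enterWhileFrozen records an Enter press so it can be applied once options arrive.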
const freezeTyping = useRef<boolean>(false);
const enterWhileFrozen = useRef<boolean>(false);
const dispatch = useDispatch();
const reset = () => {
setText("");
dispatch(showRunFieldAction(false));
};
const onChange: React.ChangeEventHandler<HTMLInputElement> = (event) => {
if (freezeTyping.current) {
return;
}
setText(event.target.value);
};
const onKeyDown = (event: React.KeyboardEvent) => {
if (event.ctrlKey || event.shiftKey || event.altKey) {
return;
}
switch (event.key) {
case "Enter":
event.preventDefault();
event.stopPropagation();
if (freezeTyping.current) {
enterWhileFrozen.current = true;
} else if (text) {
exec(text);
reset();
}
break;
case "Escape":
event.preventDefault();
event.stopPropagation();
reset();
break;
case "Tab":
event.preventDefault();
event.stopPropagation();
if (freezeTyping.current) {
break;
}
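        // Freeze input, fetch completion options, and substitute the first option that
        // matches the current prefix; honor an Enter pressed while the request was pending.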
freezeTyping.current = true;
getCompletionOptions().then((options) => {
const bestOption = selectOption(options, text);
if (bestOption) {
setText(bestOption);
if (enterWhileFrozen.current) {
exec(bestOption);
reset();
}
}
freezeTyping.current = false;
enterWhileFrozen.current = false;
});
break;
}
};
const onBlur = () => {
reset();
};
useEffect(() => {
field.current?.focus();
}, []);
return (
<input
type="text"
value={text}
className="taskbarRunField"
ref={field}
onChange={onChange}
onKeyDown={onKeyDown}
onBlur={onBlur}
/>
);
}
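// Returns the first option that starts with the typed text, or undefined if none match.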
function selectOption(options: string[], text: string): string | undefined {
for (const option of options) {
if (option.startsWith(text)) {
return option;
}
}
return undefined;
}
|
Kahane-Khinchin type averages

We prove a Kahane-Khinchin type result with a few random vectors, which are distributed independently with respect to an arbitrary log-concave probability measure on $\R^n$. This is an application of a small ball estimate and Chernoff's method, which has recently been used in the context of Asymptotic Geometric Analysis in , .

Introduction
The classical Kahane inequality (cf. Kahane ) states that for any 1 ≤ p < ∞ there exists a constant K_p > 0 such that

  ( ∫_0^1 ‖ ∑_{i=1}^n r_i(t) x_i ‖^p dt )^{1/p} ≤ K_p ∫_0^1 ‖ ∑_{i=1}^n r_i(t) x_i ‖ dt

holds true for every n and arbitrary choice of vectors x_1, ..., x_n ∈ X, where X is a normed space with the norm ‖·‖, and r_i is a Rademacher function, given by r_i(t) = sign sin(2^{i−1} t), i ≥ 1 (for more information about Rademacher functions, see, e.g. Milman and Schechtman , section 5.5). Kwapień proved that K_p ∼ √p, a constant depending only on p. Note that, in view of the definition of r_i(t), these integrals can also be represented as averages over choices of signs (for p ≥ 1)

  ( Ave_± ‖ ∑_{i=1}^n ±x_i ‖^p )^{1/p} ≤ K_p Ave_± ‖ ∑_{i=1}^n ±x_i ‖.

Bourgain, Lindenstrauss and Milman proved that, if v_1, ..., v_n are unit vectors in a normed space (R^n, ‖·‖) then, for any δ > 0 there exists a constant C(δ) > 0, such that N = C(δ)n random sign vectors ε(1), ..., ε(N) ∈ {−1, 1}^n satisfy with probability greater than 1 − e^{−cn} that

  (1 − δ) |||x||| ≤ (1/N) ∑_{j=1}^N ‖ ∑_{i=1}^n ε_i(j) x_i v_i ‖ ≤ (1 + δ) |||x|||

for every x ∈ R^n, where |||x||| = Ave_± ‖ ∑_{i=1}^n ±x_i v_i ‖. Obviously, the norm |||·||| is an unconditional norm, i.e., it is invariant under the change of signs of the coordinates. As this shows, it is sufficient to average O(n) terms rather than 2^n, in order to obtain a norm that is almost isometric to |||·|||, and in particular to symmetrize our original norm with selected vectors {v_i}_{i=1}^n to become almost unconditional. In this paper, we change the settings given above in two ways: First, random sign vectors are replaced by random vectors a(1), ..., a(N) distributed with respect to an arbitrary log-concave probability measure. Second, we are now interested in using a small number N = (1 + ε)n of random vectors, at the cost of an isomorphic, instead of an almost isometric, comparison of |||x|||. Let v_1, ..., v_n be unit vectors in a normed space (R^n, ‖·‖). Define a norm |||·||| on R^n:

  |||x||| = ∫_{R^n} ‖ ∑_{i=1}^n a_i x_i v_i ‖ dμ(a),

where a_i is the i-th coordinate of the vector a and μ is a log-concave probability measure on R^n.

Theorem. Let N = (1 + ε)n, 0 < ε < 1, and let {a(1), ..., a(N) ∈ R^n} be a set of N independent random vectors, distributed with respect to a log-concave probability measure on R^n. Then, with probability greater than 1 − e^{−c′n},

  c(ε) |||x||| ≤ (1/N) ∑_{j=1}^N ‖ ∑_{i=1}^n a_i(j) x_i v_i ‖ ≤ C |||x|||  for every x ∈ R^n,

where c(ε) = (cε)^{1+2/ε} and c, c′, C > 0 are universal constants.

Remark. It is easy to see that once the theorem is known for small ε, it holds for large ε as well. Thus we may always assume that ε < ε_0 for some universal constant ε_0.

Remark. The norm |||·||| depends on the choice of the vectors v_1, ..., v_n ∈ R^n. A different choice of vectors defines a different norm for x ∈ R^n. For example, if we choose v_1 = ... = v_n, the question above reduces to the scalar case, the norm |||·||| is the Euclidean norm and we get an isomorphic Khinchin-type inequality; results of this type were proved by Litvak, Pajor, Rudelson, Tomczak-Jaegermann and Vershynin , and also by Artstein-Avidan, Friedland and Milman .

Remark. The question above was also investigated for any 0 < p < ∞. See, e.g. Bourgain , Rudelson , Giannopoulos and Milman , Guédon and Rudelson . We shall focus on the case of p = 1.

Acknowledgement. I thank my supervisor Vitali Milman for encouragement, support and interest in this work. I thank Sasha Sodin and Shiri Artstein-Avidan for useful discussions and remarks on preliminary versions of this note. I thank Sergey Bobkov for suggesting that I look at the continuous case of this problem, and last but not least, I thank Apostolos Giannopoulos for useful remarks and for referring me to the paper of Latała .

Proof of Theorem
Before we proceed, we give a short description of Chernoff's method. The following lemma, which is a version of Chernoff's bounds, gives estimates for the probability that at least θN trials out of N succeed, when the probability of success in one trial is p (cf. Hagerup and Rüb ).

Lemma 1 (Chernoff). Let Z_1, ..., Z_N be independent Bernoulli random variables with mean 0 < p < 1, that is, Z_i takes value 1 with probability p and value 0 with probability (1 − p). Then, for 0 < θ < p, we have

  P{ Z_1 + ... + Z_N ≥ θN } ≥ 1 − e^{−N I(θ, p)},  where I(θ, p) = θ ln(θ/p) + (1 − θ) ln((1 − θ)/(1 − p)).

In the questions above, essentially, we are looking for upper and lower bounds of (1/N) ∑_{j=1}^N ‖ ∑_{i=1}^n a_i(j) x_i v_i ‖. Upper bounds are relatively easy to obtain, and quite often do not require new methods, but only the use of large deviation inequalities like Bernstein's inequality and some net argument. Obtaining lower bounds is different: usually one needs small ball probabilities, which are hard to get, and some extra delicate arguments which are closely related to the context of the question at hand. Here comes Chernoff's method: if one has a small ball probability for one trial, then, using Chernoff's bounds, the estimate of the average of many trials can be amplified. For a more detailed description of this method, see Artstein-Avidan, Friedland and Milman , . Finally, let us analyze the function I(θ, p), where we denote u(θ) = θ ln θ + (1 − θ) ln(1 − θ). The term u(θ) is a negative, convex function which approaches 0 as θ → 0 and as θ → 1, and is symmetric about 1/2 where it has a minimum equal to −ln 2. Thus the whole exponent in Lemma 1 is of the form −N ( u(θ) − θ ln p − (1 − θ) ln(1 − p) ).
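For readers who want to see where the exponent I(θ, p) comes from, the following is a minimal sketch of the standard exponential-moment (Chernoff) argument; it is included only as an illustration and is not taken from the paper, whose exact statement of Lemma 1 and its constants may differ. For $0<\theta<p$ and any $\lambda>0$, Markov's inequality applied to $e^{-\lambda\sum_i Z_i}$ gives
\[
\mathbb{P}\Big\{\sum_{i=1}^{N} Z_i \le \theta N\Big\}
\;\le\; e^{\lambda\theta N}\,\big(\mathbb{E}\,e^{-\lambda Z_1}\big)^{N}
\;=\;\Big(e^{\lambda\theta}\,\big(1-p+p\,e^{-\lambda}\big)\Big)^{N},
\]
and optimizing over $\lambda$ (the minimum is attained at $e^{-\lambda}=\tfrac{\theta(1-p)}{p(1-\theta)}$) yields
\[
\mathbb{P}\Big\{\sum_{i=1}^{N} Z_i \le \theta N\Big\}
\;\le\; e^{-N\,I(\theta,p)},
\qquad
I(\theta,p)=\theta\ln\frac{\theta}{p}+(1-\theta)\ln\frac{1-\theta}{1-p}
= u(\theta)-\theta\ln p-(1-\theta)\ln(1-p),
\]
so that $\mathbb{P}\{Z_1+\dots+Z_N\ge\theta N\}\ge 1-e^{-N\,I(\theta,p)}$, which is the form of the bound used in the proof below.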
I thank Sergey Bobkov for suggesting me to look at the continuous case of this problem, and last but not least, I thank Apostolos Giannopoulos for useful remarks and referring me to the paper of Lata la. Proof of Theorem Before we proceed, we give a short description of Chernoff's method. The following lemma, which is a version of Chernoff's bounds, gives estimates for the probability that at least N trials out of N succeed, when the probability of success in one trial is p (cf. Hagerup and Rub ). Lemma 1 (Chernoff). Let Z 1,..., Z N be independent Bernoulli random variables with mean 0 < p < 1, that is, Z i takes value 1 with probability p and value 0 with probability (1 − p). Then we have 1) In the questions above, essentially, we are looking for upper and lower bounds of 1 Upper bounds are relatively easy to obtain, and quite often do not require new methods, but only the use of large deviation inequalities like Bernstein's inequality and some net argument. Obtaining lower bounds is different, usually one needs small ball probabilities, which are hard to get, and some extra delicate arguments which are closely related to the context of the question at hand. Here comes Chernoff's method, if one has a small ball probability for one trial, using Chernoff's bounds, the estimate of the average of many trials can be amplified. For more detailed description of this method, see Artstein-Avidan, Friedland and Milman,. Finally, let us analyze the function where we denoted u() = ln +(1−) ln (1 − ). The term u() is a negative, convex function which approaches 0 as → 0 and as → 1, and is symmetric about 1/2 where it has a minima equal to − ln 2. Thus the whole exponent in Lemma 1 is of the form Proof of Theorem. We estimate the following probability The norm |||x||| N is a random norm depending on the choice of N random vectors a,..., a(N ). Upper bound: We begin by estimating the first term P{∃x ∈ S n−1 ||||||, |||x||| N > C}. This is relatively easy, and does not require a new method; we do it in a similar way to the one in : let N = {y(i)} m i=1 be a 1 2 -net with respect to ||| ||| on S n−1 ||||||, it is known that such a net exists with m ≤ 5 n. For each 1 ≤ i ≤ m we consider the random variables {X i,j } N j=1 defined by where r = E n k=1 a k x k v k = 1 and y(i) k is the k th coordinate of the vector y(i). Clearly, for each i and j, X i,j has mean 0 and X i,j 1 ≤ b for some absolute constant b > 0 (see Milman and Schechtman, App. III). Now, using the well-known Bernstein's inequality which we shall use in the form of 1 estimate (see Vaart and Wellner ): Lemma 2 (Bernstein). Let Y 1,..., Y N be independent random variables with mean 0 such that for some b > 0 and every i, Y i 1 ≤ b. Then, for any t > 0, where c > 0 is an absolute constant. we deduce that for any t > 0 and every 1 ≤ i ≤ m, we have which implies that for a point y(i) ∈ N and for any t > 1 we have P a,..., a(N ) : ). The obvious way to make this probability small enough to handle a large net is to increase t, and obviously we shall get a worse upper bound constant. So, we choose t such that c ) + 1. Then, with probability at least 1 − e −n, for We thus have an upper bound for a net on the sphere. It is standard to transform this to an upper bound estimate on all the sphere (this is an important difference between lower and upper bounds). One uses a consecutive approximation of a point on the sphere by points from the net to get that |||x||| N ≤ 2t = 2t for every x ∈ S n−1 ||||||. 
This completes the proof of the upper bound, where Lower bound: We now turn to estimate the second term P{(∀y ∈ S n−1 ||||||, |||y||| N ≤ C) and (∃x ∈ S n−1 ||||||, |||x||| N < c())}. Note that when estimating this term, we know in advance that the (random) norm ||| ||| N is bounded from above on the sphere S n−1 |||||| (i.e. ∀y ∈ S n−1 |||||| we have |||y||| N ≤ C, where C comes from the upper bound). This is crucial to transform a lower bound on a net on the sphere to a lower bound on the whole sphere. For the lower bound we use Chernoff's method, as described above, to estimate the probability in. Let us denote by p the probability that for a random vector a ∈ R n we have where > 0 and x is some point on S n−1 |||||| : If "doing an experiment" means checking whether n i=1 a i x i v i ≥ (where a ∈ R n is a random vector) then for |||x||| N to be greater than some c, it is enough that (c/)N of the experiments succeed. Of course, we will eventually not want to do this on all points x on the sphere, but just on some dense enough set. So, first we estimate the probability p : Lemma 3. There exists a universal constant > 0 such that for any x ∈ S n−1 ||||||, we have Proof of Lemma 3. Let us define ||||||, then A x is convex and symmetric set. We take x > 0 to be the number such that (A x ) = 2 3. Applying Borell's lemma (see Milman and Schechtman, App. III.3) we get, for all t > 1, and consequently, for some universal constant c 4 > 0 which doesn't depend on x. Therefore, we get that x ≥ 1 1+c4 > 0. Now, we take = 1 1+c4. For this > 0 and any Now, we can use the following lemma of Lata la with the set for which we proved above that (C x ) ≤ 2 3 = b < 1. Lemma 4 (Lata la ). For each b < 1 there exists a constant c b > 0 such that for every log-concave probability measure and every measurable convex, symmetric set C with (C) ≤ b we have Notice that, for any point x, we can make the probability as small as we like by reducing t. This allows us to use a simple net: take -net N in S n−1 ||||||, with less than ( 3 ) n points. For every x ∈ S n−1 |||||| there is a vector y ∈ N such that |||x − y||| ≤, and we have |||y||| N ≤ |||x||| N + |||x − y||| N ≤ c + C (where c = c() and C comes from the upper bound). Therefore, we bound by By Lemma 4, for a given y ∈ N we have for any 0 < t < 1 that where c 1 = c b 2 3. We return to our scheme, in order to estimate the probability in, assume that t ≥ c + C, where shall be the portion of good trials out of N, and t another constant that we choose later such that p >. So, we know that for < 1 − c 1 t (which is hardly a restriction, t will be very small and so will ), from Lemma 1 for a given y ∈ N we have P{|||y||| N ≥ t} ≥ P{Z 1 + + Z N ≥ N } ≥ 1 − e −N I(,p), where P{Z i = 1} = ({a ∈ R n : n i=1 a i y i v i ≥ t}). We choose so that (1 + )(1 − ) = 1 + 2, hence = 2(1+). We choose = t/2C, where C comes from the upper bound. To make sure that the probability above holds for all points in the net we ask that For the first inequality we use, we choose t = (c 2 ) 2, for some universal constant c 2 > 0, and get the lower bound for each point of the net N. Now using the upper bound, for every x ∈ S n−1 |||||| there is a vector y ∈ N such that |||x − y||| ≤, therefore we have |||x||| N ≥ |||y||| N − |||x − y||| N ≥ t − C =: c, c = c() = (c 3 ) 1+ 2, where c 3 > 0 is an absolute constant. Thus the proof of the lower bound, and of the Theorem, is completed. |
Ask Dog Lady: How do I keep my poodle happy?
Weekly canine Q&A, with advice on neglected dogs and new dogs.
I suffer major guilt because I’ve been neglecting Peanut the poodle. I go to work early in the morning, and my teenage daughter usually walks the dog. But she’s been away at camp this summer and starts school at the crack of dawn in the fall. I have to race Peanut out before the sun comes up or leave it to his dog walker who comes around 11. Peanut hasn’t messed in the house so far. Actually, he’s a very good dog. I take him out for big walks on weekends. I’m wondering if I should get him another dog to keep him company because I’m away so much and the kids have their own lives.
If you have so much guilt about Peanut the First, can you imagine how much greater the responsibility with Peanut the Second? You can always get another dog, but do you have the time to care for two?
Dog Lady never wants to warn anybody away from a dog. However, you would be getting another dog for all the wrong reasons – to please Peanut and not to please you. For now, it sounds as if things are better under control than you think. You have a dog walker. Peanut enjoys enough outside time so he hasn’t been desperate enough to abandon his house training. You have the opportunity to provide walks on weekends.
In our lives, we inevitably go through times when we’re crushingly busy punctuated by periods of indolence. Our pets must go with this flow. Dogs are not children. You don’t have to worry about taking them to soccer practice and other activities. Dogs merely care about walking and sniffing out their news while you lead the way. When you’re not around, they close down until the next wake-up call.
I raised one Labrador retriever, which became THE perfect relationship for 13 years. Lulu was mine until she died of cancer. Nihiz, a giant Schnauzer, lived till she was 17 ½ years. She had loyalty, brains and stubbornness. I now have Sasha, a complete mystery, who keeps biting me with sharp puppy teeth. She thinks my shorts are chew toys, and she’s really aggressive. I want so much to keep her.
So keep her. Why would you even think of giving her up? Because she bites your shorts? Please. This too shall pass. Endure and you will build a relationship similar to the one you enjoyed with Lulu and Nihiz. But remember, Sasha will never be Lulu or Nihiz. She’s a different dog, and you must cut her some slack as she learns to adjust to you and your ways. Your previous dogs grew into you the way a tree root bends around the sidewalk. You had them for many years. You can never expect this puppy to immediately assume the position they enjoyed.
Invest in some bully sticks. These are natural chew sticks and dogs love them. When Sasha gnaws on you, give her a quick “no!” and immediately substitute something she can chomp on. Right now, her teeth inform her actions, and she can’t help herself. |
def p6_sig(self):
    # Read the P6 sigma spin box; if the value is not a whole number,
    # snap it to the nearest integer and write it back to the widget.
    self.p6_sigma = self.P6_sig.value()
    if round(self.p6_sigma % 1, 1) != 0:
        self.p6_sigma = self.round_to_closest(self.p6_sigma, 1)
        self.P6_sig.setValue(self.p6_sigma)
    # Store the converted value; add_ns() is a helper defined elsewhere in the class.
    self.p6_sigma = self.add_ns(self.P6_sig.value())
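
# A minimal sketch of the helper assumed above (hypothetical; the original class
# defines its own version): snap `value` to the nearest multiple of `step`.
def round_to_closest(self, value, step):
    return round(value / step) * step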
The dog is believed to be a bulldog-type breed.
Thomas Curd poisoned the 22-month-old toddler in a bid to cover up a series of beatings he had previously inflicted.
Five men were jailed for a total of 120 years after trying to smuggle the drugs in a yacht.
The cows hopped, skipped and jumped their way through a field after being allowed outside for the first time in three months.
Agriculture minister George Eustice has resigned over Theresa May’s decision to allow a vote on delaying Brexit.
A fire that broke out in the early hours of the morning has gutted a pub in Newlyn, Cornwall. |
Fan complaints over loot boxes in Battlefront II have led to the complete removal of in-game purchases, although not forever.
Gamers have won their first victory against the evil Galactic Empire. By which we mean that EA has listened to fan complaints and completely removed microtransactions from Star Wars: Battlefront II.
This will be a day long remembered, as it proves once again that if fans complain loud enough then games companies will commit any U-turn in order to placate them. No matter how important the issue seems to be to their plans.
According to website VentureBeat, Disney boss Bob Iger had to phone EA boss Andrew Wilson in order to make it clear that the company was not happy with all the bad publicity the game, and ultimately the Star Wars brand, was getting.
The whole situation brings to mind the unveiling of the Xbox One, which was met with such dislike by fans that Microsoft was forced to change almost every element of their plans for the console.
So for everyone that complained to EA, who cancelled their pre-order for Battlefront II, and otherwise made their anger known (in a civil manner) congratulations: you may have changed the course of video game history.
The victory is not entirely complete though as loot boxes are still in the game, since they’re indelibly tied to the progression system. EA has insisted that microtransactions will return, once they’ve worked out how to make them fairer.
The game itself is released today, and you can find our review here. Which is now, thankfully, outdated in terms of how it describes the loot box system.
Thank you to everyone in our community for being the passionate fans that you are.
Our goal has always been to create the best possible game for all of you – devoted Star Wars fans and game players alike. We’ve also had an ongoing commitment to constantly listen, tune and evolve the experience as it grows. You’ve seen this with both the major adjustments, and polish, we have made over the past several weeks.
But as we approach the worldwide launch, it’s clear that many of you feel there are still challenges in the design. We’ve heard the concerns about potentially giving players unfair advantages. And we’ve heard that this is overshadowing an otherwise great game. This was never our intention. Sorry we didn’t get this right.
We hear you loud and clear, so we’re turning off all in-game purchases. We will now spend more time listening, adjusting, balancing and tuning. This means that the option to purchase crystals in the game is now offline, and all progression will be earned through gameplay. The ability to purchase crystals in-game will become available at a later date, only after we’ve made changes to the game. We’ll share more details as we work through this.
We have created a game that is built on your input, and it will continue to evolve and grow. Star Wars Battlefront II is three times the size of the previous game, bringing to life a brand new Star Wars story, space battles, epic new multiplayer experiences across all three Star Wars eras, with more free content to come. We want you to enjoy it, so please keep your thoughts coming. And we will keep you updated on our progress. |
from django.db import models
class WorkflowStatus(models.TextChoices):
# Workbasket can still be edited
EDITING = "EDITING", "Editing"
# Submitted for approval, pending response from an approver
PROPOSED = "PROPOSED", "Proposed"
# Approved and scheduled for sending to CDS
APPROVED = "APPROVED", "Approved"
# Send to CDS and waiting for response
SENT = "SENT", "Sent"
# Received a validation receipt from CDS systems
PUBLISHED = "PUBLISHED", "Published"
# Sent to CDS, but CDS returned an invalid data receipt
ERRORED = "ERRORED", "Errored"
@classmethod
def approved_statuses(cls):
return (
cls.APPROVED,
cls.SENT,
cls.PUBLISHED,
cls.ERRORED,
)
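
# Hypothetical usage sketch (WorkBasket is an assumed model, not defined in this
# module) for selecting workbaskets that have already been approved in some form:
#
#     WorkBasket.objects.filter(status__in=WorkflowStatus.approved_statuses())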
|
'What the patient wants': an investigation of the methods of ascertaining patient values in evidence-based medicine and values-based practice. Evidence-Based Medicine (EBM), Values-Based Practice (VBP) and Person-Centered Healthcare (PCH) are all concerned with the values in play in the clinical encounter. However, these recent movements are not in agreement about how to discover these relevant values. In some parts of EBM textbooks, the prescribed method for discovering values is through social science research on the average values in a particular population. VBP by contrast always investigates the individually held values of the different stakeholders in the particular clinical encounter, although the account has some other difficulties. I argue that although average values for populations might be very useful in informing questions of resource distribution and policy making, their use cannot replace the individual solicitation of patient (and other stakeholder) values in the clinical encounter. Because of the inconsistency of the EBM stance on values, the incompatibility of some versions of the EBM treatment of values with PCH, and EBM's attempt to transplant research methods from science into the realm of values, I must recommend the use of the VBP account of values discovery. |
Learning Analytics to Support Teaching Skills: A Systematic Literature Review Learning Analytics is a vast concept and a rapidly growing field in higher education used by professors to measure, collect and analyze digital learning records to improve learning, generate new pedagogies, and make decisions about technology-driven learning. The following article presents a mapping and systematic literature review on Learning Analytics and its link to the teaching skills carried out in university practice. The research process reviewed 7,886 articles during the period from 2016 to 2020. After applying the inclusion and exclusion criteria, 50 articles were analyzed in-depth under the dimensions of purposes of Learning Analytics, teaching competencies, and teaching practice in higher education. This work provides a basis for identifying gaps and research opportunities related to the application of teaching competencies in the field of Learning Analytics and incorporating it into teaching practice in online tutoring. |
BJP is unnecessarily blowing the land issue out of proportion. This is not a big thing. Which party does not give land to companies to set up projects for the development of the area when it comes to power? By creating hype over the issue, BJP is distracting people's attention from other things.
import java.util.LinkedList;
import java.util.List;

/**
 * Search tree node.
 */
class Node {
public int x;
public int y;
public int cost; //path cost
public int movementCost;
public List<Node> path;
/**
* Constructor without path.
*
* @param x Current x value.
* @param y Current y value.
* @param cost Cost so far.
*/
public Node(int x, int y, int cost) {
this.x = x;
this.y = y;
this.cost = cost;
movementCost = cost;
path = new LinkedList<Node>();
}
/**
     * Constructor with path inherited from the parent node.
*
* @param x Current x value.
* @param y Current y value.
* @param cost Cost so far.
* @param parent Parent to inherit path from.
*/
public Node(int x, int y, int cost, Node parent) {
this.x = x;
this.y = y;
movementCost = cost;
this.cost = cost + parent.cost;
path = new LinkedList<Node>();
path.addAll(parent.path);
path.add(parent);
}
} |
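
// Minimal usage sketch (assumed, not part of the original snippet): the child's
// cost accumulates the parent's cost and its path records the nodes visited so far.
class NodeDemo {
    public static void main(String[] args) {
        Node start = new Node(0, 0, 0);
        Node next = new Node(1, 0, 1, start); // cost = 1 + 0, path = [start]
        System.out.println("cost=" + next.cost + ", pathLength=" + next.path.size());
    }
}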
Participatory media
Participatory media is media where the audience can play an active role in the process of collecting, reporting, analyzing and disseminating content. Citizen / Participatory journalism, citizen media and democratic media are related principles.
Participatory media includes community media, blogs, wikis, RSS, tagging and social bookmarking, music-photo-video sharing, mashups, podcasts, participatory video projects and videoblogs. All together they can be described as "e-services, which involve end-users as active participants in the value creation process". However, "active [...] uses of media are not exclusive to our times". "In the history of mediated communication we can find many variations of participatory practices. For instance, the initial phase of the radio knew many examples of non-professional broadcasters".
Marshall McLuhan discussed the participatory potential of media as early as the 1970s, but in the era of digital and social media the theory of participatory culture becomes even more pertinent as the borders between audiences and media producers blur.
Direct and specific least-square fitting of hyperbolae and ellipses A new method based on quadratic constrained least-mean-square fitting to simultaneously determine both the best hyperbolic and elliptical fits to a set of scattered data is presented. Thus a linear solution to the problem of hyperbola-specific fitting is revealed for the first time. Pilu's method to fit an ellipse (with respect to distance) to observed data points is extended to select, without prejudice, both ellipses and hyperbolae as well as their degenerate forms as indicated by optimality with respect to the algebraic distance. This novel method is numerically efficient and is suitable for fitting to dense datasets with low noise. Furthermore, it is deemed highly suited to initialize a better but more computationally costly least-square minimization of orthogonal distance. Moreover, Grassmannian coordinates of the hyperbolae are introduced, and it is shown how these apply to fitting a prototypical hyperbola. Two new theorems on fitting hyperbolae are presented together with rigorous proofs. A new method to determine the spatial uncertainty of the fit from the eigen or singular values is derived and used as an indicator for the quality of fit. All proposed methods are verified using numerical simulation, and working MATLAB® programs for the implementation are made available. Further, an application of the methods to automatic industrial inspection is presented. © 2004 SPIE and IS&T.
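As a rough illustration of the algebraic-distance approach that such direct fits build on, the sketch below performs a plain, unconstrained SVD fit of a general conic. It is not the ellipse/hyperbola-specific constrained formulation of the paper, and the function name and the singular-value ratio used as an uncertainty indicator are illustrative assumptions only.

import numpy as np

def fit_conic_algebraic(x, y):
    """Fit a*x^2 + b*x*y + c*y^2 + d*x + e*y + f = 0 by minimising the
    algebraic distance ||D @ theta|| subject to ||theta|| = 1."""
    D = np.column_stack([x * x, x * y, y * y, x, y, np.ones_like(x)])
    # The right singular vector belonging to the smallest singular value
    # minimises ||D @ theta|| over unit vectors theta.
    _, s, Vt = np.linalg.svd(D, full_matrices=False)
    theta = Vt[-1]
    # Ratio of smallest to largest singular value: a crude indicator of the
    # conditioning / spatial uncertainty of the fit.
    return theta, s[-1] / s[0]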
package com.refinedmods.refinedstorage.apiimpl.storage.disk;
import com.refinedmods.refinedstorage.api.storage.disk.IStorageDisk;
import com.refinedmods.refinedstorage.api.storage.disk.IStorageDiskFactory;
import com.refinedmods.refinedstorage.api.storage.disk.IStorageDiskManager;
import com.refinedmods.refinedstorage.api.storage.disk.IStorageDiskProvider;
import com.refinedmods.refinedstorage.apiimpl.API;
import com.refinedmods.refinedstorage.apiimpl.util.RSSavedData;
import net.minecraft.nbt.CompoundTag;
import net.minecraft.nbt.ListTag;
import net.minecraft.nbt.Tag;
import net.minecraft.resources.ResourceLocation;
import net.minecraft.server.level.ServerLevel;
import net.minecraft.world.item.ItemStack;
import javax.annotation.Nullable;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
public class StorageDiskManager extends RSSavedData implements IStorageDiskManager {
public static final String NAME = "refinedstorage_disks";
private static final String NBT_DISKS = "Disks";
private static final String NBT_DISK_ID = "Id";
private static final String NBT_DISK_TYPE = "Type";
private static final String NBT_DISK_DATA = "Data";
private final Map<UUID, IStorageDisk> disks = new HashMap<>();
private final ServerLevel level;
public StorageDiskManager(ServerLevel level) {
this.level = level;
}
@Override
@Nullable
public IStorageDisk get(UUID id) {
return disks.get(id);
}
@Nullable
@Override
public IStorageDisk getByStack(ItemStack disk) {
if (!(disk.getItem() instanceof IStorageDiskProvider)) {
return null;
}
IStorageDiskProvider provider = (IStorageDiskProvider) disk.getItem();
if (!provider.isValid(disk)) {
return null;
}
return get(provider.getId(disk));
}
@Override
public Map<UUID, IStorageDisk> getAll() {
return disks;
}
@Override
public void set(UUID id, IStorageDisk disk) {
if (id == null) {
throw new IllegalArgumentException("Id cannot be null");
}
if (disk == null) {
throw new IllegalArgumentException("Disk cannot be null");
}
if (disks.containsKey(id)) {
throw new IllegalArgumentException("Disks already contains id '" + id + "'");
}
disks.put(id, disk);
}
@Override
public void remove(UUID id) {
if (id == null) {
throw new IllegalArgumentException("Id cannot be null");
}
disks.remove(id);
}
@Override
public void markForSaving() {
setDirty();
}
@Override
public void load(CompoundTag tag) {
if (tag.contains(NBT_DISKS)) {
ListTag disksTag = tag.getList(NBT_DISKS, Tag.TAG_COMPOUND);
for (int i = 0; i < disksTag.size(); ++i) {
CompoundTag diskTag = disksTag.getCompound(i);
UUID id = diskTag.getUUID(NBT_DISK_ID);
CompoundTag data = diskTag.getCompound(NBT_DISK_DATA);
String type = diskTag.getString(NBT_DISK_TYPE);
IStorageDiskFactory factory = API.instance().getStorageDiskRegistry().get(new ResourceLocation(type));
if (factory != null) {
disks.put(id, factory.createFromNbt(level, data));
}
}
}
}
@Override
public CompoundTag save(CompoundTag tag) {
ListTag disksTag = new ListTag();
for (Map.Entry<UUID, IStorageDisk> entry : disks.entrySet()) {
CompoundTag diskTag = new CompoundTag();
diskTag.putUUID(NBT_DISK_ID, entry.getKey());
diskTag.put(NBT_DISK_DATA, entry.getValue().writeToNbt());
diskTag.putString(NBT_DISK_TYPE, entry.getValue().getFactoryId().toString());
disksTag.add(diskTag);
}
tag.put(NBT_DISKS, disksTag);
return tag;
}
}
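
// Hypothetical usage sketch (the `manager` and `disk` instances are assumed to be
// obtained elsewhere; only methods declared above are used):
//
//     UUID id = UUID.randomUUID();
//     manager.set(id, disk);
//     manager.markForSaving();
//     IStorageDisk loaded = manager.get(id);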
|
Low frequency noise "pollution" interferes with performance. To study the possible interference of low frequency noise on performance and annoyance, subjects categorised as having a high- or low sensitivity to noise in general and low frequency noise in particular worked with different performance tasks in a noise environment with predominantly low frequency content or flat frequency content (reference noise), both at a level of 40 dBA. The effects were evaluated in terms of changes in performance and subjective reactions. The results showed that there was a larger improvement of response time over time, during work with a verbal grammatical reasoning task in the reference noise, as compared to the low frequency noise condition. The results further indicated that low frequency noise interfered with a proof-reading task by lowering the number of marks made per line read. The subjects reported a higher degree of annoyance and impaired working capacity when working under conditions of low frequency noise. The effects were more pronounced for subjects rated as high-sensitive to low frequency noise, while partly different results were obtained for subjects rated as high-sensitive to noise in general. The results suggest that the quality of work performance and perceived annoyance may be influenced by a continuous exposure to low frequency noise at commonly occurring noise levels. Subjects categorised as high-sensitive to low frequency noise may be at highest risk. |
Novel behavior of the heat of micellization of Pluronics F68 and F88 in aqueous solutions. It is well understood that the heat of micellization of surfactants decreases monotonically with increasing temperature. However, this behavior has never been carefully examined for polymeric surfactants. In this study, the heat of micellization of poly(ethylene oxide)-poly(propylene oxide)-poly(ethylene oxide) (PEO-PPO-PEO) triblock copolymers (Pluronics F68 and F88) in water is carefully examined as a function of temperature using a high-sensitivity differential scanning calorimeter (HSDSC). The critical micelle temperature (CMT) decreases with increasing concentration of Pluronic F68 (or F88). The heat of micellization decreases with increasing temperature, as expected, when the CMT is higher than 55 and 42 degrees C for Pluronics F68 and F88, respectively. Interestingly, the heat of micellization increases with temperature while the temperature is below 55 and 42 degrees C for Pluronics F68 and F88, respectively. The enthalpy-entropy compensation phenomenon for the micellization of Pluronics F68 and F88 is discussed in connection with their hydrophobicity.
. The objective of this study was to analyze the acceptability, effectiveness, and continuation of long-acting progestagens in certain sectors of the population where its advantages are more obvious by virtue of certain socioeconomic, cultural, and geographic characteristics, and by lack of availability of family planning services, as in rural areas and urban marginal zones. The study was performed using females of reproductive age residing in rural areas of the Mexican States of Hidalgo, Puebla, and Yucatan between July 1981 and September 1982. Originally the investigation included a total of 462 women, of whom 94 were lost to follow-up, leaving a total of 368 patients (79.6%) with effective follow-up. The contraceptive used was 19-Nor progestagen, norethisterone enanthate (NET), in 200 mg doses administered intramuscularly. The 1st dose was applied between the 1st to the 5th day of the menstrual cycle and each 60 calendar days thereafter. Of a total of 94 discontinuations, 2/3 occurred during the 1st 6 months of the study. The final discontinuation rate was 14.46%. The studied population showed a continuation rate of 85.54% after 12 months of use. Observing rates by reasons for discontinuation, the principal causes in descending order were nonmedical reasons, amenorrhea, pregnancy, bleeding and other secondary effects. The greatest number of discontinuations, considering each particular reason, occurred predominantly during the 1st 6 months of the study. Distribution of users by age group show that more than 2/3 (71.8%) were between 20 and 34 years old. Distribution by number of previous pregnancies show 56.5% with 5 or more gestations. To analyze behavior of menstrual cycles, "cycles" were defined as period of 30 calendar days. Under this concept, more than 2/3 (70.9%) of the users had from 1 to 7 days of bleeding per cycle. On the basis of the results of the study, it is possible to conclude the following: 1) Continuation of NET use is greater than that observed with other long-lasting progestagens. This seems to be especially so when employed in areas where limitations exist for the utilization of other contraceptive means. 2) Contraceptive effectiveness is greater when administration is scheduled at 60 day intervals rather than longer intervals. 3) Unlike other long-acting progestagens, NET conserves the normal menstrual pattern in the majority of users, which favorably influences its continuation. |
import { NgModule } from '@angular/core';
import { IonicPageModule } from 'ionic-angular';
import { TermsPage } from './terms';
import { TranslateModule } from '@ngx-translate/core';
@NgModule({
declarations: [TermsPage],
imports: [IonicPageModule.forChild(TermsPage), TranslateModule],
})
export class TermsPageModule {}
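
// Hypothetical usage sketch: assuming TermsPage is decorated with @IonicPage,
// registering it through IonicPageModule.forChild allows lazy, string-based
// navigation from any NavController elsewhere in the app, e.g.
//
//     this.navCtrl.push('TermsPage');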
|
Pain Severity Correlates With Biopsy-Mediated Colonic Afferent Activation But Not Psychological Scores in Patients With IBS-D INTRODUCTION: Despite heterogeneity, an increased prevalence of psychological comorbidity and an altered pronociceptive gut microenvironment have repeatedly emerged as causative pathophysiology in patients with irritable bowel syndrome (IBS). Our aim was to study these phenomena by comparing gut-related symptoms, psychological scores, and biopsy samples generated from a detailed diarrhea-predominant IBS patient (IBS-D) cohort before their entry into a previously reported clinical trial. METHODS: Data were generated from 42 patients with IBS-D who completed a daily 2-week bowel symptom diary, the Hospital Anxiety and Depression score, and the Patient Health Questionnaire-12 Somatic Symptom score and underwent unprepared flexible sigmoidoscopy. Sigmoid mucosal biopsies were separately evaluated using immunohistochemistry and culture supernatants to determine cellularity, mediator levels, and ability to stimulate colonic afferent activity. RESULTS: Pain severity scores significantly correlated with the daily duration of pain (r = 0.67, P < 0.00001), urgency (r = 0.57, P < 0.0005), and bloating (r = 0.39, P < 0.05), but not with psychological symptom scores for anxiety, depression, or somatization. Furthermore, pain severity scores from individual patients with IBS-D were significantly correlated (r = 0.40, P < 0.008) with stimulation of colonic afferent activation mediated by their biopsy supernatant, but not with biopsy cell counts nor measured mediator levels. DISCUSSION: Peripheral pronociceptive changes in the bowel seem more important than psychological factors in determining pain severity within a tightly phenotyped cohort of patients with IBS-D. No individual mediator was identified as the cause of this pronociceptive change, suggesting that nerve targeting therapeutic approaches may be more successful than mediator-driven approaches for the treatment of pain in IBS-D. INTRODUCTION Chronic abdominal pain and loose stools is a debilitating condition and one of the most common causes of presentation to a gastroenterologist. After excluding inflammatory or infectious disease, most cases are diagnosed as irritable bowel syndrome with diarrhea (IBS-D). However, the cause of the characteristic pain remains obscure. Pain has been attributed to visceral hypersensitivity to otherwise non-noxious stimuli, found in between 50% and 90% of patients. This may arise from a range of abnormalities including enhanced nociception, augmented central pain processing, and impaired adaptation to pain, which may explain the heterogeneity of pathophysiology within patients with irritable bowel syndrome (IBS). Although central factors are undoubtedly important, recognition that IBS could arise after acute infectious gastroenteritis, or postinfectious IBS (PI-IBS), and the chronic changes in mucosal cellularity and mediator content associated with PI-IBS has focused attention on local mucosal abnormalities. Subsequent studies across different subgroups of patients with IBS have broadly documented altered mast cell and endocrine cell numbers and mediators. However, such findings are not universal and the link between mucosal changes, visceral hypersensitivity, and pain symptomology in unselected patients with IBS is variable. These disparities may be due to studying unselected, and hence heterogeneous patients with IBS, rather than specific subtypes. 
The commonest subtype of PI-IBS is diarrhea predominant, a group in whom several studies have shown alterations in tight junctions and increased permeability. These features are linked to visceral hypersensivity by promoting exposure to luminal content and local immune cell activation. Evidence for these changes have come from biopsy studies that have documented mast cell hyperplasia and elevated levels of mediators such as histamine, serotonin, and tryptase, which contribute to activation of enteric nerves by biopsy supernatants. In some studies, these changes have been shown to correlate with pain scores or pain threshold assessed by rectal barostat, thereby providing a link between local changes in the gut mucosa with pain symptomology. We have previously reported the results of a large proof of concept clinical trial in patients with IBS-D. Nested within the prescreening period of the trial was a mechanistic study that is reported here. We first examined the correlation between symptoms of pain with related sensory abnormalities of urgency, bloating, and psychological factors known to influence pain processing. We then focused on the histology of the sigmoid colonic mucosa and mediator release from incubated biopsies to identify mediators and cell types contributing to IBS pain. Finally, we tested the effect of sigmoid biopsy supernatant on colonic afferent activity and correlated this with pain symptomology. We found considerable heterogeneity in our patient group, with evidence that locally generated mediators are associated with the severity of abdominal pain, with a stronger effect than central psychological factors in this subtype of IBS. Patient details Patients with IBS-D were recruited into a multicentered, parallel group, randomized placebo-controlled trial as previously reported (ClinicalTrial.gov ISRCTN76612274). Nested within the main trial was a mechanistic study of 42 patients who were recruited in the Nottingham center and consented for sigmoid biopsy before randomization. In addition to stool consistency and frequency, other bowel-related symptoms comprising pain severity and daily duration, urgency, and bloating were recorded daily over the 14-day screening period. Pain severity was recorded on a 0-10 scale (0 5 no pain and 10 5 the most severe pain ever experienced) along with daily pain duration in hours per day. Urgency and bloating were also reported on a similar 0-10 scale, stool frequency as bowel movements/day and stool consistency recorded daily using the Bristol Stool Form scale. Symptoms were recorded each evening documenting the preceding 24 hours. All randomization patients completed the Hospital Anxiety and Depression Scale along with the Patient Health Questionnaire-12 Somatic Symptom score, a measure of nongastroenterological somatic symptoms. Sigmoid biopsy and assessments After symptom screening to confirm eligibility for the trial, mucosal biopsies were obtained at 30 cm from the anus during an unprepared, unsedated flexible sigmoidoscopy in the left lateral position. Two biopsies were taken and processed for immunohistochemistry and 2 were cultured to obtain supernatants. Biopsy processing is detailed in the Supplemental Methods (see Supplementary Digital Content 2, http://links.lww.com/CTG/ A510). Two biopsies were weighed and placed into 2 mL of Hanks balanced salt solution in a polystyrene organ culture dish and incubated for two 30 minute periods at 37°C, in 5% CO 2. Fresh Hanks was used for the second, 30-minute incubation period. 
The supernatant collected in the first 30 minutes was used to assess released mediators, whereas the supernatant collected in the second 30-minute period was used for testing in colonic afferent preparations. Supernatants were aliquoted and stored at 280 o C. Samples were transported on dry ice and aliquots were thawed on the day of use in electrophysiological or laboratory studies. Biopsy supernatant levels of histamine, tryptase, chymase, and carboxypeptidase 3 (CPA3) were measured using sandwich ELISA assays provided by the Immunopharmacology Research Group, the University of Southampton, as described previously. Histamine was measured using a commercially available enzyme immunoassay kit (Neogen, Lexington, KY) as directed by the manufacturer. Ex-vivo recordings of colonic afferent fiber activity Few fiber afferent activities were recorded from teased lumbar splanchnic nerve bundles in a flat sheet colorectal preparation (male 12-weeks old, wild-type C57BL/6 mice, or Na V 1.9 2/2 mice, as previously described ) using suction electrodes. Receptive fields were identified and characterized based on the criteria developed by Brierley et al.. Experiments were only performed on the receptive fields of vascular (or serosal) afferents. Once characterized mechanosensitivity was determined by probing with 0.6 g and 1.0 g von Frey hairs (vFh). Thereafter, a brass ring was placed around the receptive field and the indwelling buffer replaced with biopsy supernatant (100 mL) for 12 minutes, and mechanosensitivity retested after removal of the ring and supernatant. The individual single unit discharge of the receptive field tested was discriminated using template matching software within Spike 2 software (Cambridge Electronic Design, Cambridge, UK) performed over the period of vFh probing. Mechanosensitivity was determined for each weight of vFh before and after supernatant application, and the difference was calculated and expressed in action potentials (spikes) per second. Chemosensitivity to biopsy supernatant was expressed as the increase in afferent discharge over the 12-minute application Power and statistical analysis All analysis was performed using Graphpad Prism Version 7 and above (GraphPad Software, San Diego, CA). Unless otherwise stated, data are expressed as mean 6 SD. Normality of data was tested by using the D'Agostino and Pearson omnibus normality tests, and comparisons were made between parameters for individual patients using Pearson or Spearman correlation coefficients or between group data using a Student t-test or Mann-Whitney U test for parametric and nonparametric data, respectively. Adjustments were made to significant data sets for greater than 3 multiple comparisons using a Bonferroni correction and false discoveries highlighted. Significance was set at P, 0.05 or smaller. Patient details Symptom scores, psychological tests, and sigmoid biopsies were obtained from 42 patients with IBS-D. Clinical details are shown in Table 1. Symptom scores Bowel-related symptoms and their correlation with pain severity. As required for trial entry, patients recorded frequent loose stools (Table 1). Pain severity and bloating scores lay within the mild-to-moderate ranges with slightly higher urgency scores. The mean daily duration of pain experienced was 2.9 6 3.2 hours, with the wide SD highlighting the heterogeneous nature of pain experienced by patients, ranging from brief periods of pain to more prolonged periods of pain. 
Pain severity was strongly correlated with daily pain duration and urgency and to a lesser degree with bloating and stool frequency, but not with stool consistency (Table 1 and Figure 1). As expected, patient symptom scores for urgency also correlated significantly with stool consistency (Figure 2), but not with stool frequency or bloating. There was no significant correlation between symptom scores for bloating, stool frequency, or stool consistency ( Table 2). Effect of age and sex on bowel-related and psychological symptom scores. No difference was observed in the magnitude of bowel-related symptoms or psychological scores between male and female patients. Furthermore, no correlation was observed between any symptom score and patient age (see Table S3, Supplementary Digital content 1, http://links.lww.com/CTG/A509). Sigmoid biopsies Correlation of pain severity scores with biopsy mediator levels and histology. No significant correlation was observed between pain severity scores and biopsy supernatant levels of histamine, tryptase, chymase, and CPA3 nor with biopsy mast cell or CD68 positive cell counts (see Table S4, Supplementary Digital content 1, http://links.lww.com/CTG/A509). Furthermore, no significant correlation was found between biopsy mediator levels or histology and other bowel-related symptoms (see Table S5, Supplementary Digital content 1, http://links.lww.com/CTG/A509) or psychological scores (see Table S6, Supplementary Digital content 1, http://links.lww.com/CTG/A509). It is perhaps worth noting that there was a correlation between CPA3 and urgency and stool consistency (Table S5, Supplementary Digital content 1, http://links.lww.com/CTG/A509), but after correction for multiple comparisons, this failed to reach conventional significance. Correlation of biopsy-evoked colonic afferent activity with pain severity scores. By contrast, application of biopsy supernatant to the receptive field of colonic afferents produced colonic afferent responses that correlated significantly (P, 0.008) with the severity of pain experienced by the patient from which the biopsy was obtained (Figure 3, Table 4). No correlation was found between the change in colonic afferent mechanosensitivity (0.6 g and 1.0 g vFh) after supernatant application and pain severity scores ( Table 4). Correlation of colonic afferent activity with biopsy mediator levels. No significant correlation was observed between biopsy supernatants mediator levels or biopsy histology findings and respective magnitudes of colonic afferent activation or change in colonic afferent mechanosensitivity (see Table S7, Supplementary Digital content 1, http://links.lww.com/CTG/A509). This suggests that multiple mediators may be responsible for the pronociceptive potential of biopsy supernatants, and these mediators may vary from patient to patient. Effect of age and sex on biopsy responses. Furthermore, no significant difference was found in the colonic afferent response to the application of biopsy supernatant or subsequent change in mechanosensitivity based on the sex of the patient from which the biopsy was taken (see Table S8, Supplementary Digital content 1, http://links.lww.com/CTG/A509). 
In addition, no effect of patient sex was found on biopsy mediator levels or histology, and no correlation was found between the patients' age and the effect of biopsy supernatant on colonic afferent activity and mechanosensitivity or age and biopsy mediator release or histology (see Table S8, Supplementary Digital content 1, http://links.lww.com/ CTG/A509). Effect of Na V 1.9 deletion on biopsy-mediated colonic afferent responses. Given the lack of correlation of nerve response with individual mediators in the supernatant, an alternative strategy to the treatment of abdominal pain in IBS-D may be to target ion channels responsible for the activation of colonic afferents by multiple mediators. Na V 1.9 is one such channel that is responsible for the sensitization of colonic afferents in response to inflammatory and algogenic mediators consistent with the observation that gain of function human Na V 1.9 mutants display episodic abdominal pain and diarrhea. To highlight the therapeutic potential of Na V 1.9, we also evaluated the effect of biopsy supernatants from patients with the highest pain scores (severity score of 5 or greater) on colonic afferent activity in tissue from Na V 1.9 2/2 mice. We demonstrated a reduced afferent response by comparison to the responses observed when supernatants were tested in wild type tissue (Figure 4a). Furthermore, although colonic afferent mechanosensitivity to von Frey probing was comparable in tissue from C7B6 mice or Na V 1.9 2/2 mice (Figure 4b), the change in mechanosensitivity after supernatant application was also significantly reduced in tissue from Na V 1.9 2/2 mice (Figure 4c). DISCUSSION The aim of this study was to gain insight into putative mechanisms of visceral pain in IBS-D. Although previous studies have shown separately that central psychological factors such as mood and somatization and peripheral mediators can contribute to visceral pain, we have assessed these factors within a single study. An additional strength of our study is that it has been conducted in a single subtype of IBS, using bowel symptom scores recorded in a daily diary rather than retrospective symptom scores across a mixture of IBS subtypes. We have been able to show that although variable, reported pain severity was highly correlated with the overall daily duration of pain. Urgency, a key feature of IBS-D, was also found to be strongly correlated with pain severity, which is perhaps unsurprising, given that urgency is also believed to be driven by colorectal hypersensitivity and points toward a common pathology. However, we found no correlation between pain severity scores and either anxiety, depression, nor somatic sensitivity as assessed by the PHQ-12SS. Our sample was representative of all patients with IBS because the mean scores and the proportion scoring above the upper limit of normal for anxiety and depression were very similar to the published data from a much larger IBS-D patient cohort. As a consequence, our findings indicate that such psychological factors are not the major determinant of pain severity in our IBS-D patient cohort. This does not however exclude a contribution from psychological factors to pain in some patients with more marked psychological disturbances who may not be selected for clinical trials. Our data instead point to a consistent contribution of peripheral factors to pain severity in patients with IBS-D. 
We examined the effect of biopsy supernatants on colonic afferent activity in a population of lumbar splanchnic afferents classified as vascular afferents and previously shown to display a nociceptor phenotype. Recordings were performed from the lumbar splanchnic nerve because this pathway has previously been shown to be responsible for the transmission of pain from the sigmoid colon, our site of biopsy collection. Consistent with our hypothesis, we found a strong correlation between biopsymediated colonic afferent activation and patient pain severity. Although we found no correlation between individual biopsy mediator levels and pain scores, we speculate that this reflects the range of possible mediators (e.g., histamine, serotonin, PGE 2, and tryptase) that may differ from patient to patient. An alternative explanation for the lack of correlation would be that other unmeasured mediators are important, such as bile acid derivatives, short-chain fatty acids, lipopolysaccharide, or other microbial metabolites. One possible therapeutic approach would be to generally suppress neural activation in the periphery while avoiding the side effects associated with actions on higher centers. To illustrate the utility of this approach, we also examined the effect of biopsy supernatants from patients with high pain severity scores on colonic afferent activity in tissue from Na V 1.9 2/2 mice, a channel highly expressed in colonic afferents, and possibly implicated in IBS-D visceral nociception because episodic abdominal pain and diarrhea has been reported in the gain of function human mutants. Consistent with our previous findings that colonic afferent response to algogenic mediators and supernatants generated from inflammatory bowel disease (IBD) patient tissue are attenuated in tissue from Na V 1.9 2/2 mice, we also observed a significant reduction in the response to IBS-D biopsy supernatants in Na V 1.9 mouse tissue. A further observation from this study was the significant correlation between urgency scores and stool consistency, suggesting that looser stools may contain mediators such as bile acids and fecal proteases that could promote urgency by stimulating colorectal afferents. This concept is supported by previous reports that urgency correlates with fecal tryptase. Future studies are now warranted to explore these possibilities and the effect of biopsy supernatants on afferent fiber subtypes responsible for the perception of urgency such as pelvic afferents fibers within the colorectum. In conclusion, using a tightly defined patient group, we were able to show a strong correlation between pain severity and the stimulation of colonic afferent activity by biopsy supernatants, suggesting that in this patient group, the peripheral influences are more significant that central ones. However, we found no evidence for the dominant role of any one of the mediators examined suggesting that either there is another, as yet unmeasured, mediator or that targeting multiple mediator pathways may be a better strategy than targeting a single specific pathway. Future studies should include larger panels of potential mediators that may be present in the stool of patients with IBS. ACKNOWLEDGMENTS We would like to thank the Sister Andrea Bennet and research nurses of the Nottingham Biomedical Research Unit for their invaluable support for these studies and Dr. Andrew Walls and the Immunopharmacology Group, University of Southampton, who performed the mediator assays.. 
Effect of biopsy supernatants (from patients with IBS-D with pain scores > 5) in tissue from NaV1.9−/− compared with wild-type mice. Bar charts illustrating (a) the reduced colonic afferent response to supernatant, (b) the comparable magnitude of evoked mechanosensitivity in wild-type and NaV1.9−/− tissue before supernatant application, and (c) the lower change in mechanosensitivity after supernatant application in tissue from NaV1.9−/− compared with wild-type mice. Open Access This is an open access article distributed under the terms of the Creative Commons Attribution-NonCommercial-NoDerivatives License 4.0 (CC BY-NC-ND), where it is permissible to download and share the work provided it is properly cited. The work cannot be changed in any way or used commercially without permission from the journal.
Study Highlights
WHAT IS KNOWN
- Peripheral and psychological factors contribute to symptomology in irritable bowel syndrome (IBS).
- Peripheral factors contribute to pain in IBS by stimulating sensory nerves.
- Psychological factors contribute to pain in IBS by promoting hypervigilance and increasing stress responses.
WHAT IS NEW HERE
- Pain severity scores correlated with daily duration of pain and urgency, but not with anxiety, depression, or somatization in patients with IBS-D.
- Pain severity scores correlated with biopsy-mediated colonic afferent firing.
- Individual biopsy mediator levels did not predict pain severity.
TRANSLATIONAL IMPACT
- Nerve-targeting therapies may be more effective than specific mediator antagonists.
export { Banner } from "./Banner";
export { BannerProps } from "./Banner.types";
|
// zheli-ware/src/main/java/com/aiz/zhelimall/ware/feign/ProductFeignService.java
package com.aiz.zhelimall.ware.feign;
import com.aiz.common.utils.R;
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
/**
* @ClassName ProductFeignService
* @Description
* @Author Yao
* @Date Create in 13:25 2020/7/23 0023
* @Version 1.0
*/
@FeignClient("zheli-product")
public interface ProductFeignService {
    /**
     * /product/skuinfo/info/{skuId}
     *
     * 1) Route every request through the gateway:
     *    1. @FeignClient("zheli-gateway"): send the request to the machine running zheli-gateway
     *    2. /api/product/skuinfo/info/{skuId}
     * 2) Let the designated backend service handle the request directly:
     *    1. @FeignClient("zheli-product")
     *    2. /product/skuinfo/info/{skuId}
     *
     * @return the sku info wrapped in R
     */
@RequestMapping("/product/skuinfo/info/{skuId}")
public R info(@PathVariable("skuId") Long skuId);
}
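
// Hypothetical usage sketch: inject the client into a Spring-managed bean and
// call the remote endpoint (the sku id below is illustrative only):
//
//     @Autowired
//     private ProductFeignService productFeignService;
//
//     R skuInfo = productFeignService.info(1L);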
|
The Broken Heart: The Role of Life Events in Takotsubo Syndrome The onset of Takotsubo syndrome (TTS), also known as stress cardiomyopathy, is thought to be associated with some life events. This study focuses on clarifying life event characteristics and the role of triggers in the onset of TTS. Participants with TTS (n = 54) were compared to those with acute myocardial infarction (AMI; n = 52) and healthy individuals (n = 54). Using a modified version of the Interview for Recent Life Events, information about general life events perceived as stressful and triggers preceding the onset of a cardiac syndrome was collected. The assessment included the impact of these events as indicated by the participants and estimated by the interviewer; finally, the objective impact was considered. Although the number of events and the objective impact did not differ among the groups, patients with TTS reported a more negative perceived impact. Moreover, 61% of these patients objectively and subjectively reported a more stressful trigger before the onset of the disease (in the 24 h preceding the cardiac event) than those reported by patients with AMI. The dynamic between life events and individual responses could help differentiate TTS from other cardiovascular events, such as AMI. This study suggests that patients perception of some life events (whether triggers or general life events) could represent a possible marker of TTS. The prevalence of TTS is 1-3% in all patients with an acute coronary syndrome manifestation. Recent data have reported an annual TTS incidence between 50,000 and 100,000 cases in the general population. These studies have also suggested that the prevalence increases by up to 10% in women, accounting for approximately 85-90% of patients with TTS. The age range for diagnosis is commonly between 67 and 70 years. The definition of TTS as stress cardiomyopathy underlines the critical role of some life events as stressors in its onset. Unlike other heart failure etiologies, such as AMI, a hallmark of TTS is the higher incidence of a trigger before the cardiac event, as reported in about 70% of patients. The trigger occurs within 24 h before the onset of the cardiovascular event and could be emotional (e.g., grief, argument, separation) or physical (e.g., physical trauma, brain trauma, surgery). Despite the identification of many physical and emotional triggers (e.g., divorce, public speaking, conflict, severe fright, stress at work, physical illness) by case reports and cohort studies on TTS [11,, no study has systematically assessed the nature and characteristics of triggers related to TTS onset. Early studies suggested that both negative and positive life events could play a role in TTS onset. These events are known to elicit significant emotional responses involving the sympathetic neurohormonal axis and parallel overstimulation of the catecholaminergic system by affecting the cardiovascular system. Moreover, some authors have reported a high prevalence of TTS in patients with a history of psychopathological disorders (e.g., anxiety and depression) commonly associated with emotional dysregulation and a misperception of life and environmental events [8,. These findings potentially support the hypothesis that the nature of the triggers would not determine the onset of TTS per se, but rather that individual perception and response to trigger events could play a role in the onset of TTS. 
Surprisingly, no study has analyzed how patients with TTS perceive the emotional impact of life events or triggers related to TTS onset. Accordingly, this study aims to understand the role and characteristics of both life events and triggers in TTS onset. The main aim of this study was twofold. The first aim was to analyze life events and their impact on patients with a TTS diagnosis and compare their perception of these events to that of patients with an AMI diagnosis and individuals without a history of heart pathology. The second aim was to verify the differences in the presence and nature of triggers in patients with TTS and AMI. We expected an overestimation of the emotional and stressful impact of life events in patients with TTS compared to those with AMI and healthy individuals. In the group comprising TTS patients, we also hypothesized a greater frequency of triggers, especially emotional ones, and a higher negative personal perception of their impact than in those with an AMI diagnosis. Inclusion and Exclusion Criteria For patients with TTS, the inclusion criteria were acute onset of symptoms, no culprit lesion on coronary angiography, typical 'apical ballooning', elevated cardiac biomarkers, and normalized left ventricle systolic function on follow-up echocardiography. Diagnostic evaluation in TTS patients within our Institution has been previously described. For patients with AMI, the inclusion criteria were acute onset of symptoms, elevated cardiac biomarkers, a diagnosis of myocardial infarction made by a cardiologist, and consequent hospitalization. For healthy participants, the inclusion criteria were absence of any heart disease in clinical history, an age similar to that of the two groups of patients. To control possible confounding variables or aspects that could influence the dimensions assessed by the study, for all participants, general inclusion criteria were: absence of severe chronic medical conditions (e.g., cancer, ictus, autoimmune diseases); absence of neurological (e.g., epilepsy) and psychiatric diseases (e.g., schizophrenia, bipolar disorder, major depressive disorder) or diagnosis of dementia or other cognitive impairments. To achieve the first aim of this study, life events and their impact were considered in the overall sample. To attain the second aim, only the two groups with hearth pathologies were considered to ascertain the trigger's impact on patients with TTS and AMI. Physiological Measures In adherence to the European Guidelines for the assessment of blood pressure, systolic (SBP) and diastolic blood pressure (DBP), and heart rate (HR) were recorded through an electronic sphygmomanometer validated for self-measurement. Moreover, the Body Mass Index (BMI; kg/m 2 ) was calculated through participants' weight and height measurements. Sociodemographic and Anamnestic Information A face-to-face interview was conducted to collect sociodemographic (age, education, occupation, marital status) data. An investigation was also undertaken to seek information about medical history, including any psychiatric consultation or psychological treatment, as well as the presence of potential risk factors for cardiovascular pathologies. Specifically, the interview collected information about the medical conditions of hypertension, hypercholesterolemia, and hyperglycemia. 
Subsequently, information about lifestyles was reported: smoking habits (number of cigarettes smoked every day), alcohol consumption (number of glasses drunk daily), coffee consumption (number of cups drunk daily), and adequate physical activity (yes/no). All pharmacological treatments at the time of evaluation were also listed. Modified Version of Interview for Recent Life Events (IRLE) The IRLE is a semistructured interview based on the Paykel Events Scale wherein 63 life events are recorded in different categories: work, education, economic problems, health, grief, emigration, family relationships, social relationships, and other events. The IRLE requires participants to analyze the life events, focusing on the six months preceding the interview or the disease's onset. The time, frequency, and a succinct elucidation of each event are recorded. The interviewerestimated the Independence and Objective Negative Impact of each reported event on two 5-points Likert scales. The Independence concerns the probability that the event may or may not have caused the illness. The Objective Negative Impact refers to the level of unpleasant impact, stress, or threat that the individual is expected to face due to the event, considering its nature and circumstances. While the participant's characteristics must not influence it, the circumstances of its occurrence for both the patient and the event must be considered. However, this index expresses the limits of the interviewer's characteristics. For these reasons, our group made some modifications to the IRLE. These modifications were aimed at better understanding the consequences of an event on individual health by also considering the subjective perception of the event (see Table 1). Three indices were analyzed in our adaptation of IRLE for each life event reported: Table 1) to a general sample of 512 Italian respondents (354 women and 158 males, mean age: 32.7 ± 13.56; mean years of education: 15.60 ± 3.33). This evaluation required the participants to respond to the following question: "how do you think this event generally affects the life of a person?" (c) The Estimated Impact by the interviewer (Interviewer-estimated Impact): this indicates the negative impact that the event had on the participant life according to the interviewer's view on a 5-point Likert scale (1: low negative impact; 5: high negative impact). This index deviates from the average of impacts estimated by two independent observers (inter-rater concordance: r = 0.976). Table 1. Adaptation of the Paykel Events Scale (Paykel, 1971 ). Miscarriage or stillbirth Child married against respondent's wishes Natural calamities (e.g., earthquake, floods) Marital separation not due to argument Marital difficulties of a close family member New person in household Legal problems of a close family member Retirement Unemployment Promotion Occupational hazards (e.g., at work, etc.) Change in work Injuries (e.g., road accidents) ( All three impacts were reported for each event listed by the participants. The interview was aimed at investigating both the eventual triggers associated with the two cardiac events (TTS and AMI) and the main life events within the last six months reported by the participant. Considering the triggers, events characterized by intense negative emotions were classified as Emotional Triggers, whereas those entailing pain or high fatigue were classified as Physical Triggers. 
Considering the three impacts, two indices were computed: the impact of the trigger event and the mean impact of all the life events reported in the last six months.

Procedure

The research was conducted according to the principles of the Declaration of Helsinki and was approved by the Ethics Committee of the Department of Dynamic and Clinical Psychology and Health Studies of the University of Rome Sapienza (Prot. n. 0000664). A total of 97 patients were selected between January 2017 and January 2019 from the database of the Cardiology Department (54 TTS and 52 AMI patients), Vannini Hospital of Rome. A cardiologist selected patients who had been diagnosed and admitted to the Vannini Hospital with TTS or AMI. All selected patients were free of other chronic pathologies (cancer, diabetes, respiratory or neurological disorders). All had been discharged from the hospital for at least 3 months and were in good health. Patients diagnosed with TTS or AMI were briefly presented with the research and asked to participate voluntarily in the evaluation. Considering the number of patients in each group, the number of healthy controls was set at 54.

Data Analysis

Univariate Analyses of Variance (ANOVAs) were carried out to assess the differences between the three groups (HP, TTS, AMI) in age, years of education, cardiac risk factors (BMI, cigarette consumption (n./day), alcohol consumption (glasses/day), caffeine consumption (cups/day)), clinical recordings (SBP, DBP, HR), and the Life Events' Impacts reported by the IRLE (Objective, Subject-indicated, Interviewer-estimated). Planned comparisons were used to verify possible differences among groups. A χ2 test was used to compare the percentages of the categorical variables among groups. Specifically, marital status, occupational status, presence of some cardiac risk factors (i.e., hyperglycemia, hypertension, hypercholesterolemia, family history of cardiovascular diseases), and nature of the trigger (No Trigger, Emotional Trigger, Physical Trigger) were tested. To analyze the characteristics of trigger events, ANOVAs for the Trigger Impacts (Objective, Subject-indicated, Interviewer-estimated) were carried out between the two groups of patients (TTS, AMI). Univariate Analyses of Covariance (ANCOVAs) were carried out on the impact indices of the Paykel interview, with age and years of education as covariates, to control for the influence of these variables on the negative impacts of both life events and trigger events in the different groups. Bonferroni's correction was applied to reduce the risk of Type 1 error, and p ≤ 0.02 was accepted as significant.

Table 2 reports the differences between the three groups of participants (HP, TTS, AMI) in demographics (age, years of education, gender, marital status, occupational status), cardiac risk factors (cigarette, alcohol, and caffeine consumption, hyperglycemia, physical activity, hypercholesterolemia, hypertension, family history of CVDs, and Body Mass Index), and clinical recordings (SBP, DBP, HR).

Demographics and Lifestyle Variables

Participants showed significant differences in age (F2,157 = 5.90; p = 0.003) and years of education (F2,157 = 4.19; p = 0.02). The TTS group was older and had fewer years of education than both the AMI (age: p = 0.003; years of education: p = 0.01) and HP groups (age: p = 0.005; years of education: p = 0.02).
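The group comparisons described under Data Analysis can be illustrated with a minimal sketch (not the authors' analysis code), assuming the data are arranged in a pandas DataFrame with one row per participant; the column names used here are illustrative assumptions.

    import pandas as pd
    from scipy import stats

    # df is assumed to hold one row per participant, with columns such as
    # "group" (HP / TTS / AMI), "age", "bmi", "subject_impact", "hypertension" (0/1).
    def one_way_anova(df, value_col):
        """One-way ANOVA of a continuous variable across the three groups."""
        groups = [g[value_col].dropna() for _, g in df.groupby("group")]
        return stats.f_oneway(*groups)

    def chi_square(df, cat_col):
        """Chi-square test of a categorical variable against group membership."""
        table = pd.crosstab(df["group"], df[cat_col])
        chi2, p, dof, _ = stats.chi2_contingency(table)
        return chi2, p, dof

    # Corrected threshold used in the paper: p <= 0.02 accepted as significant.
    ALPHA = 0.02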
The ANOVAs for the cardiac risk factors highlighted significant differences among the groups in BMI (F2,157 = 3.88; p = 0.02), smoking habits (F2,157 = 4.49; p = 0.01), and caffeine consumption (F2,157 = 7.34; p = 0.001), while no differences between groups emerged in alcohol consumption (F < 1; p = 0.47). The TTS group showed a lower BMI than both the HP (p = 0.02) and AMI groups (p = 0.02). The TTS group had a lower cigarette consumption than the AMI group (p = 0.003). No differences between the AMI and HP groups emerged for either BMI (p = 0.95) or cigarette consumption (p = 0.11). The AMI group drank more cups of coffee per day than the TTS group (p = 0.0002); coffee consumption was also marginally higher in the AMI group than in the HP group (p = 0.04), while the TTS and HP groups did not differ significantly (p = 0.07). The ANOVAs for the clinical recordings showed significant differences between groups for SBP (F2,157 = 6.89; p = 0.001) and HR (F2,157 = 3.23; p = 0.04). No differences were highlighted for DBP (F2,157 = 1.60; p = 0.20). The AMI group showed lower SBP than both the HP (p = 0.04) and TTS groups (p = 0.0003), but the TTS and HP groups did not differ (p = 0.11). Furthermore, the TTS group showed a lower HR than the HP group (p = 0.02), while the HR of the AMI group did not differ from that of either the TTS (p = 0.60) or the HP group (p = 0.06). The χ2 tests for the cardiac risk factors did not show significant differences among the three groups of participants in the percentages of hyperglycemia and hypercholesterolemia or in alcohol consumption and physical activity. The TTS group presented a lower percentage of CVD family history than both the AMI and HP groups (p = 0.02). Both groups with heart disease reported a higher percentage of hypertension than the HP group (p = 0.0001).

IRLE's Life Events' Impacts

The ANOVA for the number of events reported by the groups in the IRLE showed only marginally significant differences (F2,157 = 3.44; p = 0.03). The TTS group indicated a higher number of events than the HP group (p = 0.01), while the TTS and AMI (p = 0.16) and the AMI and HP groups (p = 0.25) did not differ in the number of life events reported.

IRLE's Trigger Events' Impact

Overall, 27% of patients with AMI (14 out of 52) and 61% of patients with TTS (33 out of 56) reported a trigger before the cardiac event. This difference was significant (χ2 = 4.93; p = 0.03). In the TTS group, 85% of these were Emotional Triggers (28 out of 33) and 15% were Physical Triggers (5 out of 33). The AMI group reported the same percentage of Emotional and Physical Triggers (50%, 7 out of 14). A significant difference between groups in the percentage of Emotional Triggers was observed (χ2 = 9.13; p = 0.003), while no significant differences emerged for Physical Triggers (see Table 4).
The ANOVAs for the Trigger Event Impacts showed significant differences between the two groups in the levels of Objective Impact (F1,45 = 7.60; p = 0.01), Interviewer-estimated Impact (F1,45 = 11.78; p = 0.001; ηp² = 0.21), and Subject-indicated Impact (F1,45 = 17.79; p = 0.0001). The TTS group reported a higher score than the AMI group for all types of impact (Figure 2).

Figure 2. Objective, Interviewer-estimated, and Subject-indicated Trigger Impacts in the TTS and AMI groups.

Discussion

Many studies have reported highly stressful life events as triggers of TTS, and in some cases the IRLE was adopted to analyze the impact of stressor patterns on the clinical manifestation of TTS. Owing to some structural limitations of this interview, it was impossible to explore all relevant aspects of the stressful events that may serve as triggers for TTS. In particular, the subjective perception of the event was not examined, which is surprising, given that whether an event acts as a stressor depends on the subjective perception of it. To overcome this shortcoming, we introduced a parameter capturing the person's subjective judgment. Furthermore, we partially remodeled the objective weight of the events by estimating it on an Italian sample. Our adaptation of the IRLE makes it possible to determine three critical aspects: the supposed objective impact that a life event has on individuals, which determines the adaptive appraisal that the individual must adopt for an adequate response to that event; the subjective impact that a life event generates in each individual, which can be influenced by personal characteristics (e.g., personality traits, previous life experiences, personal resources, biological response); and the impact of a life event as estimated by the interviewer, based on their knowledge of common adaptive responses by a reasonable individual. The main results of this study showed that stressful events are perceived differently by patients with TTS when compared with both healthy people and patients with AMI. Patients with TTS tend to perceive life events more negatively than the interviewer does.
Surprisingly, the objective impact of life events was not greater in patients with TTS. Many studies have reported that chronic stress and multiple adverse life events are possible risk factors for TTS onset. However, this is the first study to focus on the impact indicated by the subjects (i.e., participants' perception of the events) as compared with the objective impact of the events. Since patients with TTS did not report a higher number of stressful events than patients with AMI, this result may indicate that subjective perception, rather than the presence of specific life events, could constitute a risk factor for TTS onset. The higher SBP and HR values in patients with TTS in comparison with the other two groups of participants could depend on their emotional reactions, emotion dysregulation, and dysfunctional coping strategies, which are generally associated with higher blood pressure levels. Another aspect emerging from this study is that the modified version of the IRLE allows a better definition of the characteristics of the triggers associated with cardiac events. Numerous studies have indicated that TTS is strongly associated with a stressor event, leading to a greater increase in catecholamine levels in TTS than in other heart diseases. This increase can probably be attributed to sympathetic hyperactivation. The relationship between a stressful trigger and the pathophysiological response of the cardiovascular system has prompted researchers to define a potential interaction between the brain and the heart in response to the stressful life events that cause TTS. This brain-heart activation involves the limbic system (i.e., amygdala, insula, hippocampus, cingulate cortex), the autonomic nervous system, and the hypothalamic-pituitary-adrenal (HPA) axis. Our results confirmed a higher frequency of triggers in patients with TTS than in their counterparts with AMI, although the prevalence (61%) is lower than that reported by previous studies, which showed prevalence rates between 70% and 90%. These differences are probably due to small sample sizes, as most previous studies were case reports. Emotional triggers occurred more frequently than physical triggers, which contrasts with the findings of a recent systematic review of 1330 case reports, in which physical triggers (e.g., drugs, surgery, brain trauma) were more common than emotional triggers in TTS. Emotional triggers (e.g., death of a close person, serious illness of a family member, dismissal, severe financial difficulties; see Table 1) could have a more negative impact than physical triggers, which may explain their high prevalence in association with TTS onset. Life events that involve a relevant emotional response would increase the activation of the HPA axis as an automatic and adaptive response. This activation could lead to cardiac syndromes by generating a cascade of events in vulnerable individuals. Another interesting result is the higher impact of triggers in patients with TTS compared with those with AMI, shown by the IRLE in all three assessed indices. Although the patients reported a misperception of life events, characterized by a more negative perception, the trigger was objectively characterized by a higher stressful impact. According to this finding, a possible marker of TTS onset could be the interaction between the person's misinterpretation of life events and the objectively high impact of the trigger, generating physical distress.
This result corroborates prior findings suggesting an association between changes in emotional competencies and metacognitive strategies and the presence of emotional triggers in patients with TTS. Further studies should examine the role of metacognitive abilities in the occurrence and impact of emotional triggers in TTS onset, as well as the influence of these abilities on the subjective and estimated impact of life events. These results should also be interpreted in light of the general distinctive characteristics of patients with TTS and AMI. Unhealthy lifestyles, such as smoking and excessive body weight, have been identified as risk factors for heart disease and are considered indicators of reduced life expectancy. In our study, participants with TTS smoked fewer cigarettes per day and were less likely to smoke than those affected by AMI. Moreover, patients with TTS had a lower BMI than both healthy people and patients with AMI. These results confirm the findings of previous studies, which reported fewer cardiovascular risk factors in patients affected by TTS. Notably, the psychophysiological profile of patients with TTS differs from that of patients with AMI, indicating a different etiology of the two cardiac events. Patients with TTS tended to have healthier lifestyles than patients with AMI, but they presented an interpretation bias regarding life events. A similar number of stressful life events, of comparable severity, affected people who developed TTS and AMI, but the negative perception of these events was higher in patients with TTS. This could be a typical trait of patients with TTS. In this case, it would be appropriate to better investigate psychological factors in patients with TTS, such as the strategies used to cope with stressful events and the ability to regulate emotional responses. Some limitations of this study should be considered when interpreting these results. The main limitation is the gender imbalance between the two groups of patients, which renders the results preliminary. However, our data are consistent with epidemiological studies, which indicate a higher incidence of TTS in women and a higher incidence of AMI in men. Further studies should balance the groups for gender and age to allow stronger interpretations. For example, controlling for gender could help analyze the role of some predisposing factors (e.g., lack of estrogen replacement or hormonal alterations) in the relationship between stress and TTS. Controlling for age could also yield interesting findings regarding the effects of cardiac events at different stages of life. Moreover, an analysis of the psychological characteristics of patients affected by TTS could help specify the aspects (e.g., coping strategies and emotional regulation) involved in stress management. This, in turn, could help explain why the onset of TTS appears to be related to a poor response to adverse events. Although an adequate number of patients was analyzed, the low number of triggers in the AMI group is another limitation that prevents further inferences about trigger differences between patients with TTS and AMI. Another possible limitation of this study is the absence of an analysis focused on positive events and their impact on individuals. Bearing in mind the studies that describe TTS as a possible 'happy-heart' syndrome, the analysis of positive events is an interesting research direction.
The structure of the IRLE prevented the examination of this dimension; therefore, further studies are recommended. Finally, a methodological limitation should be highlighted: there could be bias linked to the absence of a blinding strategy in the interviewers' IRLE ratings.

Conclusions

The present findings represent a first step in analyzing life events and their possible impact on patients with TTS. The impact of life events could be a psychological marker of this syndrome. The dynamic between life events and the individual stress response could influence an individual's physiological activation and determine the onset of a cardiovascular event. Studies on this topic are critical for differentiating this cardiovascular syndrome from others. For this purpose, it is essential to better define the aspects involved in TTS onset. It would be interesting to strengthen this analysis by including the relationship between the brain and the heart, for example with an analysis of heart-rate variability, a gold-standard measure for assessing autonomic activation. This would also help define the relationship between cognitive and psychological characteristics and the physiological response of the individual.
/*************************************************************************
*
* [2017] - [2018] Automy Inc.
* All Rights Reserved.
*
* NOTICE: All information contained herein is, and remains
* the property of Automy Incorporated and its suppliers,
* if any. The intellectual and technical concepts contained
* herein are proprietary to Automy Incorporated
* and its suppliers and may be covered by U.S. and Foreign Patents,
* patents in process, and are protected by trade secret or copyright law.
* Dissemination of this information or reproduction of this material
* is strictly forbidden unless prior written permission is obtained
* from Automy Incorporated.
*/
#include <vnx/vnx.h>
#include <vnx/Util.h>
#include <vnx/ProcessClient.hxx>
#include <mutex>
#include <random>
#ifdef _WIN32
#include <windows.h>
#else
#include <termios.h>
#endif
namespace vnx {
std::string string_subs(std::string str, const std::string& from, const std::string& to) {
size_t start_pos = 0;
while((start_pos = str.find(from, start_pos)) != std::string::npos) {
str.replace(start_pos, from.length(), to);
start_pos += to.length(); // handles case where 'to' is a substring of 'from'
}
return str;
}
std::vector<std::string> string_split(const std::string& str, char sep, bool clean) {
std::vector<std::string> res;
std::string tmp;
for(char c : str) {
if(c == sep) {
if(!tmp.empty() || !clean) res.push_back(tmp);
tmp.clear();
} else {
tmp.push_back(c);
}
}
if(!tmp.empty() || !clean) res.push_back(tmp);
return res;
}
uint64_t rand64() {
static std::mutex mutex;
static bool is_init = false;
static std::mt19937_64 generator;
std::lock_guard<std::mutex> lock(mutex);
if(!is_init) {
generator.seed(get_wall_time_nanos());
is_init = true;
}
return generator();
}
std::string input_password(const std::string &prompt){
#ifdef _WIN32
HANDLE console = GetStdHandle(STD_INPUT_HANDLE);
DWORD saved_attributes = 0;
GetConsoleMode(console, &saved_attributes);
SetConsoleMode(console, saved_attributes & ~ENABLE_ECHO_INPUT);
#else
termios saved_attributes;
tcgetattr(0, &saved_attributes);
termios tmp = saved_attributes;
tmp.c_lflag &= ~ECHO;
tmp.c_cc[VMIN] = 1;
tmp.c_cc[VTIME] = 0;
tcsetattr(0, TCSANOW, &tmp);
#endif
std::string result;
vnx::ProcessClient proc("vnx.process");
proc.pause_log();
std::cout << prompt;
std::getline(std::cin, result);
std::cout << std::endl;
proc.resume_log();
#ifdef _WIN32
SetConsoleMode(console, saved_attributes);
#else
tcsetattr(0, TCSANOW, &saved_attributes);
#endif
return result;
}
} // vnx
|
A pub was raided and six people were arrested as police launched a sweeping crackdown on the “Only Fools and Horses” trade in stolen goods.
About 20 officers swooped on the Three Legs on The Headrow in Leeds city centre yesterday (Feb 6) following intelligence that it was being used effectively as a marketplace to buy and sell illicit items.
Two of the suspects are arrested
Superintendent Pat Casserly, who led the operation, said there was evidence it was possible to go into the pub with a “shopping list” of items that could be stolen to order.
He added: “Yes, this is Yorkshire, yes we like a bargain, yes we don’t want to pay over the odds, but this isn’t Rodney Trotter and Only Fools and Horses having a casual laugh about things that fell off the back of a lorry – this is serious.
“This is crime that’s undermining people’s jobs in Leeds and it’s also perpetuating the misery of people having their homes burgled and we’re not having it.”
After executing a warrant, police searched the premises and punters, as well as using sniffer dogs to detect drugs.
Four women and two men – including at least one involved in running the pub – were arrested on suspicion of theft after officers seized a large number of suspect items including wallets, purses and handbags. Police were last night working with the council to close the pub down.
Supt Casserly said: “The venues where shoplifters are selling stolen gear have become almost a marketplace in themselves and we are aware that some shoplifters are actually being commissioned, tasked, with going out and stealing certain things.
“In effect it’s feasible for a person to go into a certain pub and give a shoplifter almost a shopping list and while that person sits at the bar having a drink that shoplifter is out stealing the things to order and then selling them at a knock-down price.”
Yesterday’s raid was part of Operation Viper – targeting some of West Yorkshire’s most prolific criminals. Police will be raiding properties across the county in a three-day blitz. |
/**
 * Convert a single-byte array into a signed (sign-extended) integer
*
* @param bytes a 1-byte byte array
* @return signed integer value of bytes
*/
public static int bytesToSignedInt(byte[] bytes) {
if (bytes.length < 1) {
return 0;
}
return (int) bytes[0];
} |
/**
* Constants used for extracting the column of the desired field.
*/
public final class IndexDataConstants {
public static final String AGE = "Age";
// Glucose Mandatory
public static final String FASTING_GLUCOSE = "Fasting Glucose";
public static final String GLUCOSE_THREE = "Glucose Three";
public static final String GLUCOSE_SIX = "Glucose Six";
public static final String GLUCOSE_NINE = "Glucose Nine";
public static final String GLUCOSE_ONE_TWENTY = "Glucose One Twenty";
// Insulin Mandatory
public static final String FASTING_INSULIN = "Fasting Insulin";
public static final String INSULIN_THREE = "Insulin Three";
public static final String INSULIN_SIX = "Insulin Six";
public static final String INSULIN_NINE = "Insulin Nine";
public static final String INSULIN_ONE_TWENTY = "Insulin One Twenty";
// Optional data
public static final String WEIGHT = "Weight";
public static final String HEIGHT = "Height";
public static final String HDL = "HDL";
public static final String NEFA = "Nefa";
public static final String TRIGLYCERIDE = "Triglyceride";
public static final String THYROGLOBULIN = "Thyroglobulin";
} |
A device configuration management tool for context-aware systems. The automation industry is moving towards more complex systems, which pose new challenges for operation from both the machine and the human perspective. One group of challenges relates to managing the overwhelming flow of information and to the usability of the systems, and context-aware solutions have recently been introduced to the automation field in order to cope with challenges of this kind. Context awareness is seen as a capability that allows both the technological system and the human operator to infer optimal decisions and to behave in the most effective way. In order to reach this capability, the external physical world and the system must be described in a way that is interpretable by both humans and machines. The ambition of this paper is to contribute to the context-aware (re)configuration of the system with a tool designed to improve the efficiency of the configuration phase of a context-aware system. The tool provides a solution for configuring and modeling the field devices of a system via automatically generated ontologies. This research is part of a device management framework for a building automation use case, which aims to support the controlling decisions of dwellers, technical support, and social services.
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.codelibs.fesen.action.bulk;
import static org.codelibs.fesen.core.TimeValue.timeValueMillis;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.atomic.AtomicInteger;
import org.codelibs.fesen.action.bulk.BackoffPolicy;
import org.codelibs.fesen.core.TimeValue;
import org.codelibs.fesen.test.ESTestCase;
public class BackoffPolicyTests extends ESTestCase {
public void testWrapBackoffPolicy() {
TimeValue timeValue = timeValueMillis(between(0, Integer.MAX_VALUE));
int maxNumberOfRetries = between(1, 1000);
BackoffPolicy policy = BackoffPolicy.constantBackoff(timeValue, maxNumberOfRetries);
AtomicInteger retries = new AtomicInteger();
policy = BackoffPolicy.wrap(policy, retries::getAndIncrement);
int expectedRetries = 0;
{
// Fetching the iterator doesn't call the callback
Iterator<TimeValue> itr = policy.iterator();
assertEquals(expectedRetries, retries.get());
while (itr.hasNext()) {
// hasNext doesn't trigger the callback
assertEquals(expectedRetries, retries.get());
// next does
itr.next();
expectedRetries += 1;
assertEquals(expectedRetries, retries.get());
}
// next doesn't call the callback when there isn't a backoff available
expectThrows(NoSuchElementException.class, () -> itr.next());
assertEquals(expectedRetries, retries.get());
}
{
// The second iterator also calls the callback
Iterator<TimeValue> itr = policy.iterator();
itr.next();
expectedRetries += 1;
assertEquals(expectedRetries, retries.get());
}
}
}
|
AutoCell: The Self-Organizing WLAN. By definition, IEEE 802.11 wireless LANs (WLANs) are constantly in flux. There is no way to predict where a particular client will be at any moment, making it equally impossible to predetermine the load on any wireless access point (AP). How will 802.11 Wi-Fi networks running in adjoining offices or on different floors affect one another? Even something as seemingly trivial as rearranging cubicle partitions, or people standing in front of an AP, can affect network conditions.
#include "obstacle.h"
void Obstacle::init()
{
size_t total_len = 360 * 3 * 2;
float *base_verts = new float[total_len];
for (size_t i = 0;i < 360; ++i) {
base_verts[i*6] = _center.x;
base_verts[i*6+1] = _center.y;
base_verts[i * 6 + 2] = _center.x + _radius * glm::cos(glm::radians((float)i));
base_verts[i * 6 + 3] = _center.y + _radius * glm::sin(glm::radians((float)i));
base_verts[i * 6 + 4] = _center.x + _radius * glm::cos(glm::radians((float)i+1.0f));
base_verts[i * 6 + 5] = _center.y + _radius * glm::sin(glm::radians((float)i+1.0f));
//std::cout << (float)i << " " << (float)i+1.0f << " " << glm::sin(glm::radians((float)i+1.0f)) << std::endl;
//std::cout << base_verts[i * 6] << " " << base_verts[i * 6 + 1] << std::endl;
//std::cout << base_verts[i * 6+2] << " " << base_verts[i * 6 + 3] << std::endl;
//std::cout << base_verts[i * 6+4] << " " << base_verts[i * 6 + 5] << std::endl;
}
glGenVertexArrays(1, &this->_vao);
glGenBuffers(1, &this->_vbo);
glBindBuffer(GL_ARRAY_BUFFER, this->_vbo);
glBufferData(GL_ARRAY_BUFFER, total_len * sizeof(float), base_verts, GL_STATIC_DRAW);
glBindVertexArray(this->_vao);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
delete[] base_verts;
}
void Obstacle::draw()
{
this->_shader.use();
// Calculate model matrix
glm::mat4 model = glm::mat4(1.0f);
//model = glm::translate(model, cur_center - _initial_center);
glUniformMatrix4fv(
glGetUniformLocation(this->_shader.id(), "model"),
1,
false,
glm::value_ptr(model)
);
glUniform3f(
glGetUniformLocation(this->_shader.id(), "in_color"),
this->_color.x, this->_color.y, this->_color.z
);
glBindVertexArray(this->_vao);
glDrawArrays(GL_TRIANGLES, 0, 360 * 3);
glBindVertexArray(0);
}
void Obstacle::move(const glm::vec3 &to)
{
_center += to;
}
|
#!/usr/bin/env python3
"""
Utilities for converting between colorspaces. Includes the following:
* `rgb_to_hsl` (same as `matplotlib.colors.rgb_to_hsv`)
* `hsl_to_rgb` (same as `matplotlib.colors.hsv_to_rgb`)
* `hcl_to_rgb`
* `rgb_to_hcl`
* `hsluv_to_rgb`
* `rgb_to_hsluv`
* `hpluv_to_rgb`
* `rgb_to_hpluv`
Note
----
This file is adapted from `seaborn
<https://github.com/mwaskom/seaborn/blob/master/seaborn/external/husl.py>`__
and `hsluv-python
<https://github.com/hsluv/hsluv-python/blob/master/hsluv.py>`__.
For more information on colorspaces see the
`CIULUV specification <https://en.wikipedia.org/wiki/CIELUV>`__, the
`CIE 1931 colorspace <https://en.wikipedia.org/wiki/CIE_1931_color_space>`__,
the `HCL colorspace <https://en.wikipedia.org/wiki/HCL_color_space>`__,
and the `HSLuv system <http://www.hsluv.org/implementations/>`__.
"""
# Imports. See: https://stackoverflow.com/a/2353265/4970632
# The HLS is actually HCL
import math
from colorsys import hls_to_rgb, rgb_to_hls
# Coefficients or something
m = [
[3.2406, -1.5372, -0.4986],
[-0.9689, 1.8758, 0.0415],
[0.0557, -0.2040, 1.0570]
]
m_inv = [
[0.4124, 0.3576, 0.1805],
[0.2126, 0.7152, 0.0722],
[0.0193, 0.1192, 0.9505]
]
# Hard-coded D65 illuminant (has to do with expected light intensity and
# white balance that falls upon the generated color)
# See: https://en.wikipedia.org/wiki/Illuminant_D65
# Also: https://github.com/hsluv/hsluv-python/issues/3
refX = 0.95047
refY = 1.00000
refZ = 1.08883
refU = 0.19784
refV = 0.46834
lab_e = 0.008856
lab_k = 903.3
def hsluv_to_rgb(h, s, l):
return lchuv_to_rgb(*hsluv_to_lchuv([h, s, l]))
def hsluv_to_hex(h, s, l):
return rgb_to_hex(hsluv_to_rgb(h, s, l))
def rgb_to_hsluv(r, g, b):
return lchuv_to_hsluv(rgb_to_lchuv(r, g, b))
def hex_to_hsluv(color):
return rgb_to_hsluv(*hex_to_rgb(color))
def hpluv_to_rgb(h, s, l):
return lchuv_to_rgb(*hpluv_to_lchuv([h, s, l]))
def hpluv_to_hex(h, s, l):
return rgb_to_hex(hpluv_to_rgb(h, s, l))
def rgb_to_hpluv(r, g, b):
return lchuv_to_hpluv(rgb_to_lchuv(r, g, b))
def hex_to_hpluv(color):
return rgb_to_hpluv(*hex_to_rgb(color))
def lchuv_to_rgb(l, c, h):
return CIExyz_to_rgb(CIEluv_to_CIExyz(lchuv_to_CIEluv([l, c, h])))
def rgb_to_lchuv(r, g, b):
return CIEluv_to_lchuv(CIExyz_to_CIEluv(rgb_to_CIExyz([r, g, b])))
def hsl_to_rgb(h, s, l):
h /= 360.0
s /= 100.0
l /= 100.0 # noqa
return hls_to_rgb(h, l, s)
def rgb_to_hsl(r, g, b):
h, l, s = rgb_to_hls(r, g, b)
h *= 360.0
s *= 100.0
l *= 100.0 # noqa
return h, s, l
def hcl_to_rgb(h, c, l):
return CIExyz_to_rgb(CIEluv_to_CIExyz(lchuv_to_CIEluv([l, c, h])))
def rgb_to_hcl(r, g, b):
l, c, h = CIEluv_to_lchuv(CIExyz_to_CIEluv(rgb_to_CIExyz([r, g, b])))
return h, c, l
def rgb_prepare(triple):
ret = []
for ch in triple:
ch = round(ch, 3)
if ch < -0.0001 or ch > 1.0001:
raise Exception(f'Illegal RGB value {ch:f}.')
if ch < 0:
ch = 0
if ch > 1:
ch = 1
# the +0.001 fixes rounding error
ret.append(int(round(ch * 255 + 0.001, 0)))
return ret
def rgb_to_hex(triple):
[r, g, b] = triple
return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))
def hex_to_rgb(color):
if color.startswith('#'):
color = color[1:]
r = int(color[0:2], 16) / 255.0
g = int(color[2:4], 16) / 255.0
b = int(color[4:6], 16) / 255.0
return [r, g, b]
def max_chroma(L, H):
hrad = math.radians(H)
sinH = (math.sin(hrad))
cosH = (math.cos(hrad))
sub1 = (math.pow(L + 16, 3.0) / 1560896.0)
sub2 = sub1 if sub1 > 0.008856 else (L / 903.3)
result = float('inf')
for row in m:
m1 = row[0]
m2 = row[1]
m3 = row[2]
top = ((0.99915 * m1 + 1.05122 * m2 + 1.14460 * m3) * sub2)
rbottom = (0.86330 * m3 - 0.17266 * m2)
lbottom = (0.12949 * m3 - 0.38848 * m1)
bottom = (rbottom * sinH + lbottom * cosH) * sub2
for t in (0.0, 1.0):
C = (L * (top - 1.05122 * t) / (bottom + 0.17266 * sinH * t))
if C > 0.0 and C < result:
result = C
return result
def hrad_extremum(L):
lhs = (math.pow(L, 3.0) + 48.0 * math.pow(L, 2.0)
+ 768.0 * L + 4096.0) / 1560896.0
rhs = 1107.0 / 125000.0
sub = lhs if lhs > rhs else 10.0 * L / 9033.0
chroma = float('inf')
result = None
for row in m:
for limit in (0.0, 1.0):
[m1, m2, m3] = row
top = -3015466475.0 * m3 * sub + 603093295.0 * m2 * sub \
- 603093295.0 * limit
bottom = 1356959916.0 * m1 * sub - 452319972.0 * m3 * sub
hrad = math.atan2(top, bottom)
if limit == 0.0:
hrad += math.pi
test = max_chroma(L, math.degrees(hrad))
if test < chroma:
chroma = test
result = hrad
return result
def max_chroma_pastel(L):
H = math.degrees(hrad_extremum(L))
return max_chroma(L, H)
def hsluv_to_lchuv(triple):
H, S, L = triple
if L > 99.9999999:
return [100, 0.0, H]
if L < 0.00000001:
return [0.0, 0.0, H]
mx = max_chroma(L, H)
C = mx * S / 100.0
# if C > 100.0:
# raise ValueError(f'HSL color {triple} is outside LCH colorspace.')
return [L, C, H]
def lchuv_to_hsluv(triple):
L, C, H = triple
if L > 99.9999999:
return [H, 0.0, 100.0]
if L < 0.00000001:
return [H, 0.0, 0.0]
mx = max_chroma(L, H)
S = 100.0 * C / mx
return [H, S, L]
def hpluv_to_lchuv(triple):
H, S, L = triple
if L > 99.9999999:
return [100, 0.0, H]
if L < 0.00000001:
return [0.0, 0.0, H]
mx = max_chroma_pastel(L)
C = mx * S / 100.0
# if C > 100.0:
# raise ValueError(f'HPL color {triple} is outside LCH colorspace.')
return [L, C, H]
def lchuv_to_hpluv(triple):
L, C, H = triple
if L > 99.9999999:
return [H, 0.0, 100.0]
if L < 0.00000001:
return [H, 0.0, 0.0]
mx = max_chroma_pastel(L)
S = 100.0 * C / mx
return [H, S, L]
def dot_product(a, b):
return sum(i * j for i, j in zip(a, b))
# return sum(map(operator.mul, a, b))
def from_linear(c):
if c <= 0.0031308:
return 12.92 * c
else:
return (1.055 * math.pow(c, 1.0 / 2.4) - 0.055)
def to_linear(c):
a = 0.055
if c > 0.04045:
return (math.pow((c + a) / (1.0 + a), 2.4))
else:
return (c / 12.92)
def CIExyz_to_rgb(triple):
CIExyz = map(lambda row: dot_product(row, triple), m)
return list(map(from_linear, CIExyz))
def rgb_to_CIExyz(triple):
rgbl = list(map(to_linear, triple))
return list(map(lambda row: dot_product(row, rgbl), m_inv))
def CIEluv_to_lchuv(triple):
L, U, V = triple
C = (math.pow(math.pow(U, 2) + math.pow(V, 2), (1.0 / 2.0)))
hrad = (math.atan2(V, U))
H = math.degrees(hrad)
if H < 0.0:
H = 360.0 + H
return [L, C, H]
def lchuv_to_CIEluv(triple):
L, C, H = triple
Hrad = math.radians(H)
U = (math.cos(Hrad) * C)
V = (math.sin(Hrad) * C)
return [L, U, V]
# Try setting gamma from: https://en.wikipedia.org/wiki/HCL_color_space
# The 3.0 used below should be the same; don't mess with it
gamma = 3.0 # tunable? nah, get weird stuff
def CIEfunc(t):
if t > lab_e:
return (math.pow(t, 1.0 / gamma))
else:
return (7.787 * t + 16.0 / 116.0)
def CIEfunc_inverse(t):
if math.pow(t, 3.0) > lab_e:
return (math.pow(t, gamma))
else:
return (116.0 * t - 16.0) / lab_k
def CIExyz_to_CIEluv(triple):
X, Y, Z = triple
if X == Y == Z == 0.0:
return [0.0, 0.0, 0.0]
varU = (4.0 * X) / (X + (15.0 * Y) + (3.0 * Z))
varV = (9.0 * Y) / (X + (15.0 * Y) + (3.0 * Z))
L = 116.0 * CIEfunc(Y / refY) - 16.0
# Black will create a divide-by-zero error
if L == 0.0:
return [0.0, 0.0, 0.0]
U = 13.0 * L * (varU - refU)
V = 13.0 * L * (varV - refV)
return [L, U, V]
def CIEluv_to_CIExyz(triple):
L, U, V = triple
if L == 0:
return [0.0, 0.0, 0.0]
varY = CIEfunc_inverse((L + 16.0) / 116.0)
varU = U / (13.0 * L) + refU
varV = V / (13.0 * L) + refV
Y = varY * refY
X = 0.0 - (9.0 * Y * varU) / ((varU - 4.0) * varV - varU * varV)
Z = (9.0 * Y - (15.0 * varV * Y) - (varV * X)) / (3.0 * varV)
return [X, Y, Z]
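# A brief usage sketch for the conversion helpers above (not part of the original
# module): round-tripping a hex color through the HSLuv representation and
# converting an RGB triple to HCL. Output values are approximate.
if __name__ == "__main__":
    # Convert a hex color to HSLuv and back again.
    h, s, l = hex_to_hsluv("#336699")
    print(f"HSLuv: h={h:.1f}, s={s:.1f}, l={l:.1f}")
    print("round trip:", hsluv_to_hex(h, s, l)) # expected to be close to "#336699"
    # Plain HCL (CIE LCHuv) conversion of an RGB triple in the 0-1 range.
    print("HCL of mid-grey:", rgb_to_hcl(0.5, 0.5, 0.5))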
|
Victoria Legrand, the voice behind dream-pop duo Beach House, met longtime collaborator Alex Scally in the early 2000s in the burgeoning and mercurial Baltimore indie scene. The two quickly realized that they shared common ground with music and found the urge to express that creativity. And it just sort of happened.
“We didn’t really talk about it,” she said. “Sharing a love of other music. Becoming best friends. Having just a kinship. Agreeing without having to talk about it.”
Coming up in Baltimore was integral to forming their sound. Legrand became nostalgic while discussing the scene in the earliest days, glowing over what she saw as a loving and supportive community. It was that environment that helped the band grow and evolve in a way that felt organic.
“It’s always been a place where you can be whatever you want to be,” she said. “Baltimore has always been … you can just be yourself.”
How do you articulate just how much you love music? For Legrand, it’s all-encompassing. She breathes music, and it flows through her, manifesting itself in her collaborations with Scally. Writing together isn’t just second nature but a necessity for Legrand, who expresses a clear admiration for her musical cohort.
“I could never do a solo thing,” she said. “Alex and I have always written together. It’s a collaboration that’s tried and true. We always bring it to each other and make it together.”
Music has always been a focal point of Legrand’s life. At an early age, she started with piano lessons, an instrument that has had an obvious influence on her compositions.
Advertisement
And that search for the sound still continues to inspire her: “The sounds, whatever you think is cool, if that little flame is still in the song … and the story … if you’ve created something that goes somewhere. You just have this feeling. I don’t know how to describe it. It makes you feel something. You didn’t damage it. You didn’t crush it. You didn’t turn it into something it’s not.”
Legrand added, “Sometimes it’s just your lucky day. You hear all the words. They just flow out of you. They’re coming out of you instantly. Those are amazing moments. Then you feel like you’re an antennae. If you’re into that and that’s what gets you going, then you love music. You have to know when to walk away and when to push. That’s why it’s a craft. It’s a sensitivity to composition and a respect to the emotion.”
After more than a decade, Legrand is as excited today about making music as she was when it all started. But she doesn’t measure her success by any physical accumulation of wealth or property, but by the privilege that they’ve had in sharing their music on a national stage.
“Success is deeply personal,” she said. “This band has had success, because to me, the first time that anyone came to a show … that exchange with human beings — that’s success. That’s such a beautiful relationship with the universe. When you made something and someone appreciates it — that’s success. A successful person has money and a house, but to be successful you have to love and be loved in return. All you have is the love that you have and can give to the world. That’s all you have. If you mess that up or don’t see it — that’s failure.”
Pausing, she applied that sentiment to a larger context: “Look at our country. Look at what’s going on. People have to love each other. If we don’t have that … forget about it.” |
Assessing synovitis with conventional static and dynamic contrast-enhanced magnetic resonance imaging in knee osteoarthritis. Knee osteoarthritis (KOA) is one of the most common causes of physical disability in the elderly population. With an increasingly ageing and obese population, the prevalence of KOA is expected to rise substantially. The need for a better understanding of the disease, and for tools that can predict its course, for example following treatment, is therefore imperative. Over recent years, inflammation has been recognised as an important factor in both the symptomatology and the course of KOA. Synovitis, inflammation of the synovium, is the hallmark of intra-articular inflammation and has been associated with pain, symptoms, and disease progression. Synovitis can be visualised on conventional static MRI. However, the addition of a dynamic contrast-enhanced (DCE) MRI sequence enables the synovium to be assessed in terms of both its morphology and its perfusion. Studies in both KOA and rheumatoid arthritis have shown that DCE-MRI measures of synovitis are more sensitive than conventional static MRI with respect to microscopic synovitis and patient-reported outcome measures (PROMs). The aims of this PhD project were to characterise synovitis in KOA with conventional static and DCE MRI with respect to histology (study I), its association with PROMs (studies II-III), and changes following a symptom-improving intervention (study III). We found that DCE-MRI measures of synovitis seem to be superior to conventional static MRI in their association with histological synovitis (study I) and pain (study II) in a cross-sectional setting. However, the use of DCE-MRI over conventional static CE-MRI cannot be justified when assessing the long-term changes in synovitis following an intervention with intra-articular corticosteroids/placebo and exercise (study III). Evidence is mounting that KOA comprises different phenotypes. There is an urgent need to define these in order to improve and individualise treatment and management. It is essential to gain a better understanding of the different processes taking place in KOA, at the individual level and in the different stages of the disease. DCE-MRI may well be a useful tool in facing these challenges, especially regarding the role of perfusion and inflammation in KOA and osteoarthritis in general.
An Access Control Solution for the Inter-Organizational Use of ITIL Federated Configuration Management Databases. Governance, Risk, and Compliance (GRC) Management is on the verge of becoming one of the most important business activities for enterprises. Consequently, IT departments and IT service providers must sharpen their alignment to business processes and demands. Fulfilling these new requirements is supported by best-practice frameworks, such as ITIL, which define a complete set of IT Service Management (ITSM) processes. Many ITSM processes rely on accurate information that is provided by the Configuration Management (CM) process and stored in a database called the CMDB. As it is next to impossible to store all the necessary data in a single huge database, the distributed storage of so-called configuration items and their relationships has become rather widespread and is termed CMDB federation (CMDBf). In this paper, we first present the need for inter-organizational CMDBf usage, e.g., in outsourcing scenarios, by means of a real-world scenario. Based on this requirement, we introduce our concept of an ioCMDBf, discuss how it can be used by the ITSM processes of all involved organizations, and present a policy-based access control architecture for the ioCMDBf which makes use of state-of-the-art identity federation technology.
The present invention relates to an image reconstruction method and an X-ray computed tomography (CT) system. More particularly, the present invention relates to a method of reconstructing an image on the basis of a plurality of views of projection data items provided by X-rays having passed through a subject, and an X-ray CT system performing the image reconstruction.
X-ray CT systems acquire a plurality of views of projection data items from a subject, and reconstruct an image on the basis of the projection data items. For the image reconstruction, processes such as pre-processing, reconstruction, and post-processing devised on the assumption of visual assessment of an image are employed. Therefore, even when computer-aided detection (CAD) is performed based on a reconstructed image, an image reconstructed on the assumption of the visual assessment is employed (refer to, for example, Patent Document 1).
[Patent Document 1] Japanese Unexamined Patent Application Publication No. 2004-070562 (pp. 4–5, FIG. 1)
In general, images intended for visual assessment have their high-frequency component enhanced so that the image appeals to the naked eye. Algorithms adapted to CAD include algorithms that employ a differential filter. There is therefore a possibility that the differential filter defined for CAD, combined with a reconstruction function defined for visual assessment, may amplify noise, make the image recognition performed during CAD unstable, and degrade the lesion detection rate.
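To make this concern concrete, the following is a small, hypothetical sketch (not part of the patent) showing how a reconstruction step that boosts high frequencies, followed by the differential (gradient) filtering typical of CAD, amplifies noise relative to the underlying signal; all names and parameters are illustrative assumptions.

    import numpy as np

    rng = np.random.default_rng(0)

    # Synthetic 1-D "profile" through an image: smooth anatomy plus noise.
    x = np.linspace(0, 2 * np.pi, 512)
    signal = np.sin(x)
    noisy = signal + 0.05 * rng.standard_normal(x.size)

    # A crude high-frequency-enhancing kernel (unsharp masking), standing in for a
    # reconstruction function tuned for visual assessment.
    def enhance_high_freq(img, amount=2.0):
        blurred = np.convolve(img, np.ones(5) / 5, mode="same")
        return img + amount * (img - blurred)

    # Differential filter of the kind used by many CAD algorithms (first difference).
    def differential(img):
        return np.diff(img)

    plain_grad = differential(noisy)
    enhanced_grad = differential(enhance_high_freq(noisy))

    # The enhanced chain shows a markedly larger noise-driven gradient spread.
    print("std of gradient, plain reconstruction:   ", plain_grad.std())
    print("std of gradient, high-frequency enhanced:", enhanced_grad.std())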
package com.oberasoftware.jasdb.core.utils.conversion;
import com.oberasoftware.jasdb.api.exceptions.CoreConfigException;
import com.oberasoftware.jasdb.core.utils.StringUtils;
public class ValueConverterUtil {
public static long convertToBytes(String memorySize) throws CoreConfigException {
return convertUnit(memorySize, new MemoryTypeConverter());
}
public static long convertToBytes(String memorySize, long defaultValue) throws CoreConfigException {
try {
return convertUnit(memorySize, new MemoryTypeConverter());
} catch(CoreConfigException e) {
return defaultValue;
}
}
public static long convertToMilliseconds(String timeSpan) throws CoreConfigException {
return convertUnit(timeSpan, new TimeTypeConverter());
}
public static long convertToMilliseconds(String timeSpan, long defaultValue) throws CoreConfigException {
try {
return convertUnit(timeSpan, new TimeTypeConverter());
} catch(CoreConfigException e) {
return defaultValue;
}
}
private static long convertUnit(String unit, ValueConverterType converter) throws CoreConfigException {
if(StringUtils.stringNotEmpty(unit)) {
try {
char lastDigit = unit.charAt(unit.length() - 1);
lastDigit = Character.toLowerCase(lastDigit);
long longUnit = -1;
if(Character.isDigit(lastDigit)) {
longUnit = Long.valueOf(unit);
} else {
String value = unit.substring(0, unit.length() - 1);
longUnit = Long.valueOf(value);
}
return converter.convertToLong(lastDigit, longUnit);
} catch(NumberFormatException e) {
throw new CoreConfigException("Unable to parse value in unit: " + unit, e);
}
} else {
throw new CoreConfigException("Could not parse empty unit");
}
}
public static int safeConvertInteger(String integer, int defaultValue) {
try {
return Integer.valueOf(integer);
} catch(NumberFormatException e) {
return defaultValue;
}
}
}
|
def build_interpreter_string(self):
interpreter_string = ''
for key, value in self._environment.items():
interpreter_string += 'export {0}={1} ;'.format(key, value)
interpreter_string += self._python_path
return interpreter_string |
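# A minimal, hypothetical usage sketch; the surrounding class is an assumption
# inferred from the attributes the method above references (self._environment,
# self._python_path) and is not part of the original code.
class _Interpreter:
    def __init__(self, python_path, environment):
        self._python_path = python_path
        self._environment = environment

# Reuse the function above as a method of the helper class.
_Interpreter.build_interpreter_string = build_interpreter_string

interp = _Interpreter('/usr/bin/python3', {'PYTHONPATH': '/opt/libs', 'LANG': 'C'})
print(interp.build_interpreter_string())
# expected output (dict order preserved in Python 3.7+):
# export PYTHONPATH=/opt/libs ;export LANG=C ;/usr/bin/python3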