/****************************************************************************
 * arch/arm/src/efm32/hardware/efm32_memorymap.h
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

#ifndef __ARCH_ARM_SRC_EFM32_HARDWARE_EFM32_MEMORYMAP_H
#define __ARCH_ARM_SRC_EFM32_HARDWARE_EFM32_MEMORYMAP_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>
#include "chip.h"

#if defined(CONFIG_EFM32_EFM32TG)
#  include "hardware/efm32tg_memorymap.h"
#elif defined(CONFIG_EFM32_EFM32G)
#  include "hardware/efm32g_memorymap.h"
#elif defined(CONFIG_EFM32_EFM32GG)
#  include "hardware/efm32gg_memorymap.h"
#else
#  error "Unsupported EFM32 memory map"
#endif

#endif /* __ARCH_ARM_SRC_EFM32_HARDWARE_EFM32_MEMORYMAP_H */
// PossibleSpatialTypeValues returns the possible values for the SpatialType const type.
func PossibleSpatialTypeValues() []SpatialType {
	return []SpatialType{
		SpatialTypeLineString,
		SpatialTypeMultiPolygon,
		SpatialTypePoint,
		SpatialTypePolygon,
	}
}
import React from 'react'

interface Props {}

export default function ClipboardIcon(props: Props) {
  return (
    <svg width="14" height="14" viewBox="0 0 14 14" fill="none" xmlns="http://www.w3.org/2000/svg">
      <path
        fillRule="evenodd"
        clipRule="evenodd"
        d="M2.05056 0H9.68692V1.27273H2.05056V10.1818H0.777832V1.27273C0.777832 0.569819 1.34765 0 2.05056 0ZM11.596 2.54545H4.59601C3.89311 2.54545 3.32329 3.11527 3.32329 3.81818V12.7273C3.32329 13.4302 3.89311 14 4.59601 14H11.596C12.2989 14 12.8687 13.4302 12.8687 12.7273V3.81818C12.8687 3.11527 12.2989 2.54545 11.596 2.54545ZM11.596 12.7273H4.59601V3.81818H11.596V12.7273Z"
        fill="#A2B2C2"
      />
      <mask
        id="mask0"
        mask-type="alpha"
        maskUnits="userSpaceOnUse"
        x="0"
        y="0"
        width="13"
        height="14"
      >
        <path
          fillRule="evenodd"
          clipRule="evenodd"
          d="M2.05056 0H9.68692V1.27273H2.05056V10.1818H0.777832V1.27273C0.777832 0.569819 1.34765 0 2.05056 0ZM11.596 2.54545H4.59601C3.89311 2.54545 3.32329 3.11527 3.32329 3.81818V12.7273C3.32329 13.4302 3.89311 14 4.59601 14H11.596C12.2989 14 12.8687 13.4302 12.8687 12.7273V3.81818C12.8687 3.11527 12.2989 2.54545 11.596 2.54545ZM11.596 12.7273H4.59601V3.81818H11.596V12.7273Z"
          fill="white"
        />
      </mask>
      <g mask="url(#mask0)">
        <rect width="14" height="14" fill="#A2B2C2" />
      </g>
    </svg>
  )
}
package SWProject;

import java.util.ArrayList;

public class SystemData implements ISystemData {

    private static SystemData instance;
    private ISystemDataStrategy dataContainer;

    private SystemData() {
        dataContainer = new ArrayListStrategy(); // default type
    }

    public static SystemData getInstance() {
        if (instance == null)
            instance = new SystemData();
        return instance;
    }

    @Override
    public void setStrategy(ISystemDataStrategy strategy) {
        dataContainer = strategy;
    }

    @Override
    public IAdmin getAdmin(String username) {
        return dataContainer.getAdmin(username);
    }

    @Override
    public boolean addRegistrationRequest(IRegistrationRequest registrationRequest) {
        return dataContainer.addRegistrationRequest(registrationRequest);
    }

    @Override
    public boolean addDriver(IDriver driver) {
        return dataContainer.addDriver(driver);
    }

    @Override
    public boolean addPassenger(IPassenger passenger) {
        return dataContainer.addPassenger(passenger);
    }

    @Override
    public boolean addOffer(IOffer offer) {
        Notifier.getInstance().notifyPassengerWithOffer(offer);
        return dataContainer.addOffer(offer);
    }

    @Override
    public boolean addRide(IRide ride) {
        Notifier.getInstance().notifyDriversWithRide(ride);
        return dataContainer.addRide(ride);
    }

    @Override
    public boolean addRating(IRating rating) {
        Notifier.getInstance().notifyDriverWithRating(rating);
        return dataContainer.addRating(rating);
    }

    @Override
    public IRegistrationRequest getRegistrationRequest(int index) {
        return dataContainer.getRegistrationRequest(index);
    }

    @Override
    public IRegistrationRequest getRegistrationRequest(String username) {
        return dataContainer.getRegistrationRequest(username);
    }

    @Override
    public IDriver getDriver(String username) {
        return dataContainer.getDriver(username);
    }

    @Override
    public IPassenger getPassenger(String username) {
        return dataContainer.getPassenger(username);
    }

    @Override
    public boolean removeRegisrationRequest(IRegistrationRequest registrationRequest) {
        return dataContainer.removeRegisrationRequest(registrationRequest);
    }

    @Override
    public boolean displayAllRegistrations() {
        return dataContainer.displayAllRegistrations();
    }

    @Override
    public ArrayList<IDriver> getDriversWithFavouriteArea(String area) {
        return dataContainer.getDriversWithFavouriteArea(area);
    }

    @Override
    public ArrayList<IOffer> getOffersOfPassenger(IPassenger passenger) {
        return dataContainer.getOffersOfPassenger(passenger);
    }

    @Override
    public ArrayList<IOffer> getOffersOfDriver(IDriver driver) {
        return dataContainer.getOffersOfDriver(driver);
    }

    @Override
    public ArrayList<IRating> gerRatingsOfDriver(IDriver driver) {
        return dataContainer.gerRatingsOfDriver(driver);
    }

    @Override
    public ArrayList<IRide> getRidesOfDriver(IDriver driver) {
        return dataContainer.getRidesOfDriver(driver);
    }

    @Override
    public boolean containsRideOfPassenger(IPassenger passenger) {
        return dataContainer.containsRideOfPassenger(passenger);
    }

    @Override
    public boolean containsDiscountArea(String destination) {
        return dataContainer.containsDiscountArea(destination);
    }

    @Override
    public boolean addDiscountArea(String area) {
        return dataContainer.addDiscountArea(area);
    }

    @Override
    public boolean removeDiscountArea(String area) {
        return dataContainer.removeDiscountArea(area);
    }
}
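For orientation, a minimal hypothetical usage sketch of the singleton-plus-strategy wiring above. DatabaseStrategy is an assumed ISystemDataStrategy implementation invented for illustration only; it is not part of the original repository:

// Hypothetical demo: swapping the storage backend at runtime.
// DatabaseStrategy is assumed to implement ISystemDataStrategy.
public class SystemDataDemo {
    public static void main(String[] args) {
        ISystemData data = SystemData.getInstance(); // lazily created singleton
        // The default backend is ArrayListStrategy; replace it if desired:
        data.setStrategy(new DatabaseStrategy());
        // Every subsequent call is delegated to the active strategy:
        IDriver driver = data.getDriver("someUsername");
        System.out.println(driver != null ? "driver found" : "no such driver");
    }
}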
Project-based learning in a collaborative group can enhance student skill and ability in the biochemical laboratory: a case study

ABSTRACT Experimental courses for undergraduate students majoring in biochemistry or related subjects often do not provide students with systematic and research-based experiences. To help students develop abilities related to laboratory techniques, data analysis, and systematic thought in biology, we performed an exploratory program that employs project-based learning in collaborative groups. The participants (18 students in total) organized themselves into groups of 2-4 students, and each group researched an enzyme that had not been described previously. The program began with a literature survey of the enzyme and a bioinformatics analysis. The students cloned the gene encoding the enzyme, purified the enzyme, and, finally, analyzed the enzyme's catalytic characteristics. The students explained the catalytic mechanism by integrating their experimental data with other knowledge. An instructor provided support and training throughout the process to foster effective teamwork and to cultivate a habit of independence that is believed to be useful for the students' future careers. The assessment showed that the pilot program yielded an improvement in the participants' laboratory skills, scientific presentation ability, and experimental design ability. These analyses indicate that the small-scale practice in this study benefited the students and that the methods could be adopted more widely.
Can we tell what patients die of? Does it matter? In this issue of the Journal, Ridgeon and colleagues attempt to create a classification system for causes of death in critical care for research purposes.1 Their article reminded me of a medical emergency team (MET) call that I attended recently. When I arrived, there was a frail, cyanosed elderly woman, curled up in bed, surrounded by buzzing nurses and doctors. She had been found unresponsive in bed, was making agonal gasps and had no palpable pulse, although the monitor showed a slow ventricular escape rhythm. Staff were uncertain how long she had been in this state, but she had been in hospital for 2 weeks after admission with heart failure. My assessment was that, despite the lack of a pre-existing treatment limitation order, the team should not perform cardiopulmonary resuscitation. It was extremely unlikely that it would be successful and allow this woman to return to a meaningful life, so she was moved to a quiet room and died peacefully a few minutes later. Afterwards, when the medical registrar rang his consultant to inform him of what had happened, a question that arose was, "What should we put on the death certificate?" Entwined in our search to conquer death lies the possibility that understanding how someone died might give us clues as to what we could fix next time. The cause of death is dryly captured on the death certificate, a form that serves two purposes: as a legal document and to provide information for demographic and health purposes, giving insights into diseases and factors contributing to reduced life expectancy.2 The origin of the death certificate lies in the English Bills of Mortality. Their publication began in 1665 in London during the bubonic plague, when the weekly publishing of numbers of burials in the London parishes highlighted the areas that healthy citizens should avoid.3 In 1837, the General Register Office was set up to record births, deaths and marriages, as a response to increasing concern that poor registration of these events undermined property rights, and from the increasing recognition of the association between poor living conditions and life expectancy.4 The chief statistician, William Farr, developed a standardised list of causes of death, which was a great improvement on 18th-century causes such as "overjoy", "blasted", or "king's evil".5 Several decades later, medical practitioners were required to complete the forms. By 1910, the International Classification of Diseases (ICD) was used to attempt to further standardise the diseases recorded on the death certificate. In the early days of Australia, there were state-based death registers, which were combined in 1964 to create the National Mortality Database.6 Every year, the Australian Bureau of Statistics produces a cheery top 20 causes of death and years of potential life lost, by ICD code, and the Australian Institute of Health and Welfare National Death Index is an opportunity for linking the death database for epidemiological studies. Data linkage from death databases appears to provide amazing opportunities for understanding associations between mortality and various health-related factors such as diseases, treatments received and socioeconomic factors. However, one critical element in this relates to the quality of the information provided on the death certificate.
There are guidelines to help doctors complete death certificates, but research suggests that many doctors do not receive proper training to undertake this task correctly and to distinguish between the disease or condition directly leading to death and the antecedent causes.2,7-9 Added to this is the complexity of assigning one of the 8000-odd ways one can die, as tabled in the ICD-10. Furthermore, doctors do not know the cause of death more often than we like to admit. Post mortem studies have shown significant disagreement on the cause of death between the autopsy and the death certificate in up to 29% of cases.10 Researchers have called for training of clinicians in completing death certificates.11,12 Several research articles have used the specific cause of death as an outcome measure, instead of using mortality, in an effort to obtain mechanistic insights into the processes occurring.13,14 Recognising the lack of validated systems being used in recording the specific cause of death, Ridgeon and colleagues have sensibly attempted to create one. Their article shows that the Intensive Care Unit Deaths Classification and Reason (ICU-DECLARE) system provides a simple and reproducible way for critical care researchers to reliably classify cause of death. The proximate cause of death has a systems approach, which is potentially useful in understanding the role of different treatments. Ridgeon and colleagues found substantial agreement between ICU specialists on the proximate cause of death, and moderate agreement on the underlying cause of death. This system is an important step forward, but it is important to note that there still was not almost-perfect agreement between respondents: there are judgements being made, and no gold standard exists (apart from a post mortem). One useful lesson from the study is that the ICU specialists had better agreement than the research coordinators, possibly because ICU specialists have had experience in determining cause of death previously.
Would you ever drink flame retardant? Would you eat lice shampoo? If the thought of consuming these products sounds crazy to you, you might be surprised to discover that if you eat the Standard American Diet (SAD, which it is indeed!) full of processed foods, there’s a good chance you do eat or drink these chemicals… and more. It’s one of the scary (and gross) things about processed foods – they may contain chemical ingredients we would never consider eating if we knew what they were. But that’s the problem, isn’t it? With the ubiquity of processed foods, in our busy lives our consciousness is not awake to what we are actually consuming. It’s time to wake up! A number of these ingredients are banned in other developed nations because of their potential to harm human health – but not in the U.S.! I know… what the heck!! Let’s take a look at some toxic ingredients in processed foods, as well as alternatives that support health instead of harming it.

Ingredient: Brominated Vegetable Oil (BVO)
Foods that contain it: Sports drinks, citrus sodas
Also found in: Flame retardant
Where it’s banned: Japan, European Union

PepsiCo recently announced they would discontinue the use of BVO in several of their Gatorade flavors, although the ingredient will remain in both Mountain Dew and Diet Mountain Dew. Gee thanks, PepsiCo! So what’s the problem with BVO? BVO can build up in body tissue, leading to toxic effects including: memory loss, loss of muscle coordination, hormonal disruption, lesions, kidney disease, and delayed or stalled brain development.

Alternatives: Skip the soda, energy drinks, and other bottled beverages altogether. Instead, carry a BPA-free water bottle with you filled with clear, fresh water.

Ingredient: Azodicarbonamide
Foods that contain it: Dried pasta, bread, other wheat products
Also found in: Foamed plastics and synthetic leather
Where it’s banned: Australia, European Union, Singapore

This ingredient is used to speed up how quickly flour bleaches. According to the World Health Organization, “Case reports and epidemiological studies in humans have produced abundant evidence that azodicarbonamide can induce asthma, other respiratory symptoms, and skin sensitization in exposed workers.”

Alternatives: Skip products containing bleached wheat. Instead, enjoy organic whole grains like quinoa or amaranth, which are delicious and full of nutrients.

Ingredient: Artificial coloring – yellow 5, yellow 6, blue 1, blue 2 (mostly made from coal tar)
Foods that contain it: Too many to list! But here’s a few: candy, sports drinks (those nasty things again!), fruit snacks, powdered drink mixes, cake mixes, some yogurts, ice cream…
Also found in: Lice shampoo, floor seal coating
Where it’s banned: European Union

Artificial colors have long been scrutinized as potential health hazards. Over the years, a number of artificial coloring ingredients have been banned because of their potential health risks. Recent studies suggest artificial colors and preservatives may aggravate ADHD symptoms in children. Likewise, a study on food color consumption showed an increase in inflammation, as well as possible liver and kidney problems.

Alternatives: The great thing about natural, organic produce is that it is colorful and appetizing without any chemical colorings or additives. If you’re looking for food that looks as pretty as it tastes, load up on fruits and veggies across the spectrum of color. The different colors in these natural foods indicate different vitamins and minerals.
By eating a broad range of colorful produce, you’ll be certain you’re getting the vitamins and minerals your body needs – without artificial coloring.

Ingredient: Brominated flour (bromates)/potassium bromate
Foods that contain it: Breads and tortillas, crackers, other baked goods
Where it’s banned: United Kingdom, Canada, China, Brazil, Sri Lanka, Nigeria, Peru

There’s a good reason so many countries have banned potassium bromate. Studies show it is both carcinogenic and toxic to the kidneys. It may also induce thyroid and kidney tumors. If you live in California, this ingredient is a little easier to avoid: the state requires warning labels on foods containing it.

Alternatives: In The Beauty Detox Solution, I recommend avoiding wheat flour and products containing wheat flour. Instead, try organic gluten-free crackers, or use lettuce, a collard green, or a nori wrapper as a “wrap” when you really want a sandwich.

Ingredient: GMOs
Foods that contain them: According to Mother Jones, at least 70 percent of processed foods in the United States contain GMOs
Where they are banned or restricted: Algeria, Thailand, Sri Lanka, European Union, Saudi Arabia, Brazil, Paraguay, Australia, New Zealand

Genetically modified foods are pretty scary, given their relatively short time in the food supply. GM foods weren’t even around until 1996. Now, thanks in large part to the patents chemical giant Monsanto has placed on seeds, it is becoming increasingly difficult for American farmers to keep GMOs out of their crops. (I highly recommend watching the documentary David vs. Monsanto to learn more about this.) According to a 2009 article in Critical Reviews in Food Science and Nutrition, GMOs may have a number of potential health risks, including: kidney and liver issues, pancreatic problems, reproductive effects, severe allergic reactions, cancer, reactivation of viruses, and DNA damage or alteration.

Alternatives: I recommend avoiding processed foods and supplements like fish oil capsules because they have such a high rate of GMOs in them. Instead, eat organic natural foods such as fruits and vegetables, organic raw nuts and seeds, and organic grains.

What’s the bottom line? The FDA continues to allow these ingredients in our processed food supply in spite of very real health concerns. There’s some scary stuff out there, and you have to be your own health advocate. Avoid processed foods, and when you do eat them, read labels carefully. If there’s an ingredient you don’t recognize, find something else to eat. In The Beauty Detox Solution and The Glowing Lean System, I teach you how to eat for vibrant good health, avoiding potentially toxic ingredients. Let’s partner together to raise our consciousness about the connection between our diet and our overall health and well-being! Changing your diet will improve all aspects of your life.
GrandJazzFest is seeking volunteers to help at the free, family-friendly live jazz festival Aug. 18 and 19 at Rosa Parks Circle in downtown Grand Rapids. Three-hour shifts are available to assist in presenting this event to the community. All volunteers will receive a T-shirt and bottled water. Those interested in volunteering can go to grandjazzfest.org/volunteers/ to sign up.
1. Field of the Invention

The invention relates to a gas turbine arrangement having a rotor and at least two rows of turbine blades or vanes, a method for operating the gas turbine arrangement, and a turbine blade or vane for use in the gas turbine arrangement.

2. Brief Description of the Related Art

On account of the temperatures of the hot gases which surround them, turbine blades and vanes of gas turbines have to be cooled. Coolable blades or vanes for gas turbines with an internal cooling system have been disclosed, for example, by laid-open specification DE-A1 198 60 788, by EP-A1 0 534 586 or by EP-A1 1 094 200. Cooling air is guided out of a cooling passage located in the rotor into the internal cooling system and is then passed through discharge openings into the flow passage of the respective gas turbine. One major problem with cooling systems of this type is a leakage stream of cooling air which escapes between turbine blades or vanes and/or between rotating and static parts of the gas turbine. Sealing devices which are supposed to minimize the cooling air leakage stream at this location are known from EP-A1 1 094 200, U.S. Pat. No. 6,152,690, U.S. Pat. No. 6,086,329, U.S. Pat. No. 4,820,116, U.S. Pat. No. 4,626,169, U.S. Pat. No. 4,505,640, U.S. Pat. No. 4,439,107, U.S. Pat. No. 4,265,590 and DE-A 1 942 346. Other documents disclose devices which serve the purpose of minimizing the leakage stream of cooling air and of introducing the remaining stream into the hot gases of the gas turbine with the minimum possible losses and turbulence, or of utilizing it in some other way. In this context, mention may be made, for example, of U.S. Pat. No. 5,211,533. Furthermore, U.S. Pat. No. 5,800,124 discloses a seal in which the leakage stream is diverted onto the trailing edge of the turbine blade or vane in order for the platform to be cooled there by impingement cooling. U.S. Pat. No. 6,077,035 discloses a metal diverter sheet which prevents the leakage stream between the rotor blades and introduces the cooling air between the guide vanes and rotor blades with low losses. U.S. Pat. No. 4,348,157 discloses a similar device.
Development and Initial Validation of the Work Addiction Inventory

Title of Document: Development and Initial Validation of the Work Addiction Inventory
Nicole A. Bryan, Master of Arts, 2009
Directed By: Dr. Robert W. Lent, CAPS Department

The purpose of the study is to develop and validate the Work Addiction Inventory (WAI). The WAI is designed to assess individuals' addiction to work via self-report. Data were collected from 127 working professionals employed on at least a part-time (20 hours per week) basis. Results of an exploratory factor analysis retained 24 items and indicated that the WAI consists of three underlying factors. The WAI subscale and total scores showed adequate internal consistency reliabilities. Convergent and discriminant validity were initially supported by the relationships between WAI scores, an existing measure of workaholism, and social desirability. Also, WAI scores correlated highly with several criterion variables. Finally, evidence was found to suggest that the WAI accounts for unique variance beyond an existing measure of workaholism. In conclusion, the psychometric properties of the WAI were initially supported by the findings of the study.
#include <iostream>

int main() {
    int value = 42;          // a variable of type int
    int other = 0;           // another variable
    int* pointer = &value;   // pointer to the variable 'value'
    int& reference = value;  // reference to the variable 'value'

    std::cout << "address of value is " << &value << ", its value is " << value << std::endl;
    std::cout << "address of other is " << &other << ", its value is " << other << std::endl;
    std::cout << "address held by pointer is " << pointer << ", pointed-to value is " << *pointer << std::endl;
    std::cout << "address of reference is " << &reference << ", its value is " << reference << std::endl;

    pointer = &other;    // change the address held by the pointer to that of 'other'
    reference = other;   // intended to reseat the reference from 'value' to 'other',
                         // but this actually assigns other's value (0) to 'value'

    std::cout << std::endl;  // print one blank line

    std::cout << "address of value is " << &value << ", its value is " << value << std::endl;
    std::cout << "address of other is " << &other << ", its value is " << other << std::endl;
    std::cout << "address held by pointer is " << pointer << ", pointed-to value is " << *pointer << std::endl;
    std::cout << "address of reference is " << &reference << ", its value is " << reference << std::endl;
}
Methylnaltrexone in the treatment of opioid-induced constipation

Constipation is a significant problem related to opioid medications used to manage pain. This review attempts to outline the latest findings related to the therapeutic usefulness of a µ-opioid receptor antagonist, methylnaltrexone, in the treatment of opioid-induced constipation. The review highlights methylnaltrexone bromide (Relistor™; Progenics/Wyeth), a quaternary derivative of naltrexone, which was recently approved in the United States, Europe and Canada. The Food and Drug Administration in the United States approved a subcutaneous injection for the treatment of opioid bowel dysfunction in patients with advanced illness who are receiving palliative care and when laxative therapy has been insufficient. Methylnaltrexone is a peripherally restricted µ-opioid receptor antagonist that accelerates oral-cecal transit in patients with opioid-induced constipation without reversing the analgesic effects of morphine or inducing symptoms of opioid withdrawal. An analysis of the mechanism of action and the potential benefits of using methylnaltrexone is based on data from published basic research and recent clinical studies.

Introduction

Morphine and other opioid agonists are potent analgesics that represent the mainstay of therapy in the treatment of acute and chronic severe pain. Opioid analgesics work by predominantly stimulating opioid receptors in the central nervous system (CNS). However, constipation is a significant problem in patients taking opioid agonists for pain relief, due primarily to their effect on opioid receptors located in the periphery within the gut itself. This review will focus on one of the currently available opioid receptor antagonists, specifically methylnaltrexone bromide (Relistor™; Progenics/Wyeth), a quaternary derivative of naltrexone which was recently approved by the Food and Drug Administration (FDA) as a subcutaneous injection for the treatment of opioid bowel dysfunction in patients with advanced illness who are receiving palliative care and when laxative therapy has been insufficient. 1 The review will provide an overview of methylnaltrexone's ability to promote gastrointestinal (GI) motility in patients with opioid-induced constipation without compromising the analgesic effects of morphine or stimulating symptoms of opioid withdrawal. The later sections of the review will discuss the potential use of methylnaltrexone for the treatment of constipation associated with post-operative ileus.

Effect of opioids on gastrointestinal motility

In the CNS the µ-opioid receptor is the primary opioid receptor involved in pain transmission. Although there is limited evidence to suggest that centrally located opioid receptors may be involved in the control of GI transit, the dominant effect of the µ-opioid receptor agonist morphine on the GI tract appears to occur via opioid receptors located peripherally within the gut wall. 2,3 Recent evidence has demonstrated opioid receptors in the enteric nervous system, specifically in the submucosal and myenteric plexus in association with interstitial cells of Cajal. The muscle layers of the small and large intestine have also shown opioid receptor immunoreactivity. For excellent reviews of the anatomical distribution and function of the opioid receptor in the GI tract, the reader is referred to Sternini and colleagues.
Within the GI tract, classical animal experiments demonstrated that morphine, fentanyl and met-enkephalin cause inhibition of both the longitudinal and circular muscle layers. 7 More recent studies using a vascularly perfused intestinal segment demonstrated that morphine, dermorphin, D-Ala2-D-Met5-enkephalin, FK 33-824 and dynorphin reduced the frequency of peristaltic waves and the maximal ejection pressure. 8 From multiple studies and clinical experience, a delay in GI transit is a well-known characteristic of opioid receptor agonists including morphine, diphenoxylate and loperamide. 9 The effect of opioids in delaying intestinal transit is species-dependent, and interspecies differences must be taken into account when examining the contribution of opioid agonists to GI motility. The delay in GI transit with opioid agonists occurs through either an inhibition of propulsive motility (rat) or a stimulation of non-propulsive or segmental patterns of motility in dog and man. 10 The in vivo effect of opioid agonists to delay GI transit is due to an inhibition of the release of acetylcholine as well as of the release of nonadrenergic-noncholinergic (NANC) neurotransmitters from enteric nerves. 11 These effects are mediated at least in part via opioid receptors present on circular muscle motor neurons. 12 Moreover, in the rat ileum, selective agonists of µ (PL017) and κ (U-50488) receptors inhibit neurotransmitter release along the ascending excitatory reflex pathway. 13 A detailed review of the literature reveals that other opioid receptor subtypes also are involved in the effects of opioids on motility. Delta receptors do not regulate the activity of myenteric excitatory motor neurons, since selective δ-receptor agonists (DPDPE) or antagonists (ICI 174864) are ineffective. 14 However, in the circular muscle of the guinea pig and human colon, NANC inhibitory motor responses are reduced by activation of opioid receptors. 15,16 In summary, morphine and morphine-like opioid agonists induce a delay in GI transit and are involved in the development of opioid bowel dysfunction through a mechanism involving predominantly opioid receptors located within the GI tract.

Opioid bowel dysfunction

Opioid analgesics represent an important therapy for pain management; however, opioids also have significant effects on GI motility to delay GI transit and cause constipation that may be so severe that it can limit pain management. Opioids delay GI transit via an inhibition of gastric emptying, a slowing of small and large bowel transit and an increase in anal sphincter tone due to activation of opioid receptors located in the GI tract. As early as 1917, Trendelenburg demonstrated in an isolated preparation of guinea-pig small intestine that morphine inhibits peristalsis. 7 Since then many studies have confirmed that morphine and related opioids delay transit throughout the GI tract via a peripheral mechanism. Although peripherally restricted opioid receptor agonists such as loperamide have been shown to slow GI transit and are useful for the treatment of diarrhea, 9 the constipating effect of morphine, which acts at both central and peripheral opioid receptors, is a significant problem in patients receiving morphine for the relief of pain. Stimulation of the opioid receptors in the GI tract by morphine frequently results in unwanted effects termed opioid bowel dysfunction.
Opioid bowel dysfunction is characterized by severe constipation, hard stools, straining, incomplete evacuation, bloating, abdominal distension, and increased gastroesophageal reflux. It is estimated that about 40% of patients taking chronic opioids for pain develop opioid bowel dysfunction. 20 Clearly, the mechanisms of opioid bowel dysfunction are complex; however, studies have shown that gut hypomotility correlates with opioid concentration in the enteric nervous system 21 and that the morphine-induced inhibition of GI transit results from activation of opioid receptors in the gut. 22 Interestingly, although patients develop tolerance to opioid-induced nausea, vomiting and sedation, they rarely develop tolerance to the delayed GI transit and resulting constipation induced by opioids. 23 In summary, chronic opioid use for severe pain is associated with significant adverse effects, including opioid-induced bowel dysfunction characterized by constipation that is often not relieved by laxatives.

Therapeutic approaches for treating opioid-induced constipation

Laxatives and promotility agents

Until recently the management of opioid-induced bowel dysfunction was limited to use of a stimulant laxative such as bisacodyl or senna, with or without the addition of stool softeners such as docusate sodium or lactulose, 24-26 as well as increasing dietary fiber, increasing fluid intake and/or suggesting daily exercise whenever possible to treat the constipation. Osmotic laxatives and bulk-forming laxatives also have been employed in patients with opioid-induced bowel dysfunction, but they must be used cautiously in patients requiring fluid restriction, bedridden patients or those with strictures or partial bowel obstruction. 26,27 Often increased fiber intake or use of bulk laxatives increases pressure in the gut, worsening the patient's pain and increasing their discomfort. Promotility agents such as metoclopramide, a dopamine D2 receptor antagonist, also are used to accelerate GI transit in a subset of patients with delayed GI transit suffering from GI autonomic dysfunction. 27 Although laxatives are beneficial in some patients, they are poorly effective in most, and opioid-induced bowel dysfunction persists despite aggressive laxative therapy. Until recently laxatives represented the mainstay of therapy for the treatment of opioid-induced constipation, despite their serious limitations.

Opioid receptor antagonists

In very severe cases of opioid-induced constipation, patients reduce their use of opioids to alleviate the constipation despite the resulting loss of adequate pain relief. The first competitive opioid antagonists that were used to treat opioid bowel dysfunction included naloxone, naltrexone and nalmefene. While these antagonists were selective for opioid receptors, they were not selective for the periphery, and have both central and peripheral activity due to their ability to cross the blood-brain barrier. Although such compounds were able to increase laxation in patients with opioid-induced constipation, they also were associated with symptoms of opioid withdrawal and a marked decrease in adequate pain relief due to the effects of the compounds on central opioid receptors. Thus the search was initiated for an opioid antagonist that possessed the capability of reversing opioid-induced constipation without reducing the level of analgesia or stimulating opioid withdrawal.
It was suggested in the latter part of the twentieth century that quaternary narcotic antagonists might be useful to treat opioid-induced peripheral side effects, since some of these agents failed to cross the blood-brain barrier readily. 28,29 Quaternary opioid antagonists were developed that had increased polarity, decreased lipid solubility and a reduced ability to cross the blood-brain barrier following systemic administration. As a result, these compounds bound only to peripheral receptors unless administered directly into the brain. 28,29 Today peripheral opioid receptor antagonism offers a newly approved class of therapeutics for the treatment of constipation associated with the long-term use of opioids while preserving centrally mediated analgesia. 1 A major focus of the subsequent section of this review will be the use of methylnaltrexone for the treatment of opioid-induced bowel dysfunction, specifically severe constipation in patients receiving opioid therapy for pain management.

Effect of methylnaltrexone in preclinical experimental models

An extensive series of studies was designed to characterize the pharmacological profile of methylnaltrexone. Until recently, opioid receptor affinity was assessed in whole rat brain or guinea pig ileum tissue by displacing the binding of the non-selective opioid antagonists 3H-etorphine or 3H-diprenorphine. These studies revealed that the quaternary derivative of naltrexone exhibited only 1% to 3% of the affinity of naltrexone itself for the opioid receptor. 30 Assessment of the affinities and selectivity of methylnaltrexone for µ-, κ- and δ-opioid receptors recently was performed in cells expressing recombinant human opioid receptors as well as in animal tissues expressing endogenous opioid receptors. These studies demonstrated that the compound displaced opioid binding to µ-opioid receptors with an affinity of 10 nM, and with a 3-fold lower affinity for κ-opioid receptors (Ki 30 nM). 31 These studies also demonstrated that the affinity of methylnaltrexone for δ-opioid receptors is much less (Ki 15.8 µM). 31 While initial reports indicated that methylnaltrexone had no intrinsic opioid agonist activity, 28-30 more recent studies comparing it against a variety of opioid antagonists demonstrated that the compound exhibited weak partial agonist activity at recombinant µ- and κ-opioid receptors (intrinsic activity 10 and 12, respectively). 31 Using in vitro and in vivo models, the effect of methylnaltrexone on the GI tract was systematically studied. Methylnaltrexone reversed morphine-induced inhibition of gut contractility in isolated guinea-pig ileum and human small intestine. 31,32 A limited number of studies have also suggested that there may be endogenous inhibitory opioid tone of the GI musculature, since methylnaltrexone alone enhanced muscle contractility in tissue isolated from the human 32 or equine 33 GI tract. However, the effects of methylnaltrexone on electrically evoked contraction of guinea pig ileum muscle strips are mixed, with methylnaltrexone inhibiting contractions in one study 31 and exacerbating contractions in another. 32 Some of these differences may result from differences in the electrical current applied to the tissue in the two experiments, as well as from different levels of endogenous opioid inhibitory tone between species. However, whether methylnaltrexone has therapeutic potential in other GI disorders characterized by hypomotility requires further research.
The effect of methylnaltrexone on GI transit in vivo was demonstrated in studies showing that methylnaltrexone at doses of 1, 3 and 10 mg/kg, administered subcutaneously, effectively antagonized morphine-induced inhibition of GI transit in rats. 34 To test the hypothesis that antagonism of the effects of opioids in the gut can be accomplished without compromising analgesia, experimental models demonstrated that subcutaneous administration of methylnaltrexone had no effect on morphine-induced analgesia, whereas if the methylnaltrexone was administered directly into the brain there was a marked reduction in analgesia. 34,35 In summary, the efficacy of methylnaltrexone in antagonizing the GI-mediated effects of morphine occurred at doses that failed to antagonize morphine-induced analgesia. Early preclinical experiments were also conducted with methylnaltrexone to ensure that following intravenous administration there were no signs or symptoms of withdrawal in opioid-tolerant dogs. 28 The study found that in response to doses of methylnaltrexone as high as 50 mg/kg there were no symptoms of withdrawal, whereas signs of withdrawal were noted with doses of naltrexone as low as 0.5 mg/kg. 28,36

Clinical pharmacology of methylnaltrexone

A summary of the efficacy and pharmacokinetic characteristics of methylnaltrexone in healthy human subjects, as well as in patients with advanced illness or on chronic methadone treatment, is provided in Table 1. With iv or sc delivery, methylnaltrexone is rapidly absorbed in a dose-dependent manner, with a peak concentration (Cmax) generally reached within 20 to 30 minutes (Tmax) and an elimination t1/2 of 100 to 130 minutes. While orally administered methylnaltrexone is absorbed in a dose-dependent manner, far less is absorbed in general, maximal blood levels are not achieved until almost 2 hours, and the t1/2 is around 3 hours. 40 The primary pathways of metabolism are the conversion to methyl-6-naltrexol isomer (5% of the total) and methylnaltrexone sulphate (1.3% of the total). N-demethylation of methylnaltrexone to naltrexone is not a significant issue. 41 The excretion of methylnaltrexone is via the urine and feces, and approximately 40% to 50% of the compound is excreted unchanged in the urine following sc or iv administration. 42,43 Interestingly, following oral administration, only a tiny fraction of methylnaltrexone is excreted unchanged (0.3% or less). 40,44 Almost a 100-fold higher dose of oral methylnaltrexone (19.2 mg/kg) 40 was required to produce maximal plasma concentrations equivalent to those produced by 0.1 mg/kg sc 39 or 0.16 mg/kg iv methylnaltrexone. 37 Since only a fraction of the methylnaltrexone was excreted unchanged yet maintained its ability to antagonize morphine-induced oral-cecal transit delay (Table 1), this suggests that the majority of the compound remained in the GI tract instead of being absorbed into the bloodstream. 40,44 An enteric-coated formulation of methylnaltrexone was effective orally at only 3.2 mg/kg. 44

Clinical efficacy studies of methylnaltrexone for opioid-induced constipation

In early clinical studies, methylnaltrexone was used as a pharmacological tool to examine the relative importance of peripheral opioid receptor antagonism in modulating the opioid-induced delay in gastric emptying 45 as well as oral-cecal transit. 38
In the first human study to demonstrate that opioids affect gastric emptying via a peripheral mechanism distinct from the central analgesic effects of opioids, 11 healthy controls were given placebo (saline) plus morphine, or methylnaltrexone plus morphine, in a randomized double-blind crossover controlled trial. 45 The results showed that morphine prolonged gastric emptying and that methylnaltrexone prevented the morphine-induced delay in gastric emptying. In 1996, Yuan et al reported that methylnaltrexone blocked morphine-induced oral-cecal transit delay with no effect on morphine analgesia, demonstrating for the very first time in humans that opioid effects on the gut are mediated through peripheral opioid receptors distinct from those receptors located centrally that mediate analgesia. 38 This and other studies performed in healthy volunteers measured the ability of methylnaltrexone to reverse morphine-induced oral to cecal transit time using the lactulose hydrogen breath test; 38,39,44,46 pain intensity in response to the cold pressor test was assessed in one study to measure levels of pain reporting in the same subjects. 38 Methylnaltrexone administered intravenously, orally or subcutaneously reversed the delay in oral-cecal transit induced by morphine 38,39,44,46 without any effect on levels of analgesia. 38 Furthermore, in a randomized placebo-controlled trial, methylnaltrexone administered intravenously at cumulative low doses ranging from 0.015-0.365 mg/kg to 11 subjects with chronic methadone-induced constipation reversed the opioid-induced increase in GI transit time and produced immediate laxation in 91% of patients on day 1 and in 100% of the patients by the morning of the second day of dosing 43 (Table 1). More recently, in one small study and then two larger pivotal studies, the effectiveness of methylnaltrexone was investigated in patients with advanced illness with a life expectancy of less than 6 months, who were receiving palliative opioid therapy and had opioid-induced constipation (defined as fewer than 3 bowel movements in the preceding week or no bowel movement for 2 days). Rescue laxatives were prohibited. 41

(Table 1: Clinical pharmacokinetic and efficacy profiles of methylnaltrexone (MNTX) in healthy subjects, patients with advanced illness and chronic methadone patients.)

Similar results were reported in another Phase III study published by Thomas and colleagues in 2008. 48 A total of 133 patients who had been using opioids for pain relief for at least 2 weeks and had been taking laxatives without relief for their opioid-induced constipation were randomly assigned to receive either subcutaneous methylnaltrexone (0.15 mg/kg, 62 patients) or placebo (71 patients) every other day for a week. During the second week the dose of the compound could be increased to 0.30 mg/kg if the patient had had 2 or fewer rescue-free laxations up to day 8. The study found that 48% of patients receiving methylnaltrexone had a bowel movement within 4 hours of the first dose, while only 15% of patients receiving placebo had a bowel movement within 4 hours. However, when the patients were asked to self-assess improvement in their bowel status using a Global Clinical Impression of Change (GCIC) Scale after 7 and 14 days, most patients (73%) reported increased satisfaction following methylnaltrexone therapy compared to only 35% of patients in the placebo group, suggesting that methylnaltrexone improved the constipating symptoms in over 70% of patients.
An important component of the study was that no significant changes in pain scores were observed and there were no signs of opioid withdrawal. 48 This and other clinical studies have also assessed the occurrence of methylnaltrexone-related adverse effects. 23,37,43,45,47,48 There was no dose-dependence associated with the adverse effects, which were generally gastrointestinal in nature (abdominal pain, diarrhea, flatulence and nausea) and tolerable. In fact, these side effects are common to existing treatments for opioid-induced constipation. In a dose-escalating study in which methylnaltrexone was infused intravenously, a few subjects reported transient orthostatic hypotension that resolved quickly. 37 The rates of discontinuation due to adverse events during the double-blind placebo-controlled clinical trials described above were comparable for methylnaltrexone (1.2%) and placebo (2.4%); no serious adverse effects were attributed to methylnaltrexone. In summary, the results showed that in these specific patient groups with advanced illness, methylnaltrexone administered subcutaneously relieved opioid-induced constipation but, most importantly, did not reduce analgesia or cause any symptomatology associated with opioid withdrawal. Methylnaltrexone was effective in almost 70% of those patients studied, and it is hoped that further clinical studies will be performed in patients with less advanced disease. In addition to the currently approved subcutaneous route of administration for methylnaltrexone, its future development involves multiple routes of administration, including oral and enteric-coated oral forms of methylnaltrexone for opioid-induced constipation and an intravenous formulation for post-operative ileus.

Potential use of methylnaltrexone for the treatment of post-operative ileus

Although quite different from opioid bowel dysfunction in the setting of chronic opioid administration, another common condition, termed post-operative ileus, is a transient impairment of GI motility that routinely develops as a consequence of abdominal surgery. Although the pathophysiology of post-operative ileus is very complex, involving inflammatory, neural and hormonal mechanisms, there is a significant pharmacological component to post-operative ileus. Specifically, the use of opioid drugs such as morphine for the management of post-operative pain is well known to exacerbate the severity of post-operative ileus (see review 49). In fact, a retrospective study of post-operative ileus patients identified that the use of opioid-based analgesics represents a key risk factor for the development of ileus following surgery. 50 Usually patients recover from post-operative ileus in 1 to 2 days, particularly after laparoscopic bowel surgery. However, prolonged and untreated post-operative ileus may require nasogastric intubation and sometimes even parenteral nutrition. The duration of ileus following surgery depends on which part of the GI tract is most affected, with the small intestine recovering within the first 24 hours, whereas the stomach (24-48 hours) and the colon (48-72 hours) require longer recovery periods. Certain patients who are less mobile post-surgery have been found to be more susceptible to post-operative ileus. Delayed gastric emptying and intestinal transit are the main factors leading to the symptoms of post-operative ileus, which include abdominal bloating and pain, nausea and vomiting, anorexia, and reduced defecation.
In addition to post-operative opioid dosage, the duration of post-operative ileus following colorectal surgery also is positively correlated with the amount of blood loss and surgery time. 51 Although the analgesic effects of opioids such as morphine are predominantly mediated by opioid receptors in the CNS, the action of morphine to delay GI transit involves predominantly the activation of opioid receptors in the periphery to inhibit enteric reflexes and suppress GI transit. 16 Despite the fact that post-operative abnormalities in GI transit are common and can significantly delay a patient's recovery, the pathophysiological mechanisms causing post-operative ileus are incompletely understood. 52 Research from animal studies has shown that abdominal surgery stimulates capsaicin-sensitive afferent fibers that lead to activation of inhibitory efferent pathways and disrupt coordinated patterns of GI motility. Evidence is greatest for adrenergic receptor-mediated pathways that involve alpha-2 receptors located on vagal nerve terminals and postganglionic cholinergic nerves in the myenteric plexus that inhibit the release of acetylcholine in post-operative ileus. 53 Thus the neural pathways producing post-operative ileus are complex and dependent on a number of factors, including the magnitude and intensity of the stimulus. For example, incision of the skin and laparotomy stimulate adrenergic inhibitory neuronal pathways, 54,55 whereas abdominal surgery with handling of the intestine activates supra-spinal pathways that involve stimulation of the hypothalamic-pituitary-adrenal (HPA) stress axis and release of corticotrophin-releasing factor (CRF). In addition, non-adrenergic, non-cholinergic (NANC) neuronal pathways may also inhibit GI motility. Multiple inhibitory NANC neurotransmitters such as nitric oxide (NO), calcitonin gene-related peptide (CGRP) and vasoactive intestinal polypeptide (VIP) are present within the enteric nervous system, and they too may play an important role in the pathogenesis of post-operative ileus. 59-61 Recent findings have also shown that manipulation of the bowel during surgery induces a marked intestinal inflammation in rodents. These studies showed that specific inflammatory cells such as monocytes, neutrophils and mast cells increase in response to bowel manipulation, and there was a marked induction within the GI musculature of inducible nitric oxide synthase (iNOS) and cyclo-oxygenase-2 (COX-2) mRNA. The inflammatory response induced by bowel manipulation also leads to the secretion of a series of pro-inflammatory cytokines, including interleukin-1, interleukin-6, tumor necrosis factor-α and monocyte chemotactic protein-1. There is also evidence that bowel manipulation up-regulates adhesion molecules such as intercellular adhesion molecule-1 (ICAM-1), and that the ICAM-1 antisense oligonucleotide ISIS 3082 prevents the development of ileus in mice. 66 Interestingly, in an animal model of post-operative ileus produced by bowel manipulation, the magnitude of the inflammatory response appeared to be proportional to the decrease in GI transit measured in vivo and the loss of smooth muscle contractility observed in an organ bath preparation. 67,68 The development of new pharmacological strategies to accelerate recovery from post-operative ileus is urgently needed, because post-surgical GI dysmotility represents a major health problem contributing to patient morbidity, prolonged hospital stays and increased health care costs.
Recently, positive clinical efficacy data obtained with a peripherally acting antagonist of the µ-opioid receptor, alvimopan, in treating the delay in GI transit following surgery led to its recent approval by the FDA for the treatment of post-operative ileus. However, clinical reports related to the efficacy of methylnaltrexone for the treatment of post-operative ileus have been inconclusive to date. In a Phase II trial performed in 65 patients with post-operative ileus induced by colonic resection, patients given methylnaltrexone (0.3 mg/kg, intravenous) after surgery every 6 hours for 24 hours recovered from post-operative ileus 1 day faster than those receiving placebo, as assessed by time to first bowel movement, ability to tolerate food and hospital discharge. 69 However, those findings were not supported by preliminary results from a Phase III trial of 542 patients with post-operative ileus; methylnaltrexone administered at 12 or 24 mg every 6 hours did not show efficacy over placebo. 70 At the time of this review, a second Phase III trial is underway, with preliminary results suggesting that methylnaltrexone was not different from placebo in treating post-operative ileus. 70 The reason for these differences in clinical efficacy between alvimopan and methylnaltrexone for the treatment of post-operative ileus remains to be determined, but while both alvimopan and methylnaltrexone share many pharmacological properties, there are a few notable differences. Evidence suggests that alvimopan has inverse agonist activity in the guinea pig ileum assay while methylnaltrexone does not. 8 If the various conditions associated with post-operative ileus described above contribute to impaired GI transit, one can see how an antagonist with inverse agonist properties might more effectively increase gut motility than one with very weak partial agonist activity. Alvimopan also exhibits a slightly greater µ/κ opioid receptor selectivity ratio. 30 Finally, it has been proposed that P-glycoprotein (P-gp) expression in the small intestine may affect the response of the small intestine to chronic drug treatment. 71 For instance, while tolerance does not develop to the constipating effects of morphine, tolerance does develop to the inhibitory GI transit effects of loperamide. This difference may be attributed to the fact that loperamide is a better substrate for P-gp than morphine. To date, there are no reports on the affinity of either methylnaltrexone or alvimopan for P-gp or other drug transporters, but expression of these transporters is altered (generally reduced) by acute activation of inflammatory mediators such as iNOS, interleukin-1, interleukin-6 and tumor necrosis factor-α. Reduced expression of P-gp or other drug efflux transporters could increase the level of drug in the gut tissue and increase its efficacy. Finally, the lack of efficacy of methylnaltrexone for the treatment of post-operative ileus may relate to the design of the clinical trials, dosage and route of administration, and such factors are currently under investigation.

Conclusions and future directions

Although the actions of opioids in the gut have been well documented, major gaps remain in our understanding of the precise mechanisms underlying these effects, and of the potential role of opioid systems in GI diseases. Opioid analgesic use is commonly associated with GI side effects, suggesting a role for opioid systems in both GI function and pathophysiology.
The use of opioid receptor antagonists with activity limited to the periphery has proven to be of benefit in the clinic, where peripherally acting opioid antagonists such as methylnaltrexone reverse the unwanted peripheral side effect of constipation associated with opioid therapy while preserving centrally mediated opioid analgesia. Taken together, the preclinical data on methylnaltrexone are consistent with the clinical reports confirming the ability of methylnaltrexone to antagonize opioid-induced constipation without reversing analgesia or precipitating withdrawal symptoms. Additional research is required to determine whether methylnaltrexone also will be useful for the treatment of post-operative ileus.
package ca.warp7.frc2022.subsystems;

import edu.wpi.first.wpilibj2.command.Subsystem;

import static ca.warp7.frc2022.Constants.*;

import ca.warp7.frc2022.lib.Util;

public class Launcher implements Subsystem {
    private static Launcher instance;

    private final double targetRPS;
    private double currentRPS;

    public static Launcher getInstance() {
        if (instance == null)
            instance = new Launcher();
        return instance;
    }

    private Launcher() {
        if (kIsLauncherLobber) {
            targetRPS = kLobberRPS;
        } else {
            targetRPS = kShooterRPS;
        }
        currentRPS = 0.0;
    }

    @Override
    public void periodic() {
        // currentRPS = flywheelMasterNeo.getEncoder().getVelocity() / kFlywheelGearRatio / 60;
    }

    // Epsilon is the allowed decimal error, since comparing doubles for exact
    // equality is unreliable.
    public boolean isTargetReached(double epsilon) {
        return Util.epsilonEquals(getPercentError(), 0.0, epsilon);
    }

    public double getPercentError() {
        if (targetRPS != 0.0)
            return getError() / targetRPS;
        else
            return 0.0;
    }

    public double getError() {
        return targetRPS - currentRPS;
    }

    public void calcOutput() {
        if (targetRPS == 0.0)
            this.setVoltage(0.0);
        else
            this.setVoltage((targetRPS + getError() * kLauncherKp) * kLauncherKv + kLauncherKs * Math.signum(targetRPS));
    }

    private void setVoltage(double voltage) {
        // flywheelMasterNeo.set(voltage / 12);
    }
}
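To make the control law in calcOutput easier to follow: the flywheel voltage combines a feedforward term on the setpoint (kLauncherKv), a static-friction offset (kLauncherKs), and a proportional correction on the velocity error (kLauncherKp). Below is a standalone sketch of the same arithmetic; the gain values are made up for illustration, not the robot's real tuning constants:

// Standalone sketch of the Launcher voltage law, with assumed gains.
// kV: volts per RPS (feedforward), kS: static-friction volts, kP: proportional gain.
public class LauncherMathDemo {
    static double output(double targetRPS, double currentRPS,
                         double kP, double kV, double kS) {
        double error = targetRPS - currentRPS;
        // Feedforward on the setpoint plus a proportional correction on the
        // error, plus a constant term to overcome static friction.
        return (targetRPS + error * kP) * kV + kS * Math.signum(targetRPS);
    }

    public static void main(String[] args) {
        // Assumed tuning values, for illustration only.
        double kP = 0.1, kV = 0.11, kS = 0.2;
        // target 100 RPS, measured 90 RPS: (100 + 10*0.1)*0.11 + 0.2 = 11.31 V
        System.out.println(output(100.0, 90.0, kP, kV, kS));
    }
}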
Nickel-catalyzed Mizoroki-Heck- versus Michael-type addition of organoboronic acids to alpha,beta-unsaturated alkenes through fine-tuning of ligands. Various arylboronic acids reacted with activated alkenes in the presence of , ZnCl2, and H2O in CH3CN at 80 degrees C to give the corresponding Mizoroki-Heck-type addition products in good to excellent yields. Furthermore, 1 equivalent of the hydrogenation product of the activated alkene was also produced. By tuning the ligands of the nickel complexes and the reaction conditions, Michael-type addition was achieved in a very selective manner. Thus, various p- and o-substituted arylboronic acids or alkenylboronic acid reacted smoothly with activated alkenes in CH3CN at 80 degrees C for 12 h catalyzed by Ni(acac)2, P(o-anisyl)3, and K2CO3 to give the corresponding Michael-type addition products in excellent yields. However, for m-substituted arylboronic acids, the yields of Michael-type addition products are very low. The cause of this unusual meta-substitution effect is not clear. By altering the solvent or phosphine ligand, the product yields for m-substituted arylboronic acids were greatly improved. In contrast to previous results in the literature, the present catalytic reactions required water for Mizoroki-Heck-type products and dry reaction conditions for Michael-type addition products. Possible mechanistic pathways for both addition reactions are proposed.
package net.kem.tquickfix.blocks;

import net.kem.tquickfix.QFParser;

import java.text.DateFormat;
import java.util.Date;

/**
 *
 */
public abstract class QFTimeField extends QFDateTimeField {
    protected QFTimeField(String name, int number, String rawValue, Validation validateValue) {
        super(name, number, rawValue, validateValue);
    }

    protected QFTimeField(int number, String name, Date value, Validation validateValue) {
        super(number, name, value, validateValue);
    }

    protected DateFormat getDateFormat() {
        return QFParser.getInstance().getThreadContext().getTimeFormatter(); // df = new SimpleDateFormat("HH:mm:ss:S");
    }
}
// Find the representative (root) of x's set, compressing the path along the
// way so that future lookups are near O(1).
public int findd(int x) {
    if (dSets.get(x).parent == x) {
        return x;
    }
    return dSets.get(x).parent = findd(dSets.get(x).parent);
}
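This findd is the find half of a disjoint-set (union-find) structure with path compression; dSets and its node type are defined elsewhere. For context, a minimal self-contained version of the same idea, with illustrative names not taken from the original code, could look like:

import java.util.stream.IntStream;

// Minimal disjoint-set (union-find) with path compression, for illustration only.
class DisjointSet {
    private final int[] parent;

    DisjointSet(int n) {
        parent = IntStream.range(0, n).toArray(); // each element starts as its own root
    }

    int find(int x) {
        if (parent[x] != x) {
            parent[x] = find(parent[x]); // path compression
        }
        return parent[x];
    }

    void union(int a, int b) {
        parent[find(a)] = find(b); // merge the two sets by re-rooting one of them
    }
}

With union by size or rank added on top of path compression, both operations run in near-constant amortized time.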
“We basically decided on principle that we couldn’t work for a country that was using snipers on rooftops to pick off its citizens,” said Gregory L. Vistica, the firm’s president, who first announced the decision on Facebook. Others have stayed the course, at least for now. Mr. Moffett, Mr. Livingston and Mr. Podesta, who have a joint, multimillion-dollar contract with Egypt, have stepped up the pace of their meetings and phone conferences with Egyptian Embassy officials after the resignation of President Hosni Mubarak. One of the chief aims, the lobbyists say, is to help the military officials now running the country move toward elections that will be regarded as free and fair outside Egypt. “What we have done for them in the past is what we will continue to do for them in the future — everything in our power to build good relations between the Egypt of today and the United States,” said Mr. Livingston, a former Louisiana congressman who is one of Egypt’s lobbyists. At the same time, Mr. Livingston acknowledged that he was closely watching the situation in the region. “Is there a danger that the whole area might become Islamist and radical and totally opposed to the interests of the United States?” he asked. “Certainly there’s that risk.” At Qorvis, a global public relations firm that has represented numerous countries in the region, including Saudi Arabia, Bahrain, Yemen and Cyprus, executives from the firm’s Washington office were visiting the Middle East this week with a business-as-usual attitude. “Our clients are facing some challenges now,” Seth Thomas Pietras, senior vice president of Qorvis Geopolitical Solutions, said in a telephone interview from Dubai. “But our long-term goals — to bridge the differences between our clients and the United States — haven’t changed. We stand by them.” As a rule, leaders in the Middle East have paid consultants generously, even by Washington lobbying standards, with monthly retainers commonly reaching $50,000 or more, according to federal filings. (Price breaks are available, however: the law firm of White & Case promised Libya “a special 15 percent discount off of our standard rates” in light of the “significant relationship” it hoped to forge with Col. Muammar el-Qaddafi’s country in 2008, according to the contract.) The United Arab Emirates spent $5.3 million in 2009 for lobbying American officials — second only to the Cayman Islands, which has lobbied to retain its status as a tax haven, according to an analysis by Sunlight Foundation, a nonprofit research group. Working through DLA Piper and other Washington-based firms, the U.A.E. has sought greater access to American nuclear technology. Morocco spent more than $3 million on Washington lobbyists, much of it aimed at gaining an edge in its border dispute with Algeria, while Algeria countered by spending $600,000 itself.
Turkey, which shares some interests with the Middle East countries, spent nearly $1.7 million in 2009 to lobby American officials on Turkish and Middle Eastern policy through the firms of Richard A. Gephardt, a former House leader, Mr. Livingston and other prominent lobbyists. And Saudi Arabia, one of the most powerful foreign interests here, spent about $1.5 million in 2009 on Washington firms, and it has a $600,000 annual contract with Hogan Lovells aimed partly at fighting legislation and litigation that would challenge OPEC’s influence over oil prices. “These kinds of regimes have a lot of money at their disposal, and that’s a great attraction,” said Howard Marlowe, president of the American League of Lobbyists. Still, he said, “a number of lobbyists will stay away from international clients — period.” To work with dictators in Middle Eastern nations with policies that many American find unsavory, he said, “you have to have a strong stomach.” Mr. Livingston, the former congressman lobbying for Egypt, has also done work for Libya in seeking to resolve legal claims arising from Libya’s role in the bombing of Pan Am Flight 103 and normalize the country’s relations with the United States. But he said he reached a tipping point in 2009 when Libya welcomed back with open arms a bomber convicted in the Pan Am case and when Colonel Qaddafi threatened to pitch a tent in New Jersey next to a Jewish yeshiva while visiting the United Nations. “Those two incidents were just more than we could handle,” Mr. Livingston said. Soon after, his firm ended its work for Libya — with “no regrets,” he said. Other major Washington firms, including White & Case and Blank Rome, a legal and lobbying shop, have also ended their work for Libya, which spent about $850,000 on United States lobbying in 2009. It is not clear from federal records which Washington firms, if any, are still working with Colonel Qaddafi’s government; none have been publicly admitting it. As demonstrations were taking place in Egypt last month, Mr. Moffett said a friend suggested to him that his lobbying work for the Mubarak government put him “on the wrong side of the Egyptian thing.” Mr. Moffett demurred. “I don’t feel that way at all,” he said. “We feel honored to be on the scene while all this is happening.”
But he’s about to break out in a big way. It has to happen. His beers, many of them aged in barrels and brewed in the Belgian style, are so skillfully made, dynamic, food-friendly and distinctly delicious that they cannot remain a niche item for much longer. I can understand why. Several of Mraz’s creations, including that saison and another made with peaches, are among the most memorable beers I tasted in 2015. How can beer be so big and full-flavored, yet be tempered with such balance and elegance? When I visited the brewery recently, Mraz, whose creative energy always seems to be churning, was excited about a new saison made with 50 pounds of kiwi and dry-hopped with Nelson Sauvin hops from New Zealand. I can only imagine how that must taste. It was going to spend more time in the barrel before being released to his adoring fans. Mraz is working this kind of magic with just three employees toiling in a tiny space in an El Dorado Hills shopping center. Earlier in 2015, he acquired a 3,000-square-foot warehouse 5 miles away to store the beer aging in barrels. Mraz is not one to toot his own horn. Indeed, the brewery’s labels, while artistic and dramatic, have the name “Mraz” in minuscule letters that almost no one could spot on store shelves. “I let my beers stand for themselves. Our logo stands out, and you’ll see the wax (seal) for the barrel-aged ones,” he said when told that his beers can be a challenge to run down in stores. Mraz believes in his quest to reach brewing nirvana. He cashed in his 401(k) from his days in the automotive repair industry to start Mraz Brewing in May 2013. He was a foodie and wine enthusiast before he became a brewer, which explains his approach to flavor and his appreciation for balancing the acidity in his sours. Those eight-hour lines for famous beer in downtown Santa Rosa have not happened yet at this tiny El Dorado Hills brewery. But when the beer is consistently this good, the clamoring for all things Mraz is bound to grow. As for GABF, Mraz, like scores of other commercial brewers, has submitted his best work for scrutiny by the judges. He’ll wait and see how the awards sort themselves out. There is no more competitive beer event in the United States.
package com.atguigu.gulimall.product.dao;

import com.atguigu.gulimall.product.entity.CommentReplayEntity;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import org.apache.ibatis.annotations.Mapper;

/**
 * Product review reply relationship
 *
 * @author feifei
 * @email <EMAIL>
 * @date 2021-07-10 23:59:53
 */
@Mapper
public interface CommentReplayDao extends BaseMapper<CommentReplayEntity> {
}
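Since the mapper above declares no methods of its own, all of its CRUD capability is inherited from MyBatis-Plus's generic BaseMapper. A hypothetical usage sketch follows; the service class, injection style and ID type are assumptions, not part of the original file:

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

// Hypothetical service demonstrating the CRUD methods inherited from BaseMapper.
@Service
public class CommentReplayService {

    @Autowired
    private CommentReplayDao commentReplayDao;

    public CommentReplayEntity findById(Long id) {
        return commentReplayDao.selectById(id); // provided by BaseMapper
    }

    public void save(CommentReplayEntity reply) {
        commentReplayDao.insert(reply); // provided by BaseMapper
    }
}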
Two things on Carlson's resume stand out as atypical: He's been a head coach at every level from the NAIA to NCAA Division-I and he's had uncommon success with programs he started from scratch. It was those things that caught the eye of Lindenwood-Belleville Athletic Director Scott Spinner when he tagged the well-traveled Carlson to replace Jeff Fisher late in July. "It stood out to us that he had built two quality and nationally-ranked programs in a relatively short period of time," Spinner said. "What he's done in his career — and at so many levels of college football — is really impressive. He's the right guy to take this program where we want it to go." That journey begins on the Lynx's candy-striped home field at 1 p.m. Sept. 4 when Lindenwood-Belleville plays host to Siena Heights of the Wolverine-Hoosier Athletic Conference. Fisher started laying the foundation of the Lindenwood-Belleville program in 2012, but was unable to advance beyond four-win highs posted in each of his first two seasons. The Lynx won just two games last year and Fisher was fired with a 10-21 record. Carlson's 111-124-1 lifetime record, meanwhile, was spoiled only by four dismal seasons at Valparaiso that yielded just three wins. Prior to signing on at the NCAA Division I-FCS level, he led Division III and NAIA programs from launch to national prominence on a four-year-or-less plan. "I never really had my eyes set on being a Division I coach because I kind of decided I wanted to be an influence on the kids and be a teacher," he said. "That's not to say there aren't Division I coaches who don't do that. I know there are. But the other demands at the BCS level, I think, don't allow you to be that mentor and be that teacher. "So I chose to be a small college head coach." On that front, he's had nothing but success. Ohio Dominican University finished 12-1 in NCAA Division III in 2007, just three years after Carlson brought football to the school for the first time. He started football at Indiana's Trine University (then known as Tri-State) in 1995 and by 1998 had the Thunder in a NAIA national semifinal game. "The first time I (launched a program) I was shooting from the hip and really had no idea," Carlson said of his time at Tri-State. "The second time, we were a little better organized just having gone through it before, and we were a little quicker than the last time getting the team to the top of the conference and the national playoffs." Carlson, a linebacker during his playing days at Concordia University in Chicago, has earned a reputation for building high-scoring offenses that rely heavily on the arms of his quarterbacks. The 1998 national semifinalist at Trine averaged 427 yards of total offense per game, 232 of which came through the air. That team scored 384 total points in 12 games. Carlson's 2007 team at Ohio Dominican was even more dynamic, averaging 47.8 points and 429 passing yards over 13 games. Carlson's six years in Ohio yielded five All-Americans, seven Academic All-Americans, and more than 90 All-Conference performers. Carlson bristles at his reputation as an offensive-minded coach, pointing out that success came from both sides of the line. "When you look back at those championship years and runs we had at Tri-State and Ohio Dominican, the offense got a lot of pub but our defenses were top 20-25, too," he said. "People kind of look at me and say I'm a very offensive-minded coach.
But, really, as a head coach, if you can't perform in all three phases — offense, defense and special teams — you're not going to win very many games." In 2009 Carlson was named the NAIA Independent Coach of the Year while earning MSFA-Mideast League Coach of the Year honors in 2007. He also earned American Football Coaches Association Region II and American Football Monthly/Schutt Sports NAIA National Coach of the Year honors in 2007. He's not making any predictions as to when the Lynx will reach the elite level, but believes that competing in the American Midwest Conference every week offers the best test. "You can argue about which league is the best, but if you look at who's been going to the championships the last couple years, it's been a Mid-States team that's been involved," he said. "If you can be successful in this league, you can be successful on a national level." For Lindenwood-Belleville senior running back Kam Harris, a product of Althoff, the arrival of Carlson is bittersweet. Fisher was a mentor to Harris, but the opportunity to work under a coach with Carlson's resume is exciting, he says, even if it costs him some touches. "I feel like we're going to have to adapt to him because he does things different," Harris said as he walked on the field for his third practice in Carlson's system. "But it's football and football is about adapting to situations. Coach likes to pass the ball a lot and that involves me as a player of course. "But it's been a long three years and it's time to get everything that we've worked for. We've been through the bad and it's time to get some good."
Towards Building an Eco-friendly and Emission-less Electric Scooter

Electric vehicles are becoming more important, not only to reduce carbon emissions but also to reduce the dependency on conventional combustion-engine vehicles. Most universities have really big campuses. To make mobility on campus easier, a harmless, power-controlled vehicle with safety technologies is introduced. Besides reducing time consumption, it also aids differently abled persons and aged professors. This paper presents the design and development of a compact, portable and lightweight electric skating scooter. The vehicle body design is inspired by the cartilaginous sea fish, the stingray. It also includes mechanical features like a front shock absorber, handlebar brake control, portability and handlebar height adjustment. Electrical and electronic features such as an obstacle detector, fingerprint and RFID (radio-frequency identification) access, a battery management system (BMS), etc., help towards building a smart vehicle. Besides, it also provides a vehicle management system for tracking the user details, location and condition of the vehicle through a server. The proposed system, with its combination of mechanical, electrical and electronic features, will help enhance the performance of an electric scooter for easy mobility. The results obtained from the on-road test, the CADD software and the implementation of the vehicle management system are discussed.

Introduction

In the tech era, where the time has come for a revolution in the field of motor vehicles, e-vehicles play an important role in the development phase. In India, a developing nation where several decisions and steps are being taken to reduce the use of petrol- and diesel-powered cars by 2030, the development of e-vehicles will raise adoption and support a sustainable environment. Electric vehicles have fewer moving parts than conventional cars. This is greatly helpful in reducing pollution, as they have zero exhaust emissions. In the future, the price of electric vehicles will fall. Decreased usage of petroleum, gasoline and motor oil means fewer spills and less pollution in oceans, rivers and groundwater. Universities and industry campuses generally have serious mobility problems, so mobility for disabled students and aged professors is an even tougher job. The main aim of our project is intra-college mobility. To make mobility inside the campus easier, a harmless, power-controlled vehicle with safety technologies can be used, which reduces time consumption and helps disabled people. Considering this problem, the work focuses on an electric skating scooter which is really compact, portable and lightweight. This electric scooter has electronic safety technologies. The maximum speed of the scooter is 25 kilometres per hour. Our electric skating scooter has a modern design which is compact and easy to carry. The scooter handlebar height can be adjusted according to the rider's need. The scooter has a front shock absorber which smoothens the ride. Electronic safety technologies like an obstacle detector, fingerprint access and a BMS (battery management system) to monitor voltage, temperature, SOC and SOH are really useful for the rider, ensuring comfort and safety while riding the scooter. This technology lowers the possibility of accidents occurring on campus. The main objective is to design and develop an electric scooter for short-distance locomotion. Specifically, it aims to:
- Develop a lightweight and easily portable scooter.
- Develop a low-cost e-vehicle.
- Propose a novelty in the basic design of the scooter, unlike normal electric scooters in the market.
- Develop an outer body resistant to environmental calamities.
- Develop an eco-friendly and low-fuel-cost vehicle.

Literature review

In recent years, many research works and projects have been done in the field of electric vehicles, especially in the field of Li-ion batteries and motors. In one study, the power flow calculation and the design of an electric vehicle model were done using MATLAB to get the best power flow response for the energy system of the vehicle. Mathematical modelling and analysis of the powertrain have been done for the use of a split-power system; this helps to verify the operational capabilities of the motor under varying optimal conditions. Based on a wireless transfer protocol, a hybrid energy system was designed and implemented using supercapacitors for high performance of the scooter. Challenges faced by electric two-wheelers on mountain roads have also been studied. An electric scooter simulation program was developed to enable further improvement in the driving skills of the users. A portable electric scooter was designed to make access to the vehicle much easier and more comfortable. An integrated power module for an electric scooter was designed to improve the efficiency of the power board. Taking into account the conservation of energy, a solarized electric scooter has been developed. A different approach was taken to improve the appearance and ergonomic performance using anthropometric measurement. Motor control using PID and fuzzy PID controllers has been overviewed. Vehicle performance calculations give a clear insight into the parameters that have to be monitored during the on-road test of an electric vehicle, and published graphs indicate battery performance, consumption and efficiency under real-world scenarios. A comprehensive evaluation of battery technologies in the electric vehicle market helps to analyse and compare the different types of batteries available. Knowledge of incentives is essential for consumer awareness of electric vehicles. Several papers and literature items related to electric vehicles have been discussed.

Materials and Methods

In a qualitative experimental study, the practical difficulties in implementing an electric vehicle are analysed. From the study, knowledge is gained on the real parameters to be taken care of while implementing an electric vehicle. After the completion of a successful design analysis in SolidWorks, the mechanical design is developed using aluminium sheet. The motor controller and Li-ion battery are connected, and the vehicle is started by connecting the wires manually. To monitor the vehicle and battery, modules such as the vehicle management and monitoring systems are developed. Figure 1 explains the working flow of the electric scooter. The algorithm of the workflow is given below.

STEP 5: User details are sent to the cloud and displayed on the server.
STEP 6: Enable and initialize the GPS module.
STEP 7: Location is displayed on the server.
STEP 8: If the scooter starts moving, speed and battery percentage are displayed on the LCD.
STEP 9: If the log-out button is pressed, the scooter state changes from ON to OFF.
STEP 10: User log-out details are updated on the server.

From Figure 2 it is clear that the battery is the primary energy source, or fuel, for the vehicle.
It sends a switch-mode signal to the controller to drive the motor. A DC-DC converter is used to step down the battery voltage to 6 or 12 V as needed. A hub motor is used here because of the advantage that it is an independent drive system and requires little maintenance. An Arduino Mega is the main ECU used to monitor parameters like speed, battery percentage and the location of the vehicle, and also to control the ON and OFF of the vehicle with the help of RFID. All these parameters are measured with the help of IR sensors, the BMS, a GPS module and an RFID tag. A 48 V Li-ion battery is used as the fuel source of the vehicle. The battery is connected to the motor with the help of a 48 V motor controller. The list of components used to design and develop the electric scooter is given in Table 1. The main systems involved in developing the electric scooter are the battery, battery management system, motor, motor controller, vehicle monitoring system and vehicle management system. The working and functions of all these subsystems are discussed below.

Battery

The battery acts as the main fuel source for the vehicle. In recent days, lead-acid batteries are being replaced by Li-ion batteries because of their lower running cost and lower maintenance. The battery pack used in this electric vehicle is Li-ion. The supply from the battery is controlled using a motor controller and given as input to the motor. The battery is connected to the motor driver to deliver power directly to the motor.

Motor

A gearless hub motor is used to drive the vehicle. It is placed in the centre part of the wheel. A hub motor is actually a BLDC motor. A Hall-effect sensor is used to identify the position of the rotor very precisely with the help of variations in magnetic fields. When the permanent magnet passes any one of the sensors, the sensor produces a positive or negative (high/low) signal which is used to determine the rotor position (N/S pole). The position of the rotor is fed to the microprocessor unit with the help of rotor position circuitry. Based on the output from the Hall-effect sensor, the microprocessor generates a pulse-width-modulated (PWM) signal, which is sent to the switching circuit to energize the respective coils. Pulse-width modulation is a technique used to control the speed of many devices.

Motor Controller

The motor controller is the brain of the EV, which basically controls the motor. The parameters handled by the motor controller are motor speed, expected battery range, acceleration, direction, torque, overload protection, start and stop of the vehicle, and regenerative braking. The motor controller controls the current and voltage supplied from the battery to the motor; based on this, the desired output power, speed and torque are controlled. A microprocessor inside the motor controller takes care of all of these calculations, so the processing speed of the processor should be very high.

Vehicle Monitoring System

An ATmega328P Arduino Mega serves as the microcontroller unit among the battery, sensors and vehicle tracking system. In the vehicle, an IR sensor is placed near the back wheel. Whenever the sensor detects the white marker on the wheel, the count is incremented by 1. By this method, the rpm of the wheel is obtained, and the speed is calculated as speed = diameter of the wheel × rpm × 0.001885. The calculated speed is shown on the display mounted on the handlebar.
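To make the speed computation above concrete, here is a small illustrative sketch; the class and method names, and the sampling window, are assumptions rather than details from the paper. The constant 0.001885 ≈ π × 60 / 100000, which converts a wheel diameter in centimetres and a rotational speed in rpm into km/h:

// Illustrative sketch of the speed calculation described above (assumed names).
public class SpeedEstimator {
    private static final double KMH_FACTOR = 0.001885; // pi * 60 / 100000: diameter in cm -> km/h

    private final double wheelDiameterCm;

    public SpeedEstimator(double wheelDiameterCm) {
        this.wheelDiameterCm = wheelDiameterCm;
    }

    /** One IR pulse is counted per wheel revolution (white marker on the wheel). */
    public double speedKmh(int pulsesInWindow, double windowSeconds) {
        double rpm = pulsesInWindow * (60.0 / windowSeconds); // revolutions per minute
        return wheelDiameterCm * rpm * KMH_FACTOR;            // km/h, per the paper's formula
    }
}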
Here the RFID tag and reader act as transmitter and receiver. The Global Positioning System is a satellite-based system used for tracking the location with time and range. The controller sends the information to the cloud to display the location on the webpage. A thin-film-transistor liquid-crystal display (TFT LCD) is used as the dashboard of this vehicle. The main purpose of this project is to serve intra-campus locomotion, and maintaining and monitoring all the vehicles inside the campus is not an easy task. For this purpose, a vehicle tracking system has been developed, through which the campus management can monitor the location, user details and temperature of the electric vehicle.

Results and Discussion

In order to achieve better performance and reduce real-time difficulties in the making of the vehicle, the estimation of the performance is done using CADD software. The mechanical design of the proposed system is shown in Figures 3-6. In the electric scooter, AISI 4130 is selected as the roll-cage material due to its high strength-to-weight ratio of 72 to 130 kNm/kg. The proposed design provides up to 25.5% elongation. The wheelbase is 548 mm, so it provides a minimum turning radius of 1.3 m. The scooter is designed for a load-carrying capacity of 120 kg. The results obtained from the on-road test are listed in Table 2. Based on the results of the speed test, the battery used in the electric scooter has a discharge time of 4.3 hours. The range that can be covered in economy mode is 43 km. The RFID system used in the scooter unlocks the vehicle within 3 seconds when the tag is placed near the reader. Once the user unlocks the scooter, the user information is sent to the server and the vehicle is monitored. Since a lithium-ion battery pack is used in this scooter, all safety parameters like temperature, voltage, current discharge and SOC have been monitored under all circumstances. The functions of the scooter, like throttle performance, rolling resistance (35.316 N), gradient resistance (153.3 N), aerodynamic drag (8.47 N) and the braking system, have been checked thoroughly.
package ru.job4j.tasks;

public class CarInsurance extends Insurance {
    public static final int HIGH = 200;

    public int premium() {
        return HIGH;
    }

    public static String category() {
        return "Car Insurance";
    }
}
Winters v. United States

Water rights

Water rights are extremely important to American Indians, especially those American Indian tribes living in the West, where water supplies are limited. American Indian reservations, and those who live within them, rely on water sources for the water necessary for them to be self-sufficient. American Indian reservations rely on streams and rivers for agricultural purposes. Not only is the water itself important to the American Indian reservations, but also what the water contains. By having the rights to an area of water, one also gains rights to what is in the water. This gives an implied right to fish the waters. Because life relies on water, it may be fair to say that whoever controls the water ultimately has control over life on the reservation.

Riparian system

The riparian water system is the system controlling water use in the eastern states, where water is more plentiful. Under this system the owner of the land bordering the source of water is entitled to use of said water. This system is sufficient for the states where water is found in abundance, but in the less water-rich western states the control of water must be handled differently.

Appropriative system

In the western part of the country, water ownership is controlled by the appropriative system. This system states that the owner of a piece of land does not automatically own the rights to water found on that land. Rights to water belong to the first user who puts the water to beneficial use. The first people to become appropriators of the water source have the right to continue using the water in the same quantity as always, as long as they continue putting the water to good use. This holds true no matter how many other people wish to use the water. The latest water appropriator loses all of their water rights before any water rights are taken from the next latest appropriator. The Fort Belknap Reservation in Montana, which had been created by the government in 1888, experienced issues with the appropriative water system when water flow to the reservation was being diverted to settlements of non-American Indians.

Fort Belknap American Indian Reservation

The Fort Belknap Indian Reservation was created in 1888 in Montana. It was created from what had once been a much larger area of land to be set aside for tribes. The 1888 agreement neglected to mention any water rights that were reserved for the reservation in relation to the Milk River. Soon there came a huge demand for water by non-Indian settlers, which was an issue for the Fort Belknap American Indian Reservation. As non-Indian settlers began moving closer to the Fort Belknap Reservation, the settlers claimed rights to the water. The settlers did things such as build dams and reservoirs which prevented the reservation from receiving water needed for agricultural purposes. The settlers used the terms of the appropriative water system to support their actions, claiming that they had appropriated the water before the American Indians living on the reservation had put the water to beneficial use.

Decision

The United States Supreme Court case of Winters v. United States held that the decree enjoining the companies from utilizing river waters intended for an American Indian reservation was affirmed. It was also held that when American Indian reservations were created by the United States government, they were created with the intention of allowing the American Indian settlements to become self-reliant and self-sufficient.
As American Indian reservations require water to become self-sufficient in areas such as agriculture, it was found that water rights were reserved for tribes as an implication of the treaties that created the reservations.

Majority opinion

The Supreme Court came to the decision that the Fort Belknap reservation had reserved water rights through the 1888 agreement which created the Fort Belknap American Indian Reservation. It was found unnecessary for the Indians to have to reserve the water rights if they had already reserved the rights to the land for agricultural purposes, because the Indians would have no use for the farmland if they could not have access to a water source. It was decided that the water rights of the Milk River were implied when the Fort Belknap American Indian Reservation was created, in order to uphold provisions that had been previously stated. The majority opinion was delivered to the United States Supreme Court by Associate Justice Joseph McKenna. McKenna wrote that five of the defendants named in the bill failed to answer, and that the other defendants who did answer filed a joint and several answer. From this answer, the case was heard and a decree was entered against all of the defendants. It was determined by the Supreme Court that the reasoning behind the establishment of American Indian reservations was to provide a permanent homeland for the American Indians. The majority opinion found that the decree held. The majority opinion was held by Chief Justice Melville W. Fuller and Associate Justices William R. Day, Oliver Wendell Holmes Jr., Joseph McKenna, William H. Moody, Rufus Wheeler Peckham, and Edward D. White. After the verdict had been reached, the United States government allocated $25,000 to be used for the purpose of extending the irrigation system on the Milk River for use by the Fort Belknap American Indian reservation.

Dissenting opinion

Associate Justice David J. Brewer dissented from the majority opinion.

Implications

The Winters court reasoned that water rights were implied in the agreement that had been made with the American Indians in 1888, when the reservation was created. This agreement stated that the Fort Belknap Reservation had been created with the intention of the tribal people being able to become self-sufficient. The court noted that land without water has no value, especially when the purpose of the land was to help a group become self-supporting in the way of agriculture. Therefore, a reservation of water goes along with the reservation of the land. Water rights may be implied from American Indian reservations made by presidential executive order, or American Indian reservations which are created by an act of Congress. Other implications of this court case include setting more of a standard for American Indian water rights, along with setting a precedent for later Supreme Court cases which deal with implied water rights.

Effects following ruling

Although the ruling of Winters v. United States was made very clear, accounts show that water rights relating to American Indian reservations were put aside and neglected for decades after the ruling. While the United States government was caught up in the emergence of non-Indian settlers moving west, the government seemed to turn a blind eye to many non-Indian settlers who were making use of water sources which, under the terms of Winters v. United States, had been reserved for American Indian reservation use.
The United States Supreme Court was not called upon to further define American Indian reserved water rights until the case of Arizona v. California in 1963.

Related cases

Winters v. United States was a United States Supreme Court case with many implications. One thing that makes this case so monumental is the precedent that it set for United States Supreme Court cases that would follow it.

Arizona v. California

Arizona v. California was a set of 11 United States Supreme Court cases dealing with water rights. These cases took place between the years of 1931 and 2006. The initial question of this case was to determine how much water from the Colorado River Arizona was entitled to. Many western states became involved in the debate over the rights to the water from the Colorado River, and finally the United States government became involved, stating that several federal establishments, including five American Indian reservations, had water rights as defined by Winters v. United States. This United States Supreme Court case helped to solve a problem found in the case Winters v. United States. While the United States Supreme Court case of Winters v. United States held that American Indian reservations do have reserved water rights equal to the amount of water needed on the reservation to sufficiently irrigate all of the irrigable reservation acreage, there was always the question of how to decide what amount of water was needed to sufficiently irrigate on the American Indian reservations. Arizona v. California offers the solution of adjudication to help fix this problem.

Arizona v. San Carlos Apache Tribe of Arizona

This case dealt with either the United States as trustee or certain American Indian tribes asserting their rights to have certain Indian water rights in Arizona or Montana determined in federal court. The court ruled that all limits that any federal legislation put on state-court jurisdiction over Indian water rights were removed by the McCarran Amendment. This piece of legislation allowed state courts jurisdiction to determine American Indian water rights. This ruling included suits brought by American Indian tribes and pertaining to only American Indian claims. The decision of this case was that the judgment in each of the cases was reversed, and the cases were to be reviewed further.

Nevada v. United States

This United States Supreme Court case centered around water rights involving the Truckee River. The defendants in the case were all people who used water from the Truckee River, while the plaintiff was the United States. The defendants argued against American Indian tribal use of the water in the Truckee River, stating that the American Indian tribes were not parties to the original cause of action between the United States and the non-American Indian users of the water. The court ruled that the American Indian tribes did have water rights and were allowed to make use of the water in the Truckee River.

United States v. New Mexico

The United States claimed to have reserved the use of water out of the Rio Mimbres stream only where necessary to preserve the environment and wildlife, for instance to care for the timber in the forest or to secure favorable water flows. The United States Supreme Court upheld the ruling made earlier by the Supreme Court of New Mexico. This ruling stated that the United States did not have reserved rights in the Rio Mimbres stream when it came to recreational purposes.

Cappaert v. United States
Devils Hole cavern in Nevada became a detached part of Death Valley National Monument in 1952, by a proclamation of President Harry S. Truman made under the Antiquities Act. The cavern is home to a rare species of desert fish, the Devils Hole pupfish (Cyprinodon diabolis). In 1968 the Cappaerts, who were ranchers, were granted an application by the Nevada state engineer to begin using a water supply which took water from Devils Hole cavern, which lowered water levels in the cavern and endangered the viability of the fish. The federal government sought to place limits on the Cappaerts' use of the water, so as to protect the fish from extinction. The U.S. Supreme Court ruled in favor of the United States. The Court held that the implied-reservation-of-water-rights doctrine applies to groundwater as well as surface water. The Court next reaffirmed that "Federal water rights are not dependent upon state law or state procedures and they need not be adjudicated only in state courts." Finally, the Court held that when the United States had reserved Devils Hole in 1952, "it acquired by reservation water rights in unappropriated appurtenant water sufficient to maintain the level of the pool to preserve its scientific value" (i.e., preserve the fish, which are "objects of historic or scientific interest" under the American Antiquities Preservation Act).

Colorado River Water Conservation Dist. v. United States

The United States Supreme Court case Colorado River Water Conservation District v. United States was a court case resulting in the abstention doctrine which helped to prevent duplicate litigation between state courts and federal courts.

United States v. Powers

This United States Supreme Court case occurred over the argument of tribal water rights, and whether or not the water rights are passed along with the tribal land. When American Indian reservations would sell allotments of land to non-tribe members, those to whom the land was sold would want the same proportion of the reservation's water that the previous American Indian land owner had received. The Supreme Court upheld the earlier ruling that water rights are passed along with the land, meaning that a person who purchases land from an American Indian reservation also purchases an allotment of the water source used on the reservation.
package ru.osokin.stamp;

import com.itextpdf.text.DocumentException;
import org.junit.Test;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class Stamper2PdfTest {
    private static final String TIMES_TTF = "times.ttf";
    private static final String BORDER_IMAGE_SMALL_PNG = "/border/borderImageSmall.png";
    private static final String BORDER_IMAGE_PORTRAIT_PNG = "/border/borderImagePortrait.png";
    private static final String BORDER_IMAGE_LANDSCAPE_PNG = "/border/borderImageLandscape.png";
    private static final String LANDSCAPE_PDF = "/pdf/landscape.pdf";
    private static final String PORTRAIT_PDF = "/pdf/portrait.pdf";
    private static final String MULTI_PAGES_PDF = "/pdf/multiPages.pdf";
    private static final int FONT_SIZE = 6;
    private static final int RED = 28;
    private static final int GREEN = 73;
    private static final int BLUE = 255;

    @Test
    public void drawStamp2LandscapePdf() throws IOException, DocumentException {
        File result = File.createTempFile("stamp", ".pdf");
        System.out.println(result.getAbsolutePath());
        PdfFactory pdfFactory = new PdfFactory()
                .addMiddleStamp(getBigStamp(), 10, 10);
        pdfFactory.drawAllStamps(Stamper2PdfTest.class.getResourceAsStream(LANDSCAPE_PDF), new FileOutputStream(result));
    }

    @Test
    public void drawStamp2PortraitPdf() throws IOException, DocumentException {
        File result = File.createTempFile("stamp", ".pdf");
        System.out.println(result.getAbsolutePath());
        PdfFactory pdfFactory = new PdfFactory()
                .addMiddleStamp(getBigStamp(), 10, 10);
        pdfFactory.drawAllStamps(Stamper2PdfTest.class.getResourceAsStream(PORTRAIT_PDF), new FileOutputStream(result));
    }

    @Test
    public void drawStamp2MultiPagePdf() throws IOException, DocumentException {
        File result = File.createTempFile("stamp", ".pdf");
        System.out.println(result.getAbsolutePath());
        PdfFactory pdfFactory = new PdfFactory()
                .addMiddleStamp(getBigStamp(), 10, 10);
        pdfFactory.drawAllStamps(Stamper2PdfTest.class.getResourceAsStream(MULTI_PAGES_PDF), new FileOutputStream(result));
    }

    @Test
    public void drawStamp2MultiPagePdf_DiffStamps() throws IOException, DocumentException {
        File result = File.createTempFile("stamp", ".pdf");
        System.out.println(result.getAbsolutePath());
        PdfFactory pdfFactory = new PdfFactory()
                .addMiddleStamp(getSmallStamp(), 10, 80)
                .addMiddleStamp(getSmallStamp(), 10, 10)
                .addLastStamp(getBigStamp(), 10, 10)
                .addLastStamp(getBigStamp(), 10, 90);
        pdfFactory.drawAllStamps(Stamper2PdfTest.class.getResourceAsStream(MULTI_PAGES_PDF), new FileOutputStream(result));
    }

    private Stamp getSmallStamp() throws IOException, DocumentException {
        return new Stamp(2, 100, 160)
                .setStampDataList(getSmallStampDataList())
                .setBorder(new Border(Stamper2PdfTest.class.getResourceAsStream(BORDER_IMAGE_SMALL_PNG),
                        Stamper2PdfTest.class.getResourceAsStream(BORDER_IMAGE_SMALL_PNG))
                        .setPortraitSize(260, 50)
                        .setLandscapeSize(260, 50))
                .setVisibleTableGrid(false)
                .setPadding(new Padding(5, 20, 5, 5))
                .setFont(new StampFont(Stamper2PdfTest.class.getResourceAsStream("/" + TIMES_TTF), TIMES_TTF, FONT_SIZE, "Cp1251").setColor(RED, GREEN, BLUE));
    }

    private Stamp getBigStamp() throws IOException, DocumentException {
        return new Stamp(4, 120, 200)
                .setStampDataList(getBigStampDataList())
                .setBorder(new Border(Stamper2PdfTest.class.getResourceAsStream(BORDER_IMAGE_PORTRAIT_PNG),
                        Stamper2PdfTest.class.getResourceAsStream(BORDER_IMAGE_LANDSCAPE_PNG))
                        .setPortraitSize(570, 75)
                        .setLandscapeSize(820, 75))
                .setVisibleTableGrid(false)
                .setPadding(new Padding(20, 10, 5, 5))
                .setFont(new StampFont(Stamper2PdfTest.class.getResourceAsStream("/" + TIMES_TTF), TIMES_TTF, FONT_SIZE).setColor(RED, GREEN, BLUE));
    }

    private List<StampData> getBigStampDataList() {
        List<StampData> stampDataList = new ArrayList<>();
        stampDataList.add(new StampData("Certificate", "TEST0000CERTIFICATE0000STRING"));
        stampDataList.add(new StampData("", ""));
        stampDataList.add(new StampData("Owner", "<NAME> (Java developer)"));
        stampDataList.add(new StampData("Date of transaction", "11.12.2018 00:01:57"));
        stampDataList.add(new StampData("", "My home company"));
        stampDataList.add(new StampData("Document hash", "0689db4a-1120-46a5-b6d6-d81c4e739366"));
        stampDataList.add(new StampData("Valid", "from 01.01.2017 17:30+0300 to 31.12.2019 17:40+0300"));
        stampDataList.add(new StampData("", ""));
        return stampDataList;
    }

    private List<StampData> getSmallStampDataList() {
        List<StampData> stampDataList = new ArrayList<>();
        stampDataList.add(new StampData("Date of transaction", "11.12.2018 00:01:57"));
        stampDataList.add(new StampData("Received", "11.12.2018 00:02:04"));
        stampDataList.add(new StampData("Document hash", "0689db4a-1120-46a5-b6d6-d81c4e739366"));
        return stampDataList;
    }
}
/*
 * JBoss, Home of Professional Open Source
 * Copyright 2013 Red Hat, Inc., and individual contributors
 * as indicated by the @author tags.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.wildfly.security.auth.permission;

import java.security.Permission;

/**
 * The permission to run as another principal within some security domain.
 */
public final class RunAsPrincipalPermission extends Permission {

    private static final long serialVersionUID = -3361334389433669815L;

    public RunAsPrincipalPermission(final String name, final String securityDomainName) {
        super(compileName(name, securityDomainName));
    }

    private static String compileName(final String name, final String securityDomainName) {
        if (securityDomainName.indexOf(':') != -1) {
            throw new IllegalArgumentException("Security domain name is invalid");
        }
        return securityDomainName + ":" + name;
    }

    public boolean implies(final Permission permission) {
        return equals(permission);
    }

    public boolean equals(final Object obj) {
        return obj instanceof RunAsPrincipalPermission && equals((RunAsPrincipalPermission) obj);
    }

    public boolean equals(final RunAsPrincipalPermission perm) {
        return perm != null && perm.getName().equals(getName());
    }

    public int hashCode() {
        return getName().hashCode();
    }

    public String getActions() {
        return "";
    }
}
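As a quick illustration of the equality-based implies() above, here is a hypothetical snippet (the principal and domain names are made up) showing that only the exact domain/principal pair is implied:

import java.security.Permission;

public class RunAsPrincipalPermissionDemo {
    public static void main(String[] args) {
        Permission granted = new RunAsPrincipalPermission("bob", "web-app");

        // implies() delegates to equals(), so only the identical pair matches.
        System.out.println(granted.implies(new RunAsPrincipalPermission("bob", "web-app")));   // true
        System.out.println(granted.implies(new RunAsPrincipalPermission("alice", "web-app"))); // false
    }
}

Because the permission name is compiled as securityDomainName + ":" + name, there is no wildcard or hierarchy here; a policy must grant each domain/principal pair explicitly.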
Alpha-macroglobulin-induced release of anti-Ig-coated particles from a subpopulation of rabbit B lymphocytes. Approximately half of the rosettes formed by rabbit Ig+ lymphocytes (B cells) and anti-Ig-coated erythrocytes or glutaraldehyde-fixed bacteria are dissociated upon the addition of rabbit serum. Rabbit serum was fractionated, and the rosette-dissociating activity was found in purified preparations of rabbit alpha 1- and alpha 2-macroglobulins. Studies designed to elucidate the mechanism of rosette dissociation suggested that the alpha-macroglobulins dissociated rosettes by causing the release or proteolytic cleavage of the membrane proteins complexed with the anti-Ig-coated particles. These data suggest that the alpha-macroglobulins may have a role in the interaction of B lymphocytes with particulate antigens.
A Framework for Developing and Experimenting with Parallel Process Architectures to Support High-Performance Transport Systems

Multi-processing is a promising technique for improving the performance, scalability, and cost effectiveness of communication subsystems. Improving performance is becoming increasingly important to alleviate bottlenecks resulting from network transmission speeds that now often exceed the processing capacity of end-systems. This paper describes a modular framework for developing and experimenting with process architectures for bus-oriented, shared-memory multiprocessors. A process architecture binds units of communication protocol processing (such as layers, functions, connections, and messages) to one or more processing elements. This paper describes several alternative process architectures supported by ADAPTIVE and outlines techniques used to perform controlled experimentation with these alternatives.
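To make the notion of a process architecture concrete, the sketch below shows one simple binding, connection parallelism, in which each connection's protocol processing is pinned to a single worker thread. This is an illustration only; it is not code from the ADAPTIVE framework, and all names are hypothetical:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Hypothetical sketch of connection parallelism: each connection is bound to
// one single-threaded executor, so its protocol layers run on one processing element.
class ConnectionParallelDemux {
    private final Map<Integer, ExecutorService> workers = new ConcurrentHashMap<>();

    void dispatch(int connectionId, Runnable protocolProcessing) {
        workers.computeIfAbsent(connectionId,
                id -> Executors.newSingleThreadExecutor())
               .submit(protocolProcessing); // e.g., transport + network layer work for one message
    }

    void shutdown() {
        workers.values().forEach(ExecutorService::shutdown);
    }
}

A layer-parallel binding would instead dedicate a thread to each protocol layer and pass messages between layers through queues; a framework like the one described here exists precisely to compare such alternatives under controlled conditions.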
Prevalence of anemia among pediatric critical care survivors and impact of restrictive transfusion strategy on it: A study from North India

BACKGROUND: Anemia occurs frequently in critically ill patients, and it can have an impact on long-term outcome, more so after a critical illness. Very little data are available about anemia in pediatric critical care survivors. Recent randomized controlled trials in children support the use of a restrictive transfusion strategy in critically ill children. OBJECTIVES: This study aims to estimate the prevalence of anemia in pediatric critical care survivors, its causative factors and the impact of a restrictive transfusion strategy on its resolution. PATIENTS AND METHODS: In this retrospective observational study, for patients who were discharged from the pediatric intensive care unit (PICU), the clinical course, serial hemoglobin (Hb) levels, and transfusion history were collected. Patients were divided into two groups according to transfusion strategy (restrictive and liberal groups). Patients with anemia were followed up and persistence of anemia was noted monthly. RESULTS: Of 54 cases enrolled in the study, 35 children had anemia (prevalence 35/54 = 64.8%). A statistically significant difference between the anemic and nonanemic groups was found in terms of duration of PICU stay and development of infection, and no significant difference in age, gender, diagnosis, or requirement of mechanical ventilation. Among the 35 anemic children, a restrictive transfusion strategy was followed in 21 and a liberal transfusion strategy in 14. Ten children were lost to follow-up. Anemia resolved in 18 children (18/25 = 72%) and 7 children (7/25 = 28%) had persistent anemia. Among the 25 children, 4 children in the restrictive group (4/15 = 26.6%) and 3 children in the liberal group (3/10 = 30%) had persistent anemia (statistically not significant; P > 0.05). CONCLUSIONS: A large proportion of PICU survivor children are anemic at discharge; this could be due to a long PICU stay, infection acquired during the hospital stay, or a restrictive transfusion strategy, but on long-term follow-up no statistically significant difference was noted between the two groups.
This lecture treats some enduring misconceptions about modeling. One of these is that the goal is always prediction. The lecture distinguishes between explanation and prediction as modeling goals, and offers sixteen reasons other than prediction to build a model. It also challenges the common assumption that scientific theories arise from and ‘summarize’ data, when often, theories precede and guide data collection; without theory, in other words, it is not clear what data to collect. Among other things, it also argues that the modeling enterprise enforces habits of mind essential to freedom. It is based on the author’s 2008 Bastille Day keynote address to the Second World Congress on Social Simulation, George Mason University, and earlier addresses at the Institute of Medicine, the University of Michigan, and the Santa Fe Institute.
On Feb. 2, an employee at St. Therese Catholic Church, 113 W. Kort St., reported a theft. On Feb. 6, a 32-year-old man was arrested on Becker Street at Peggy Lane on charges of possession of drug paraphernalia and a Marathon County warrant. On Feb. 10, a 27-year-old woman was arrested on Grand Avenue at Alexander Avenue on charges of first-offense drunken driving. On Feb. 11, an 18-year-old man was arrested on Business Highway 51 at Morrison Avenue on charges of possession of marijuana with intent to deliver and possession of drug paraphernalia. Another 18-year-old man and a 17-year-old girl were arrested at the same location on charges of possession of marijuana and possession of drug paraphernalia.
def iloc(self, idx, raw_keys=True):
    # Return a copy of the start, direction and weight columns for the given row index.
    # Note: the raw_keys argument is currently unused.
    nice_data = np.copy(self._all_data[idx, [DataModel.START.value,
                                             DataModel.DIRECTION.value,
                                             DataModel.WEIGHT.value]])
    return nice_data
package xworker.dataObject.swt;

import org.xmeta.ActionContext;
import org.xmeta.Thing;

import xworker.dataObject.DataObject;

public class StaticDataObjectActions {
    public static void bindToDataObjectForm(ActionContext actionContext) {
        Thing self = actionContext.getObject("self");
        Thing form = actionContext.getObject("dataObjectForm");
        DataObject dataObject = self.doAction("getDataObject", actionContext);
        if (dataObject != null && form != null) {
            form.doAction("setDataObject", actionContext, "dataObject", dataObject);
        }
    }
}
Can we predict future acute coronary events in patients with stable coronary artery disease? Many therapeutic measures have been developed to effectively treat patients with coronary disease. While secondary prevention has become a vital component in maximizing reductions in coronary disease mortality, subsequent acute coronary events still occur and are often unexpected and unpredictable. See also P 325. We now recognize several standard risk factors for the development of coronary disease and a modulation of some of these risk factors has been shown to decrease the incidence and recurrence of coronary events. 1-3 With the help of noninvasive and invasive cardiologic technology, we may also stratify coronary patients into low- and high-risk subgroups. 4 Mortality in coronary disease is directly related to the number of diseased vessels, the presence of left ventricular dysfunction, and an ischemic response on treadmill exercise testing, particularly when it occurs during the early stages of exercise. However, as stratification in prognosis is not all or none (ie, only some
import torch.nn as nn
import torch.nn.functional as fn


class DenseBLSTMCTC(nn.Module):
    def __init__(self, num_classes):
        super(DenseBLSTMCTC, self).__init__()
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 64, 3, 1, 1),
            nn.LeakyReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2),
            nn.Conv2d(64, 128, 3, 1, 1),
            nn.LeakyReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2),
            nn.Conv2d(128, 256, 3, 1, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(256, 256, 3, 1, 1),
            nn.LeakyReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(1, 2), stride=2),
            nn.Conv2d(256, 512, 3, 1, 1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(512, 512, 3, 1, 1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(512, 512, 3, 1, 1),
            nn.LeakyReLU(inplace=True),
        )
        self.lstm1 = nn.LSTM(512 * 4, 256, bidirectional=True)
        self.linear1 = nn.Linear(512, 512)
        self.lstm2 = nn.LSTM(512, 256, bidirectional=True)
        self.linear2 = nn.Linear(512, num_classes)

        # init weights and bias
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.cnn(x)
        # (batch, channels, height, width) -> (width, batch, channels, height):
        # the width axis becomes the time dimension expected by the LSTM/CTC head.
        x = x.permute(3, 0, 1, 2)
        x = x.contiguous().view(x.shape[0], x.shape[1], -1)
        x, _ = self.lstm1(x)
        x = self.linear1(x)
        x, _ = self.lstm2(x)
        x = self.linear2(x)
        return fn.log_softmax(x, dim=2)
#pragma once

#include <aw/engine/window.hpp>
#include <aw/graphics/3d/orbitCameraController.hpp>
#include <aw/utils/math/vector.hpp>

namespace aw {
class Camera;
} // namespace aw

class Airplane;

class CameraController
{
public:
  CameraController(aw::Camera* camera = nullptr);

  void setCamera(aw::Camera* camera);

  void update(float delta, const Airplane& airplane);
  void processEvent(const aw::WindowEvent& event);

private:
  aw::Camera* mCamera{nullptr};

  aw::OrbitCameraController mOrbitalController;

  bool mMouseInit{false};
  aw::Vec2i mOldMousePos;
};
Progress in global rollout of new multidrug-resistant tuberculosis treatments. SETTING: The global multidrug-resistant tuberculosis (MDR-TB) epidemic has grown over the past decade and continues to be difficult to manage. In response, new drugs and treatment regimens have been recommended.OBJECTIVE: In 2017 and again in 2018, the International Union Against Tuberculosis and Lung Disease (The Union) drug-resistant (DR) TB Working Group collaborated with RESIST-TB to administer an internet survey to members of The Union around the world to assess access to these new treatment strategies.DESIGN: A nine-question survey was developed using SurveyMonkey®. The survey was open for participation to all members of The Union registered under the TB Section. Two reminders were sent during each survey. The responses were analyzed taking into account the WHO Region to which the respondent belonged.RESULTS: The 2018 survey showed a global increase in implementation of the shorter (9-month) MDR-TB regimen (from 33% to 56% of respondents, P < 0.001) and an increase in the use of bedaquiline and/or delamanid (from 25% to 41% of respondents, P < 0.001) compared to 2017. There were substantial variations in roll-out between WHO regions.CONCLUSION: These results demonstrate improvement in global implementation of the new treatment strategies over a 1-year period.
import React, { useEffect, useState } from 'react';
import root from 'react-shadow';

import { useForceUpdate } from '../../../hooks/useForceUpdate';

import {
	Select as SelectControl,
	ISelectProps,
} from '../Select.bundle/desktop';

const Select = (props: ISelectProps) => {
	const [value, setValue] = useState(props.value);
	return <SelectControl {...props} value={value} setValue={setValue} />;
};

const demoList = [
	{ id: 'apple', content: 'Apple', disabled: true },
	{ id: 'banana', content: 'Banana' },
	{ id: 'meat', content: 'Meat' },
	{ id: 'hidden', content: 'Hidden value', hidden: true },
	{
		title: 'Group name',
		items: [
			{ id: 'sausage', content: 'Sausage', disabled: true },
			{ id: 'butter', content: 'Butter', disabled: true },
			{ id: 'bread', content: 'Bread' },
		],
	},
	{ id: 'milk', content: 'Milk', disabled: true },
];

export const Radio = () => (
	<Select options={demoList} value={'apple'} placeholder="Select something" />
);

export const Checkbox = () => (
	<Select
		options={demoList}
		value={['banana', 'meat']}
		placeholder="Select something"
	/>
);

export const Disabled = () => (
	<Select
		options={demoList}
		value={'banana'}
		placeholder="Select something"
		disabled
	/>
);

export const ShadowDOM = () => {
	// Wait for the page to load, since styles may be injected at runtime
	const update = useForceUpdate();
	useEffect(() => {
		window.addEventListener('load', update);
		// eslint-disable-next-line react-hooks/exhaustive-deps
	}, []);

	return (
		<root.div
			onKeyDown={(evt) => {
				evt.stopPropagation();
			}}
			mode="closed"
		>
			{/* Clone all styles, since this is just a demo */}
			<div>
				{Array.from(document.styleSheets).map(({ href }, idx) => {
					return href === null ? undefined : (
						<link key={idx} href={href} rel="stylesheet" />
					);
				})}
			</div>
			<div>Component inside shadowDOM</div>
			<Radio />
		</root.div>
	);
};
// NewUserLoginHandler creates a new Login Handler
func NewUserLoginHandler(conn *pgx.Conn) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		enc := json.NewEncoder(w)
		var form *LoginForm
		dec := json.NewDecoder(r.Body)
		err := dec.Decode(&form)
		if err != nil {
			resp := APIResponse{
				Code:    http.StatusBadRequest,
				Type:    "error",
				Message: "Please check the data.",
			}
			w.WriteHeader(http.StatusBadRequest)
			enc.Encode(resp)
			return
		}
		form.format()
		err = form.validate()
		if err != nil {
			resp := APIResponse{
				Code:    http.StatusBadRequest,
				Type:    "error",
				Message: err.Error(),
			}
			w.WriteHeader(http.StatusBadRequest)
			enc.Encode(resp)
			return
		}
		// A single query both verifies that the account exists and fetches the
		// stored password; a missing row is reported as invalid credentials so
		// that callers cannot probe which emails are registered.
		var password string
		err = conn.QueryRow(context.Background(), "SELECT password FROM users WHERE email=$1", form.Email).Scan(&password)
		if err == pgx.ErrNoRows {
			resp := APIResponse{
				Code:    http.StatusBadRequest,
				Type:    "error",
				Message: "Invalid email or password",
			}
			w.WriteHeader(http.StatusBadRequest)
			enc.Encode(resp)
			return
		}
		if err != nil {
			resp := APIResponse{
				Code:    http.StatusInternalServerError,
				Type:    "error",
				Message: "Internal server error",
			}
			w.WriteHeader(http.StatusInternalServerError)
			enc.Encode(resp)
			fmt.Println(err)
			return
		}
		// NOTE: this compares plaintext passwords, mirroring the stored format;
		// production code should store salted hashes (e.g. bcrypt) and use a
		// constant-time comparison instead.
		if form.Password != password {
			resp := APIResponse{
				Code:    http.StatusBadRequest,
				Type:    "error",
				Message: "Invalid email or password",
			}
			w.WriteHeader(http.StatusBadRequest)
			enc.Encode(resp)
			return
		}
		resp := APIResponse{
			Code:    http.StatusOK,
			Type:    "",
			Message: "Success Login",
		}
		w.WriteHeader(http.StatusOK)
		enc.Encode(resp)
	}
}
Brahman

Vedic
Brahman is a concept present in the Vedic Samhitas, the oldest layer of the Vedas, dated to the 2nd millennium BCE. For example,
The Ṛcs are limited (parimita),
The Samans are limited,
And the Yajuses are limited,
But of the Word Brahman, there is no end.
— Taittiriya Samhita VII.3.1.4, Translated by Barbara Holdrege
The concept of Brahman is referred to in hundreds of hymns in the Vedas. For example, it is found in Rig veda hymns such as 2.2.10, 6.21.8, 10.72.2 and in Atharva veda hymns such as 6.122.5, 10.1.12, and 14.1.131. The concept is found in various layers of the Vedic literature; for example: Aitareya Brahmana 1.18.3, Kausitaki Brahmana 6.12, Satapatha Brahmana 13.5.2.5, Taittiriya Brahmana 2.8.8.10, Jaiminiya Brahmana 1.129, Taittiriya Aranyaka 4.4.1 through 5.4.1, Vajasaneyi Samhita 22.4 through 23.25, Maitrayani Samhita 3.12.1:16.2 through 4.9.2:122.15. The concept is extensively discussed in the Upanishads embedded in the Vedas (see next section), and is also mentioned in the vedāṅga (the limbs of the Vedas) such as the Srauta sutra 1.12.12 and Paraskara Gryhasutra 3.2.10 through 3.4.5.
Jan Gonda states that the diverse references to Brahman in the Vedic literature, starting with the Rigveda Samhitas, convey "different senses or different shades of meaning". According to Gonda, there is no single word in modern Western languages that can render the various shades of meaning of the word Brahman in the Vedic literature. In verses considered the most ancient, the Vedic idea of Brahman is the "power immanent in the sound, words, verses and formulas of Vedas". However, states Gonda, the verses suggest that this ancient meaning was never the only meaning, and the concept evolved and expanded in ancient India.
Barbara Holdrege states that the concept of Brahman is discussed in the Vedas along four major themes: as the Word or verses (Sabdabrahman), as Knowledge embodied in the Creator Principle, as Creation itself, and as a Corpus of traditions. Hananya Goodman states that the Vedas conceptualize Brahman as the Cosmic Principle underlying all that exists. Gavin Flood states that the Vedic era witnessed a process of abstraction, in which the concept of Brahman evolved and expanded from the power of sound, words and rituals to the "essence of the universe", the "deeper foundation of all phenomena", the "essence of the self (Atman, soul)", and the deeper "truth of a person beyond apparent difference".

Brahman as a metaphysical concept
Brahman is the key metaphysical concept in various schools of Hindu philosophy. It is the theme of their diverse discussions of the two central questions of metaphysics: what is ultimately real, and are there principles that apply to everything that is real? Brahman is the ultimate, "eternally constant" reality, while the observed universe is a different kind of reality, one which is "temporary, changing" Maya in various orthodox Hindu schools. Maya pre-exists and co-exists with Brahman—the Ultimate Reality, the Highest Universal, the Cosmic Principle. In addition to the concept of Brahman, Hindu metaphysics includes the concept of Atman—or soul, self—which is also considered ultimately real. The various schools of Hinduism, particularly the dual and non-dual schools, differ on the nature of Atman: whether it is distinct from Brahman or the same as Brahman. Those that consider Brahman and Atman as distinct are theistic; Dvaita Vedanta and the later Nyaya schools illustrate this premise. 
Those that consider Brahman and Atman as the same are monist or pantheistic; Advaita Vedanta, and the later Samkhya and Yoga schools, illustrate this metaphysical premise. In schools that equate Brahman with Atman, Brahman is the sole, ultimate reality. The predominant teaching in the Upanishads is the spiritual identity of the soul within each human being with the soul of every other human being and living being, as well as with the supreme, ultimate reality Brahman.
In the metaphysics of the major schools of Hinduism, Maya is perceived reality, one that does not reveal the hidden principles, the true reality—the Brahman. Maya is unconscious; Brahman-Atman is conscious. Maya is the literal and the effect; Brahman is the figurative Upādāna—the principle and the cause. Maya is born, changes, evolves, and dies with time, from circumstances, due to invisible principles of nature. Atman-Brahman is the eternal, unchanging, invisible principle, the unaffected absolute and resplendent consciousness. The concept of Maya, states Archibald Gough, is "the indifferent aggregate of all the possibilities of emanatory or derived existences, pre-existing with Brahman", just as the possibility of a future tree pre-exists in the seed of the tree.
While Hinduism sub-schools such as Advaita Vedanta emphasize the complete equivalence of Brahman and Atman, they also expound on Brahman as saguna Brahman—the Brahman with attributes—and nirguna Brahman—the Brahman without attributes. The nirguna Brahman is the Brahman as it really is; however, the saguna Brahman is posited as a means to realizing nirguna Brahman, and the Hinduism schools declare saguna Brahman to be a part of the ultimate nirguna Brahman. The concept of the saguna Brahman, such as in the form of avatars, is considered in these schools of Hinduism to be a useful symbolism, path and tool for those who are still on their spiritual journey, but the concept is finally cast aside by the fully enlightened.

Brahman as an ontological concept
Brahman, along with the Soul/Self (Atman), is part of the ontological premises of Indian philosophy. Different schools of Indian philosophy have held widely dissimilar ontologies. Buddhism and the Carvaka school of Hinduism deny that there exists anything called "a soul, a self" (individual Atman or Brahman in the cosmic sense), while the orthodox schools of Hinduism, Jainism and the Ajivikas hold that there exists "a soul, a self".
Brahman, as well as the Atman in every human being (and living being), is considered equivalent and the sole reality, the eternal, self-born, unlimited, innately free, blissful Absolute in schools of Hinduism such as Advaita Vedanta and Yoga. Knowing one's own self is knowing the God inside oneself, and this is held to be the path to knowing the ontological nature of Brahman (universal Self), as it is identical to the Atman (individual Self). The nature of Atman-Brahman is held in these schools, states Barbara Holdrege, to be that of pure being (sat), consciousness (cit) and fullness of bliss (ananda), and it is formless, distinctionless, nonchanging and unbounded. In theistic schools, in contrast, such as Dvaita Vedanta, the nature of Brahman is held to be that of the eternal, unlimited, innately free, blissful Absolute, while each individual's soul is held to be distinct and limited, able at best to come close in eternal blissful love of the Brahman (therein viewed as the Godhead).
Other schools of Hinduism have their own ontological premises relating to Brahman, reality and the nature of existence. 
The Vaisheshika school of Hinduism, for example, holds a substantial, realist ontology. The Carvaka school denied Brahman and Atman, and held a materialist ontology.

Brahman as an axiological concept
Brahman and Atman are key concepts in Hindu theories of axiology: ethics and aesthetics. Ananda (bliss), state Michael Myers and other scholars, has axiological importance to the concept of Brahman, as the universal inner harmony. Some scholars equate Brahman with the highest value, in an axiological sense.
The axiological concepts of Brahman and Atman are central to the Hindu theory of values. A statement such as 'I am Brahman', states Shaw, means 'I am related to everything', and this is the underlying premise for compassion for others in Hinduism, for each individual's welfare, peace, or happiness depends on others, including other beings and nature at large, and vice versa. Tietge states that even in the non-dual schools of Hinduism where Brahman and Atman are treated as ontologically equivalent, the theory of values emphasizes the individual agent and ethics. In these schools of Hinduism, states Tietge, the theory of action is derived from and centered in compassion for the other, and not egotistical concern for the self.
The axiological theory of values emerges implicitly from the concepts of Brahman and Atman, states Bauer. The aesthetics of human experience and ethics are one consequence of self-knowledge in Hinduism, one resulting from the perfect, timeless unification of one's soul with the Brahman, the soul of everyone, everything and all eternity, wherein the pinnacle of human experience is not dependent on an afterlife, but on pure consciousness in the present life itself. It does not assume that an individual is weak, nor does it presume that he is inherently evil, but the opposite: the human soul and its nature are held to be fundamentally unqualified, faultless, beautiful, blissful, ethical, compassionate and good. Ignorance is to assume it evil; liberation is to know its eternal, expansive, pristine, happy and good nature.
The axiological premise in Hindu thought and Indian philosophies in general, states Nikam, is to elevate the individual, exalting the innate potential of man, where the reality of his being is the objective reality of the universe. The Upanishads of Hinduism, summarizes Nikam, hold that the individual has the same essence and reality as the objective universe, and this essence is the finest essence; the individual soul is the universal soul, and Atman is the same reality and the same aesthetics as the Brahman.

Brahman as a teleological concept
Brahman and Atman are very important teleological concepts. Teleology deals with the apparent purpose, principle or goal of something. In the first chapter of the Shvetashvatara Upanishad, these questions are dealt with. It says:
"People who make inquiries about brahman say: What is the cause of Brahman? Why were we born? By what do we live? On what are we established? Governed by whom, O you who know Brahman, do we live in pleasure and in pain, each in our respective situation?
— Shvetashvatara Upanishad, Hymns 1.1
The main purpose of the Brahman and why it exists is a subjective question according to the Upanishads. One can only find out its true purpose when one becomes the Brahman, as the Brahman is all the knowledge one can know itself. Hence, complete answers for anything in life can only be determined or obtained when the Brahman is realized, as the Brahman is all the complete knowledge itself. 
This is said in the Aitareya Upanishad 3.3 and Brihadaranyaka Upanishad 4.4.17, and in many other Upanishads.
Knowledge is the eye of all that, and on knowledge it is founded. Knowledge is the eye of the world, and knowledge, the foundation. Brahman is knowing.
— Aitareya Upanishad, Hymns 3.3
One of the reasons why the Brahman should be realized, according to the Upanishads, is that it removes suffering from a person's life. This is because the person has the ability and knowledge to discriminate between the unchanging (Atman and Brahman) and the ever-changing (Prakriti), and so the person is not attached to the transient. Hence, the person is only content with the self and not with his body or anything other than the self. In Brihadaranyaka Upanishad 3.9.26 it is mentioned that the atman 'neither trembles in fear nor suffers injury', and the Isha Upanishad 6-7 likewise speaks of suffering as nonexistent when one becomes the Brahman, as one then sees the self in all beings and all beings in the self. Therefore, the apparent purpose of Brahman is in discussion in the Upanishads, but the Brahman itself is the only self-contained purpose and true goal according to the Upanishads, so posing the question is redundant. The Upanishads consider the Brahman the only actually worthwhile goal in life, and ultimately one should aim to become it, as it is the means and an end in and of itself to ultimate knowledge, immortality, etc. So the question of the ultimate purpose of everything, including the Brahman, is answered by realizing or attaining the Brahman, as the Brahman itself is ultimate knowledge. Hence, the Brahman is a teleological concept, as it is the ultimate purpose and goal of everything possible, and it permeates everything and is in everything.

Brahman as a soteriological concept: Moksha
The orthodox schools of Hinduism, particularly the Vedanta, Samkhya and Yoga schools, focus on the concept of Brahman and Atman in their discussion of moksha. The Advaita Vedanta holds that there is no being/non-being distinction between Atman and Brahman. The knowledge of Atman (Self-knowledge) is synonymous with the knowledge of Brahman inside the person and outside the person. Furthermore, the knowledge of Brahman leads to a sense of oneness with all existence, self-realization, indescribable joy, and moksha (freedom, bliss), because Brahman-Atman is the origin and end of all things, the universal principle behind and at the source of everything that exists, the consciousness that pervades everything and everyone.
The theistic sub-schools, such as the Dvaita Vedanta of Hinduism, start with the same premises but add the premise that individual souls and Brahman are distinct, and thereby reach entirely different conclusions, in which Brahman is conceptualized in a manner similar to God in other major world religions. The theistic schools assert that moksha is the loving, eternal union or nearness of one's soul with the distinct and separate Brahman (Vishnu, Shiva or equivalent henotheism). Brahman, in these sub-schools of Hinduism, is considered the highest perfection of existence, which every soul journeys towards in its own way for moksha.

Vedanta
The concept of Brahman, its nature and its relationship with Atman and the observed universe, is a major point of difference between the various sub-schools of the Vedanta school of Hinduism.

Achintya Bheda Abheda
The Achintya Bheda Abheda philosophy is similar to Dvaitadvaita (differential monism). In this philosophy, Brahman is not just impersonal, but also personal. 
That Brahman is the Supreme Personality of Godhead. At the first stage of realization of the Absolute Truth (by the process called jnana), He is realized as the impersonal Brahman; then as the personal Brahman having the eternal Vaikuntha abode (also known as Brahmalokah sanatana); then as Paramatma (by the process of yoga, meditation on the Supersoul, Vishnu-God in the heart)—Vishnu (Narayana, also in everyone's heart), who has many abodes known as Vishnulokas (Vaikunthalokas); and finally (when the Absolute Truth is realized by bhakti) as Bhagavan, the Supreme Personality of Godhead, who is the source of both Paramatma and Brahman (personal, impersonal, or both).

Vaishnavism
All Vaishnava schools are panentheistic and perceive the Advaita concept of identification of Atman with the impersonal Brahman as an intermediate step of self-realization, but not Mukti, the final liberation of complete God-realization through Bhakti Yoga. Gaudiya Vaishnavism, a form of Achintya Bheda Abheda philosophy, also concludes that Brahman is the Supreme Personality of Godhead. According to them, Brahman is Lord Vishnu/Krishna; the universe and all other manifestations of the Supreme are extensions of Him.

Bhakti movement
The Bhakti movement of Hinduism built its theosophy around two concepts of Brahman—Nirguna and Saguna. Nirguna Brahman was the concept of the Ultimate Reality as formless, without attributes or quality. Saguna Brahman, in contrast, was envisioned and developed as having form, attributes and quality. The two had parallels in the ancient pantheistic unmanifest and theistic manifest traditions, respectively, and are traceable to the Arjuna-Krishna dialogue in the Bhagavad Gita. It is the same Brahman, but viewed from two perspectives: one from a Nirguni knowledge-focus and the other from a Saguni love-focus, united as Krishna in the Gita. Nirguna bhakta's poetry was Jnana-shrayi, or rooted in knowledge. Saguna bhakta's poetry was Prema-shrayi, or rooted in love. In Bhakti, the emphasis is reciprocal love and devotion, where the devotee loves God, and God loves the devotee.
Jeaneane Fowler states that the concepts of Nirguna and Saguna Brahman, at the root of Bhakti movement theosophy, underwent more profound development with the ideas of the Vedanta school of Hinduism, particularly those of Adi Shankara's Advaita Vedanta, Ramanuja's Vishishtadvaita Vedanta, and Madhvacharya's Dvaita Vedanta. Two influential 12th-century treatises on bhakti were the Sandilya Bhakti Sutra—a treatise resonating with Nirguna-bhakti—and the Narada Bhakti Sutra—a treatise that leans towards Saguna-bhakti.
The Nirguna and Saguna Brahman concepts of the Bhakti movement have been baffling to scholars, particularly the Nirguni tradition, because it offers, states David Lorenzen, "heart-felt devotion to a God without attributes, without even any definable personality". Yet given the "mountains of Nirguni bhakti literature", adds Lorenzen, bhakti for Nirguna Brahman has been a part of the reality of the Hindu tradition along with the bhakti for Saguna Brahman. These were two alternate ways of imagining God during the bhakti movement.

Buddhist understanding of Brahman
Buddhism rejects the Upanishadic doctrine of Brahman and Atman (soul, permanent self, essence). According to Damien Keown, "the Buddha said he could find no evidence for the existence of either the personal soul (atman) or its cosmic counterpart (brahman)". The metaphysics of Buddhism rejects Brahman (ultimate being), Brahman-like essence, soul and anything metaphysically equivalent through its Anatta doctrine. 
According to Merv Fowler, some forms of Buddhism have incorporated concepts that resemble that of Brahman. As an example, Fowler cites the early Sarvastivada school of Buddhism, which "had come to accept a very pantheistic religious philosophy, and are important because of the impetus they gave to the development of Mahayana Buddhism". According to William Theodore De Bary, in the doctrines of the Yogacara school of Mahayana Buddhism, "the Body of Essence, the Ultimate Buddha, who pervaded and underlay the whole universe [...] was in fact the World Soul, the Brahman of the Upanishads, in a new form". According to Fowler, some scholars have identified the Buddhist nirvana, conceived of as the Ultimate Reality, with the Hindu Brahman/atman; Fowler claims that this view "has gained little support in Buddhist circles." Fowler asserts that the authors of a number of Mahayana texts took pains to differentiate their ideas from the Upanishadic doctrine of Brahman.

Brahma as a surrogate for Brahman in Buddhist texts
The spiritual concept of Brahman is far older in the Vedic literature, and some scholars suggest that the deity Brahma may have emerged as a personal conception and icon with form and attributes (the saguna version) of the impersonal, nirguna (without attributes), formless universal principle called Brahman. In the Hindu texts, one of the earliest mentions of the deity Brahma along with Vishnu and Shiva is in the fifth Prapathaka (lesson) of the Maitrayaniya Upanishad, probably composed in the late 1st millennium BCE, after the rise of Buddhism.
The early Buddhists attacked the concept of Brahma, states Gananath Obeyesekere, and thereby polemically attacked the Vedic and Upanishadic concept of the gender-neutral, abstract metaphysical Brahman. This critique of Brahma in early Buddhist texts aims at ridiculing the Vedas, but the same texts simultaneously call metta (loving-kindness, compassion) the state of union with Brahma. The early Buddhist approach to Brahma was to reject any creator aspect, while retaining the value system of the Vedic Brahmavihara concepts within the Buddhist value system. According to Martin Wiltshire, the term "Brahma loka" in the Buddhist canon, instead of "Svarga loka", is likely a Buddhist attempt to choose and emphasize the "truth power" and knowledge focus of the Brahman concept in the Upanishads. Simultaneously, by reformulating Brahman as Brahma and relegating it within its Devas and Samsara theories, early Buddhism rejected the Atman-Brahman premise of the Vedas in order to present its own Dhamma doctrines (anicca, dukkha and anatta).

Brahman in Sikhism
The metaphysical concept of Brahman, particularly as nirguni Brahman—attributeless, formless, eternal Highest Reality—is at the foundation of Sikhism. This belief is observed through nirguni Bhakti by the Sikhs.
In Gauri, which is part of the Guru Granth Sahib, Brahman is declared as "One without a second"; in Sri Rag, "everything is born of Him, and is finally absorbed in Him"; in Var Asa, "whatever we see or hear is the manifestation of Brahman". Nesbitt states that the first two words, Ik Onkar, in the twelve-word Mul Mantar at the opening of the Sikh scripture Guru Granth Sahib, have been translated in three different ways by scholars: "There is one god", "This being is one", and "One reality is".
A similar emphasis on "One without a second" for the metaphysical concept of Brahman is found in ancient texts of Hinduism, such as the Chandogya Upanishad's chapter 6.2. 
The ideas about God and the Highest Reality in Sikhism share themes found in the Saguna and Nirguna concepts of Brahman in Hinduism. The concept of Ultimate Reality (Brahman) is also referred to in Sikhism as Nam, Sat-naam or Naam, and Ik Oankar, like the Hindu Om, symbolizes this Reality.

Brahman in Jainism
Scholars contest whether the concept of Brahman is rejected or accepted in Jainism. The concept of a theistic God is rejected by Jainism, but Jiva, or "Atman (soul) exists", is held to be a metaphysical truth and central to its theory of rebirths and Kevala Jnana. Bissett states that Jainism accepts the "material world" and "Atman", but rejects Brahman—the metaphysical concept of Ultimate Reality and Cosmic Principles found in the ancient texts of Hinduism. Goswami, in contrast, states that the literature of Jainism has an undercurrent of monist themes, where the self who gains the knowledge of Brahman (Highest Reality, Supreme Knowledge) is identical to Brahman itself. Jaini states that Jainism neither accepts nor rejects the premise of Ultimate Reality (Brahman); instead, Jain ontology adopts a many-sided doctrine called Anekantavada. This doctrine holds that "reality is irreducibly complex" and that no human view or description can represent the Absolute Truth. Those who have understood and realized the Absolute Truth are the liberated ones and the Supreme Souls, with Kevala Jnana.

Comparison of Brahma, Brahman, Brahmin and Brahmanas
Brahma is distinct from Brahman. Brahma is a male deity, in the post-Vedic Puranic literature, who creates but neither preserves nor destroys anything. He is envisioned in some Hindu texts to have emerged from the metaphysical Brahman along with Vishnu (preserver), Shiva (destroyer), all other gods, goddesses, matter and other beings. In the theistic schools of Hinduism where the deity Brahma is described as part of its cosmology, he is a mortal like all gods and goddesses, and dissolves into the abstract immortal Brahman when the universe ends; thereafter a new cosmic cycle (kalpa) begins.
Brahman is a metaphysical concept of Hinduism referring to the ultimate unchanging reality that, states Doniger, is uncreated, eternal, infinite, transcendent, the cause, the foundation, the source and the goal of all existence. It is envisioned as either the cause of, or that which transforms itself into, everything that exists in the universe as well as all beings: that which existed before the present universe and time, that which exists as the current universe and time, and that which will absorb and exist after the present universe and time end. It is a gender-neutral abstract concept. The abstract Brahman concept is predominant in the Vedic texts, particularly the Upanishads, while the deity Brahma finds only minor mention in the Vedas and the Upanishads. In the Puranic and the Epics literature, the deity Brahma appears more often, but inconsistently. Some texts suggest that the god Vishnu created Brahma (Vaishnavism), others suggest the god Shiva created Brahma (Shaivism), yet others suggest the goddess Devi created Brahma (Shaktism), and these texts then go on to state that Brahma is a secondary creator of the world, working respectively on their behalf. Further, the medieval-era texts of these major theistic traditions of Hinduism assert that the saguna Brahman is Vishnu, is Shiva, or is Devi, respectively; that these are different names or aspects of the Brahman; and that the Atman (soul, self) within every living being is the same as, or part of, this ultimate, eternal Brahman. 
Brahmin is a varna in Hinduism specialising in theory as priests, preservers and transmitters of sacred literature across generations. The Brahmanas are one of the four ancient layers of texts within the Vedas. They are primarily a digest incorporating myths, legends, the explanation of Vedic rituals and in some cases philosophy. They are embedded within each of the four Vedas, and form a part of the Hindu śruti literature.
import time


def countDown(start, message):
    if start > 20:
        raise OverflowError("Countdown cannot accept numbers bigger than 20.")
    for i in range(start, 0, -1):
        time.sleep(1)
        print(i)
    print(message)


countDown(20, "Blastoff!🚀")
Why hasn't evolution selected for perfect self-control? Self-control refers to the ability to deliberately reject tempting options and instead select ones that produce greater long-term benefits. Although some apparent failures of self-control are, on closer inspection, reward maximizing, at least some self-control failures are clearly disadvantageous and non-strategic. The existence of poor self-control presents an important evolutionary puzzle because there is no obvious reason why good self-control should be more costly than poor self-control. After all, a rock is infinitely patient. I propose that self-control failures result from cases in which well-learned (and thus routinized) decision making strategies yield suboptimal choices. These mappings persist in the decision-maker's repertoire because they result from learning processes that are adaptive in the broader context, either on the timescale of learning or of evolution. Self-control, then, is a form of cognitive control, and the subjective feeling of effort likely reflects the true costs of cognitive control. Poor self-control, in this view, is ultimately a result of bounded optimality.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score


def plot_roc_curve(fpr: np.ndarray, tpr: np.ndarray, trainy: np.ndarray,
                   y_scores: np.ndarray, label: str = None):
    """Plot a ROC curve with the chance diagonal and an AUC annotation."""
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--')  # diagonal = random classifier
    plt.title("ROC Curve")
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate (Recall)")
    auc_score = "ROC AUC = {:.4f}".format(
        roc_auc_score(trainy, y_scores[:, 1]))
    plt.annotate(auc_score, (0.5, 0.3))
    plt.show()
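# A hypothetical usage sketch for plot_roc_curve above; the dataset and the
# classifier are stand-ins chosen only to produce valid (fpr, tpr, y_scores)
# inputs, and are not part of the original snippet.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve

X, y = make_classification(n_samples=500, random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X, y)
y_scores = clf.predict_proba(X)             # shape (n, 2); column 1 holds positive-class scores
fpr, tpr, _ = roc_curve(y, y_scores[:, 1])
plot_roc_curve(fpr, tpr, y, y_scores, label="logistic regression")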
package client

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"

	"github.com/valyala/fasthttp"
)

type influxClientV1 struct {
	influxClient
	database string
	user     string
	pass     string
}

func newInfluxDbV1Client(cfg InfluxConfig, httpClient *fasthttp.Client) *influxClientV1 {
	return &influxClientV1{
		influxClient: influxClient{
			name:       cfg.Name,
			baseURL:    cfg.URL,
			httpClient: httpClient,
			writeURL:   []byte(writeURLFromConfigV1(cfg)),
		},
		database: cfg.V1.Database,
		user:     cfg.V1.User,
		pass:     cfg.V1.Pass,
	}
}

// Create creates the target database; an empty command defaults to
// "CREATE DATABASE <database>".
func (c *influxClientV1) Create(command string) error {
	if command == "" {
		command = "CREATE DATABASE " + c.database
	}
	return c.sendCmd(command)
}

func (c *influxClientV1) Reset() error {
	return c.sendCmd("DROP DATABASE " + c.database)
}

func (c *influxClientV1) sendCmd(cmd string) error {
	vals := url.Values{}
	vals.Set("q", cmd)
	u, err := url.Parse(c.baseURL)
	if err != nil {
		return err
	}
	if c.user != "" && c.pass != "" {
		u.User = url.UserPassword(c.user, c.pass)
	}
	// The URL-encoded command travels in the request body; Request.PostForm is
	// only populated server-side, so there is no need to set it here.
	body := vals.Encode()
	req, err := http.NewRequest("POST", u.String()+"/query", strings.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Add("Content-Length", strconv.Itoa(len(body)))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		respBody, _ := ioutil.ReadAll(resp.Body)
		return fmt.Errorf(
			"bad status code while executing cmd (%s): %d, body: %s",
			cmd, resp.StatusCode, string(respBody),
		)
	}
	return nil
}

func writeURLFromConfigV1(cfg InfluxConfig) string {
	params := url.Values{}
	v1 := cfg.V1
	params.Set("db", v1.Database)
	if v1.User != "" {
		params.Set("u", v1.User)
	}
	if v1.Pass != "" {
		params.Set("p", v1.Pass)
	}
	if v1.RetentionPolicy != "" {
		params.Set("rp", v1.RetentionPolicy)
	}
	if cfg.Precision != "n" && cfg.Precision != "" {
		params.Set("precision", cfg.Precision)
	}
	if cfg.Consistency != "one" && cfg.Consistency != "" {
		params.Set("consistency", cfg.Consistency)
	}
	return cfg.URL + "/write?" + params.Encode()
}
Long-Term outcome of drug-eluting stents compared with bare metal stents in ST-segment elevation myocardial infarction: results of the paclitaxel- or sirolimus-eluting stent versus bare metal stent in Primary Angioplasty (PASEO) Randomized Trial. BACKGROUND Drug-eluting stents may offer benefits in terms of repeat revascularization that may be counterbalanced by a potential higher risk of stent thrombosis, especially among ST-segment elevation myocardial infarction (STEMI) patients. No data have been reported so far on the long-term benefits and safety of drug-eluting stents in STEMI. Thus, the aim of the present study was to evaluate the short- and long-term benefits of sirolimus-eluting stents (SES) and paclitaxel-eluting stents (PES) compared with bare metal stents (BMS) in patients undergoing primary angioplasty. METHODS AND RESULTS Consecutive STEMI patients admitted within 12 hours of symptom onset and undergoing primary angioplasty and stent implantation at a tertiary center with 24-hour primary percutaneous coronary intervention capability were randomly assigned to BMS, PES, or SES. All patients received upstream glycoprotein IIb/IIIa inhibitors. The primary end point was target lesion revascularization at the 1-year follow-up. Secondary end points were death and/or reinfarction, in-stent thrombosis, and major adverse cardiac events (combined death and/or reinfarction and/or target lesion revascularization) at long-term follow-up (up to 4 to 6 years). Cumulative incidence of end points was investigated. No patient was lost to follow-up. From October 1, 2003, to December 31, 2005, 270 patients with STEMI were randomized to BMS (n=90), PES (n=90), or SES (n=90). Procedural success was obtained in 93% to 95% of patients. Follow-up data were available for all patients. Compared with BMS (14.4%), both PES (4.4%; hazard ratio, 0.29; 95% confidence interval, 0.095 to 0.89; P=0.023) and SES (3.3%; hazard ratio, 0.21; 95% confidence interval, 0.06 to 0.75; P=0.016) were associated with a significant reduction in target lesion revascularization at the 1-year follow-up (primary study end point). At the long-term follow-up (4.3 years; 25th to 75th percentile, 3.7 to 5 years), no difference was observed in terms of death, reinfarction, and combined death and/or reinfarction, but compared with BMS (22.2%), both PES (6.7%; hazard ratio, 0.27; 95% confidence interval, 0.11 to 0.68; P=0.005) and SES (5.6%; hazard ratio, 0.22; 95% confidence interval, 0.083 to 0.59; P=0.003) were associated with a significant reduction in target lesion revascularization. CONCLUSIONS This study shows that among STEMI patients undergoing primary angioplasty, both SES and PES are associated with significant benefits in terms of target lesion revascularization at the long-term follow-up compared with BMS with no excess risk of thrombotic complications. Thus, until the results of further large randomized trials with long-term follow-up become available, drug-eluting stents may be considered among STEMI patients undergoing primary angioplasty.
package net.ssehub.easy.reasoning.drools;

import java.io.File;
import java.io.IOException;

import org.junit.Test;

/**
 * Class to test constraints over variables of compound type.
 * 
 * @author Phani
 */
public class DroolsCompoundConstraints extends AbstractTest {

    private static final String DIRPATH = "compoundConstraints" + File.separator;

    @Test
    public void testIsDefinedInvalid() throws IOException {
        assertConsistency(DIRPATH + "IsDefinedInvalid.ivml", true);
    }

    @Test
    public void testIsDefinedValidTwo() throws IOException {
        assertConsistency(DIRPATH + "IsDefinedWithDefault.ivml", false);
    }

    @Test
    public void testIsDefinedNestedCompound() throws IOException {
        assertConsistency(DIRPATH + "IsDefinedNestedCompound.ivml", true);
    }

    /**
     * Test fails, hence it is disabled.
     * 
     * @throws IOException in case of I/O problems
     */
    //@Test
    public void testIsDefinedNestedCompoundTwo() throws IOException {
        assertConsistency(DIRPATH + "IsDefinedNestedCompoundWithdefault.ivml", false);
    }

    @Test
    public void testConstraintsAcrossCompounds() throws IOException {
        assertConsistency(DIRPATH + "ConstraintsAcrossCompoundElements.ivml", true);
    }

    //@Test
    public void testCompoundAssignmentsCheck() throws IOException {
        assertConsistency(DIRPATH + "CompoundAssignmentsCheck.ivml", true);
    }
}
/** Records a particular suppression for a region of a file. */
private static class Entry {
    /** The source name of the suppressed check. */
    private final String checkName;
    /** The suppression region for the check - first line. */
    private final int firstLine;
    /** The suppression region for the check - first column. */
    private final int firstColumn;
    /** The suppression region for the check - last line. */
    private final int lastLine;
    /** The suppression region for the check - last column. */
    private final int lastColumn;

    /**
     * Constructs a new suppression region entry.
     * @param checkName the source name of the suppressed check
     * @param firstLine the first line of the suppression region
     * @param firstColumn the first column of the suppression region
     * @param lastLine the last line of the suppression region
     * @param lastColumn the last column of the suppression region
     */
    Entry(String checkName, int firstLine, int firstColumn,
            int lastLine, int lastColumn) {
        this.checkName = checkName;
        this.firstLine = firstLine;
        this.firstColumn = firstColumn;
        this.lastLine = lastLine;
        this.lastColumn = lastColumn;
    }

    /**
     * Gets the source name of the suppressed check.
     * @return the source name of the suppressed check
     */
    public String getCheckName() {
        return checkName;
    }

    /**
     * Gets the first line of the suppression region.
     * @return the first line of the suppression region
     */
    public int getFirstLine() {
        return firstLine;
    }

    /**
     * Gets the first column of the suppression region.
     * @return the first column of the suppression region
     */
    public int getFirstColumn() {
        return firstColumn;
    }

    /**
     * Gets the last line of the suppression region.
     * @return the last line of the suppression region
     */
    public int getLastLine() {
        return lastLine;
    }

    /**
     * Gets the last column of the suppression region.
     * @return the last column of the suppression region
     */
    public int getLastColumn() {
        return lastColumn;
    }
}
Hacking the organization: Organizational Transformation by Design

Abstract
Today's organizations are facing the challenge of almost constant change. This paper suggests that design can play a bigger role in organizational transformation: one that works on structure, is based on conversation, and is focused on thinking in terms of change processes. By asking "How might design influence organizational change and system behavior in organizations understood as social systems?" we hypothesize that addressing this question through the lens of Strategic Design Planning will shed light on a set of potential competencies needed for that level of work. Accordingly, the paper synthesizes and discusses key literature from three areas: organizational change, organization as social system, and design for transformation. The result points to overlapping concepts that converge into a common statement as a basis for empowerment in human systems driven by design. The research contributes to an understanding of the design-organization interface.

Introduction
From a management consulting standpoint, Richard Normann identifies digital technology and associated connectivity as key drivers for changing the context of value creation, bringing about the disruption of existing patterns and leading to the evolution of new patterns. This transition space between strategic paradigms requires organizations to change themselves and reconfigure their business systems in order to fit better into the new environment. This requires new capabilities towards more exploratory modes of leadership, organization and interaction. It is against such a background that the rise of the Design Thinking movement can be understood. Design Thinking, and how it is framed within non-design sectors, is seen as a function of a transitional moment, "to aid in the navigation of transition" (Stewart, 2011, p. 517). However, Design Thinking, as it is currently integrated in organizations, is related only to innovation, the business side of an enterprise, and not to the organization side of it. It remains largely unspecific in the process used, and specific only insofar as it is also increasingly applied to newer problem areas. With respect to organization and related collaboration, we ask whether design can play a bigger role in organizational transformation, one that moves from integrating design thinking in innovation to a broader role in designing organizational structures and cultures. More explicitly: "How might design influence organizational change and system behavior in organizations understood as social systems?" We hypothesize that by addressing this question through the lens of Strategic Design Planning, a potential competency set for design's contribution to organizational transformation could be further sketched out. Towards that end, the paper synthesizes key literature from the organization sciences and design sciences. The result highlights key design competencies for designers in the broad sense who aim at bringing about change in the way people in an organization work together.

Strategic Design Planning
Strategic Design Planning is the most strategic of four different levels of design practice, according to Heskett's Design as Strategy model (Heskett, 2005; see Dilnot, 2016). It emphasizes corporate-wide strategies and innovation at the "Original Strategic Management" (OSM) level. A designer as planner engages in planning activities with a focus on new concepts and systems that have an organizational and strategic emphasis. 
Activity areas and related key design competencies include "systemic innovations, including new products, services and systems (...). This involves continuous innovation rather than single projects." (p. 261) The Institute of Design, where Heskett served as a professor, adds organizational processes to the list, linking the design process to the identification of opportunities (Institute of Design, 2008). Strategic Design Planning competences relate to the following principles: differentiation between process skills and domain knowledge, balancing problem definition and solution finding, addressing unframed challenges, focusing on sense-making activities prior to strange-making activities, and prioritizing human beings and human values as incorporated in the Human-Centered Design paradigm. Principles include a strong grounding in a hybrid toolbox of social science, design and business methods, a systems view, a focus on the relationship between user and business values, a consideration of the relationship between means and ends, and an estimation of the value of not-knowing as a prerequisite to understanding the not yet understood. However, the concept lacks an explicit focus and related competency on the social, human side of the actors inside the organization. In order to compensate for that gap, we propose to broaden its scope to include the issue of organizational structure. We propose this because structure, understood as organizing, is the defining element for innovating. Structure generates behavior; this is Senge's first principle of systems thinking. That way, the organization part of the enterprise and the business part of the enterprise can be addressed together and can thus relate more completely to the organizational enablers of leadership, organization, and interaction. This proposal is stimulated by an understanding of the law of requisite variety. According to that law, a system such as an organization should match the complexity of its environment in order to be able to survive. With regard to design, Jonas makes the same claim, arguing that the inner complexity of design is not sufficient to deal with the outer complexity of the contexts where it is applied. The inclusion of the organization part of the enterprise will increase that complexity and thus broaden the possibilities for design to engage and contribute in organizational transformation by opening up a space for "Starting points for alternative organization ideas" (Baecker, 1999, p. 356). In order to look more closely into design for alternative ways of organizing, we will look at how design is seen from the standpoint of organization theory and the fields of organizational development and organizational design. The trajectory can be regarded as a pull-strategy to increase relevance for practice. Romme describes a genealogy of design methodologies for organization and management. The trajectory starts with the control and coordination of production, then moves on to codified processes for planning, and from there to co-evolutionary systems approaches and organizational learning (including the idea of intervention) that shape the organization as a human social system. More recent approaches to integrating design are seen in providing the prescriptive knowledge necessary for implementation together with a focus on humanistic values (Van Aken, 2007), thus overcoming self-perceived conceptual and methodological shortcomings. 
Romme calls this the "relevance gap" in the organizational and management sciences, indicating that science-based evidence is often not relevant for the practitioners who act in design mode. Four perspectives illustrate specific explicit and implicit interpretations of what might inform a potential competency set.

Organizational Change
From an organizational development perspective, Scheuer reports on an organizational change project on clinical pathways in a psychiatric ward. He finds that "what might be most important in design processes and when doing organizational design work is to focus on how local humans and non-humans in specific organizational socio-cultural and material contexts may be mobilized in order to achieve wished-for goals. The important type of knowledge in this connection is knowledge about the organizational design process and the context in which the designing takes place." (p. 61) Scheuer's consulting perspective is an interventionist's view aimed at empowering the local team to act in design mode. From the standpoint of organizational design, Barry describes a hypothetical shift from analytical to designerly organization design, mentioning that the latter "will gradually incorporate complexity thinking," while pointing to their key differences in terms of "convergent, law-directed formulations" versus "divergent, law-breaking ideas" (p. 89), and concluding that designerly ways of organizing open up beyond organizational structure issues into intertwined organizational identity issues. Barry's scholarly perspective points to a systemic perspective, humanistic values, and ultimately to the purpose of the organization. Weick, also writing on organizational design, approaches it from a managing perspective. He compares Frank Gehry's practice of architecting with Dee Hock's designing of VISA, by analyzing how Hock was able to turn hard disagreement into agreement. The story goes as follows: on the final day of the attempt to form VISA, the meeting was polarized. Hock adjourned the meeting and invited everyone to dinner. After dinner, reminiscing about the shared experience of two years of trying to form VISA, Hock had the waiters place a small gift box in front of everyone. The box contained a set of golden cuff links, both depicting halves of the globe, one with the phrase "the will to succeed", the other with "the grace to compromise". He asked the participants to wear the cuffs the next morning, adding: "Will you please wear the cuff links to the meeting in the morning? When we part we will take with us a reminder for the rest of our lives that the world can never be united through us because we lack the will to succeed and the grace to compromise. But if by some miracle our differences dissolve before morning, this gift will remind us that the world was united because we did have the will to succeed and the grace to compromise" (Weick, 2004, p. 38, paraphrased from Hock, 1999). The next morning, agreement was reached. So much for the story. The incident is about a visionary designer moving a grand idea through many people. This is about a progression from purpose to principles to people to concept to structure to practices, but also about the limits of design, because nothing needs to be designed to the end, insofar as people "have the confidence, latitude, and expertise to self-organize the rest of the details." (p. 38) And: "The incident is about agreement. 
(...) Effective designing makes it possible for people to move toward reconciliation and coordination." (p. 38) And: "The incident is about identities and structures that are reified into solidity yet can be undone and redefined if enough social pressure or power or attractive alternatives can be mobilized. (...). The image of 'community' as the rudimentary form of organization is crucial." (p. 38) And: "Hock declares an impasse and the imminent disbanding of VISA, which sets conversations in motion (...)" (p. 39). And: "The incident is about flow, motion, dynamics, updating, negotiating, and malleability. The forms that impose order on the gatherings are transient forms that momentarily give meaning to shifting relationships." (p. 39) Hock understood that he could set no more in motion than purpose, principles and people; for the rest he had to rely on these people self-organizing. It means that designers stop before they actually design structures and processes. Weick: "He underspecifies the structure and allows others to add in density. By doing so, he increases the chance that the designing will retain vitality because people on the frontline customize the procedures and structures to meet the demands they actually face." (p. 44) Looking at the progression from purpose (dream) to structure (practices), coordination is necessary. But the more coordination there is, the bigger the danger that the original dream gets lost. Therefore Gehry builds multiple models of the same dream, blending them into a final version. The final version is therefore less fixed, and what is reified is still malleable. It leads to the possibility that the structure that contains in its reification a multiplicity of models is many structures and can update; "in other words, the structure is infused with the capability to be self-organizing." (p. 45) Weick concludes: "This is a portrait of designing that scholars of organizational design should take more seriously. Why? Because Frank Gehry has already demonstrated that it works." (p. 48) From a Systemic Intervention perspective, Koenigswieser describes an intervention practice based on a systemic integration management model. It has similarities with the design approach in the way it uses vision as a driver for development, differentiates between domain knowledge and process skills, acknowledges the importing of additional knowledge that is not yet available inside the organization, includes an in-built but open result orientation, relies on a learning process to be modified according to progress, and uses a hybrid toolbox. Unlike design, the process is framed by means of the organizational enablers of strategy, structure and culture. And unlike design, it includes three levels of intervention and/or work: the architecture level (meta-planning the intervention process), the design level (planning the intervention formats), and the tool level (working while using tools). In order to look more closely into the human system and bring it to the foreground, we will look at what constitutes a social system. Margolin conceives of a new practice of design and elaborates on the idea of "Human Systems Design" as a form of intervention. He specifically points to Vickers' definition of human systems, in that "the essence of a human system is that it is composed of human beings who bring it into being by their actions and their experiences." (Vickers, p. 175, cited in Margolin, 2005) Senge points out that "This means that we often have the power to alter structures within which we are operating." (p. 
4). The importance of human action within systems implies that these systems are inherently open to whatever result will be achieved by means of the process and of human actions and experiences. Vickers calls these 'appreciative systems', characterized by three needs: "sufficient correspondence with reality to guide action; sufficient sharing with others to mediate communication; and sufficient acceptability to make life bearable." (Vickers, p. 55, cited in Margolin, 2005) For human system designers, Margolin proposes skills to "frame a design situation", "extensive social and political analysis and engagement as well as the formulation of design propositions" (p. 161), "the capacity to analyze those contexts and incorporate the analyses into the design of workable interventions" (p. 161), "the capacity to deal with the design of systems themselves in addition to the services that a system might deliver" (p. 161), being an "advocate for adequate resources to ensure successful intervention" (p. 162), as well as "own techniques to model proposed intervention strategies" (p. 162) regarding behavioral change. But he also says that "As a new practice, the possibilities of human systems design are actually unknown." And: "The methods required to achieve such ends are yet to be developed." (p. 161) And so it is the other way around.

Organization as Social System
Regarding the need to "mediate communication", Banathy & Jenlink point to the role of dialogue in conscious evolution. "Evolutionary design is a creating activity, which brings forth a potential-driven, intended novel socio-cultural system in the evolutionary design space (...) where alternative design ideas are proposed and tested for their viability and for the 'goodness of fit' with their environment, which becomes their life-space." (p. 433) The methods are a combination of generative and strategic dialogue "of intentional social communication in an evolutionary designing community." (p. 433) The authors regard dialogue approaches, methods and tools as means of collective, consensus-building evolutionary design. Norum calls such conversations future search conversations, where the goal is creating an ideal systems future based on a shared purpose; they are seen as design conversations. In order to look more closely into creation-oriented transformative endeavors as put forward by social systems theory, we will have a look at how design theory projects the organization as a place of creation-oriented "synthetic competency" (Jonas, cited in Margolin, 2016, p. 55).

Design for Transformation
From the perspective of the design disciplines, the idea of working on structures and processes is in distant view. This can be understood when looking at two maturity frameworks developed in the field of design. They can be regarded as a push-strategy for avoiding commoditization and creating chances for value-creating capacity in more strategic roles. Buchanan offers the four orders of design model, which maps four "places of invention" for design, moving from graphic communication (symbols: words and images/symbolic and visual communications) to industrial design (physical objects/material objects) and further to interaction design (activities, services, processes/activities and organized services) and to organizational and systems design (systems, organizations, environments/complex systems or environments for living, working, playing, and learning). 
NextDesign, from a consulting perspective, offers a similar model: a sense-making framework in the form of a complexity scale that includes four paradigms to be used as practice and study zones. The framework relates to a scale of challenges in the real world and is process-, not content-focused. The scale moves from Design 1 (traditional design) to Design 2 (product design innovation), Design 3 (organizational transformation design), and Design 4 (social transformation design), but in reality these zones, like the four orders, exist in parallel. Both maturity trajectories trace a path towards greater complexity and serve the need for meaningful discussions with organization leaders about how to use design. While Buchanan does not mention any competencies related to the four orders, NextDesign is very clear about them. Both the Design 3 and Design 4 realms, in which cross-disciplinary project teams that include customers are working, comprise connected inbound and outbound tools and an externalized process, separate from content, to support unframed organizational challenges. Burns et al. highlight an evolving human-centered design practice applied to increasingly complex issues by means of interdisciplinary, participatory, capacity-building-oriented, unframed-challenge-focused, system- and cultural-change-oriented design principles, processes and skills, under the label "Transformation Design". While it highlights an intention aimed at conceiving of new solutions (systems, services, organizations, policies), it does not define the envisioned new type of practice, but mentions the "need to find ways of developing new skills and orientation on the 'supply side' of transformation design (…) and champion a human-centered design approach at the highest levels." (p. 28) Jonas puts a seemingly related idea of "Transformation Design" in the context of a shift from growth-oriented economies towards a post-growth society, with a focus on the "social dimensions and conditions of designing" (p. 009). Explicitly referring to Design 4 (social transformation design) in NextDesign's model and to Herbert Simon in the chapter on "Designing the Evolving Artifact", he mentions bottom-up design projects with transformative character that might spread. These hint at reversible and scalable projects in the form of local learning and living labs, aimed not at final solutions, but at increasing variety of choice and multiple possible alternatives. Jonas envisions broader notions of design, a shift from consumerism to organization and social issues by means of communication processes between people and within systems and networks, adding that "The main subject of transformation design is open communication processes, which serve for a creative enquiry into new potentialities and can be designed and realized in the form of new organizational structures and cultures, systemic innovations, or collaborative educational forms. The final goal is behavior change." (p. 009) In another text preceding "Transformation Design", Jonas elaborates on Design as Systemic Intervention. New systems thinking, based on second-order cybernetics, provides tools for management theory and organizational development. The key is not new tools, but their intelligent combination, integration and application. The focus is on "discursive tools that structure the communicative process in design teams across disciplines and between stakeholders in the design process and make it transparent." (p.
12) Discursive tools work hand in hand with the design process, which should be linked with circular learning models, "where abductive PROJECTION is the neglected link in the cycle" (Jonas, n. d.). In order for design to become a partner discipline within the network of future-designing disciplines, he advocates thinking in terms of change processes rather than incidents. Because incidents are caused by underlying structures, designers need new skills to uncover the structural causes of patterns. Concluding: "Thinking in terms of change processes rather than 'snapshots' is yet another way." (p. 19)
Discussion
So what? Starting with the question of "How might design influence organizational change and system behavior in organizations, understood as social systems?", we can say that the systemic intervention approach, which is already built on systems theory, offers a good heuristic on which to build a Strategic Design Planning competency aimed at organizational transformation. Driven by design theorists, design theory informed by systems theory is evolving, but the related design education and design practice are lagging behind, disconnected from the real world of organizational and social transformation. The change fields are pulling design by means of the design process and related methods, but it remains to be seen how that strategy will unfold. The theory-informed design maturity trajectories point towards increasing complexity and scale, and ask for a yet-to-be-developed competency set. Communication and dialogue as well as discursive tools are at the heart of setting purpose, principles and people in motion towards self-organization. The continuum from purpose to practices denotes zones of danger for moving a grand idea or dream or vision through many people without it getting lost. Such settings ask for new ways to support designing evolutionary communities. Bate, Khan, and Pye observe that "design is a bare bones framework on which a more organic, emergent, social structure develops as people interact, argue, fall out, come together, and otherwise manage their day to day situation" (p. 199, cited in Weick, 2004). This observation then shifts the idea of design towards a relationship between the scaffold (the bare bones) and the sculpture (the form), both of which need to be designed. From a planning perspective, the scaffold is about organizing and managing and how-related competencies, and the sculpture is about designing and innovating and what-related competencies, in the form of bricolage-like representations linked to agreements. An iterative oscillation between chaos and order, abstract and concrete, structure and identity, as in more traditional design projects, remains. The social activity of sense-making will to a large degree increase at the expense of strange-making (or differentiation), thus shifting Strategic Design Planning activities upstream towards corporate-wide, transformation-oriented strategies. Strange-making will most probably disappear in favor of an increase in designing transitional moments and forms to guide and stimulate conversations and agreements, and of keeping options open as long as possible. Thus a set of competencies might include strong planning skills for architecting a design space, strong skills in designing options of open formats, and strong skills of social attention, conversation and dialogue to sense dynamics that might need to be nurtured and adjusted.
When it comes to addressing the structural part of organizational transformation, it seems an exponential increase and shift in knowledge and skills will be needed.
Conclusion
Looking from a Strategic Design Planning perspective at the converging fields of social systems (theory) and systems-theory-informed design (theory) offers a rich perspective on potential competencies for a design practice aimed at organizational transformation. Such a competency might complement an evolving innovation-oriented strategic competency of design. Organizational and social transformation designers can tap into a rich reservoir of approaches and skills, with the goal of intelligent combinations. Nevertheless, a lot more work is required to arrive at first good heuristics to launch as prototypes into the world of organizations. This approach resonates well with Laloux's principle of "listening to evolutionary purpose" of the organization as a principle for transformation. We would add here a "listening to evolutionary purpose" of the evolving field of design.
/*
 * Serializable.cc : part of the Mace toolkit for building distributed systems
 *
 * Copyright (c) 2011, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
 * <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *    * Neither the names of the contributors, nor their associated universities
 *      or organizations may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ----END-OF-LEGAL-STUFF---- */
#include <sstream>
#include <iostream>

#include "Serializable.h"
#include "XML_RPCSerialization.h"
#include "Log.h"
#include "mstring.h"

namespace mace {

int nextLog = 1; // the events table gets id 0
int nextEvent = 0;

int nextLogId() {
  return nextLog++;
}

int nextEventId() {
  return nextEvent++;
}

int LogNode::simpleCreate(const std::string& type, const mace::LogNode* node) {
  int next = nextIndex();
  if (next == 0) {
    std::ostringstream out;
    out << "CREATE TABLE " << node->logName
        << " (_id INT PRIMARY KEY, value " << type << ");" << std::endl;
    Log::logSqlCreate(out.str(), node);
  }
  return next;
}

// int getIndex(const std::string& tableName) {
//   return tableOffsetMap[tableName];
// }

int Serializable::deserializeXML_RPC(std::istream& in) throw(SerializationException) {
  // throw SerializationException("XML-RPC deserialization not supported");
  std::string s;
  int r = mace::deserializeXML_RPC(in, &s, s);
  istringstream is(s);
  deserialize(is);
  return r;
}

void Serializable::serializeXML_RPC(std::string& str) const throw(SerializationException) {
  // throw SerializationException("XML-RPC serialization not supported");
  std::string s;
  serialize(s);
  mace::serializeXML_RPC(str, &s, s);
}

} // namespace mace
/********************************************************************************
** Copyright(c) 2015 USTC & MSRA All Rights Reserved.
** auth: <NAME>
** mail: <EMAIL>
** date: 2015/12/13
** desc: Caffe-video common
*********************************************************************************/

#include <string>
#include <utility>
#include <vector>

#include "caffe/layers/video_common.hpp"

namespace caffe {

using namespace std;

// Builds the 5-D blob shape (num, channels, length, height, width) used by
// the video layers.
vector<int> video_shape(int num, int channels, int length, int height, int width) {
  vector<int> shape(5, 0);
  shape[0] = num;
  shape[1] = channels;
  shape[2] = length;
  shape[3] = height;
  shape[4] = width;
  return shape;
}

}  // namespace caffe
/*
 * (c) 2014 LinkedIn Corp. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use
 * this file except in compliance with the License. You may obtain a copy of the
 * License at  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed
 * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied.
 */
package com.linkedin.cubert.memory;

import com.linkedin.cubert.block.BlockSchema;
import com.linkedin.cubert.block.ColumnType;
import com.linkedin.cubert.block.DataType;
import org.apache.pig.backend.executionengine.ExecException;
import org.apache.pig.data.BagFactory;
import org.apache.pig.data.DataBag;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.linkedin.cubert.utils.DataGenerator;

import java.util.Iterator;

/**
 * Unit Test class for IntArrayList, LongArrayList, DoubleArrayList, SegmentedArrayList
 *
 * Created by spyne on 1/8/15.
 */
public class TestSegmentedArrayLists {
    @Test
    public void testIntArrayListAddAndGet() throws Exception {
        IntArrayList list = new IntArrayList(101);
        DataGenerator dgen = new DataGenerator();
        final int size = 1000;
        final int[] ints = dgen.randomInts(size);
        for (int i = 0; i < size; ++i) {
            list.addInt(ints[i]);
        }
        Assert.assertEquals(size, list.size());
        for (int i = 0; i < size; ++i) {
            Assert.assertEquals(ints[i], list.get(i));
        }
        for (int i = 0; i < size - 1; ++i) {
            final Integer act1 = ints[i], act2 = ints[i + 1];
            Assert.assertEquals(act1.compareTo(act2), list.compareIndices(i, i + 1));
        }
    }

    private static int upperBound(int number, int multipleOf) {
        if (number % multipleOf == 0)
            return number;
        return ((number / multipleOf) + 1) * multipleOf;
    }

    @Test
    public void testIntArrayGrowability() throws Exception {
        final int BATCH_SIZE = 10;
        IntArrayList list = new IntArrayList(BATCH_SIZE);
        final int MINUS_FOUR = -4;
        list.setDefaultValue(MINUS_FOUR);

        // ensure that it can hold 25 elements
        final int INITIAL_SIZE = 25;
        list.ensureCapacity(INITIAL_SIZE);
        Assert.assertEquals(list.capacity(), upperBound(INITIAL_SIZE, BATCH_SIZE));

        // test that all 25 elements are set to default value
        for (int i = 0; i < INITIAL_SIZE; i++) {
            Assert.assertEquals(list.getInt(i), MINUS_FOUR);
        }

        // update values for some elements
        final int NEW_VALUE = 3;
        for (int i = 0; i < 10; i++) {
            list.updateInt(i, NEW_VALUE);
        }

        // test
        for (int i = 0; i < 25; i++) {
            Assert.assertEquals(list.getInt(i), i < 10 ? NEW_VALUE : MINUS_FOUR);
        }

        // resize
        final int INCREASED_SIZE = 39;
        list.ensureCapacity(INCREASED_SIZE);
        Assert.assertEquals(list.capacity(), upperBound(INCREASED_SIZE, BATCH_SIZE));

        // test values are not affected by growing
        for (int i = 0; i < INCREASED_SIZE; i++) {
            Assert.assertEquals(list.getInt(i), i < 10 ? NEW_VALUE : MINUS_FOUR);
        }

        // reset
        final int RESET_SIZE = 12;
        list.reset(RESET_SIZE);
        Assert.assertEquals(list.capacity(), upperBound(RESET_SIZE, BATCH_SIZE));

        // test values are reset as well
        for (int i = 0; i < RESET_SIZE; i++) {
            Assert.assertEquals(list.getInt(i), MINUS_FOUR);
        }
    }

    @Test
    public void testLongArrayListAddAndGet() throws Exception {
        LongArrayList list = new LongArrayList(101);
        DataGenerator dgen = new DataGenerator();
        final int size = 1000;
        final long[] longs = dgen.randomLongs(size);
        for (int i = 0; i < size; ++i) {
            list.addLong(longs[i]);
        }
        Assert.assertEquals(size, list.size());
        for (int i = 0; i < size; ++i) {
            Assert.assertEquals(longs[i], list.get(i));
        }
        for (int i = 0; i < size - 1; ++i) {
            final Long act1 = longs[i], act2 = longs[i + 1];
            Assert.assertEquals(act1.compareTo(act2), list.compareIndices(i, i + 1));
        }
    }

    @Test
    public void testDoubleArrayListAddAndGet() throws Exception {
        DoubleArrayList list = new DoubleArrayList(101);
        DataGenerator dgen = new DataGenerator();
        final int size = 1000;
        final double[] doubles = dgen.randomDoubles(size);
        for (int i = 0; i < size; ++i) {
            list.add(doubles[i]);
        }
        Assert.assertEquals(size, list.size());
        for (int i = 0; i < size; ++i) {
            Assert.assertEquals(doubles[i], list.get(i));
        }
        for (int i = 0; i < size - 1; ++i) {
            final Double act1 = doubles[i], act2 = doubles[i + 1];
            Assert.assertEquals(act1.compareTo(act2), list.compareIndices(i, i + 1));
        }
    }

    @Test
    public void testSegmentedArrayListAddAndGet() throws Exception {
        ObjectArrayList list = new ObjectArrayList(101);
        DataGenerator dgen = new DataGenerator();
        final int size = 1000;
        final String[] strings = dgen.randomStrings(size);
        for (int i = 0; i < size; ++i) {
            list.add(strings[i]);
        }
        Assert.assertEquals(size, list.size());
        for (int i = 0; i < size; ++i) {
            Assert.assertEquals(strings[i], list.get(i));
        }
        for (int i = 0; i < size - 1; ++i) {
            final String act1 = strings[i], act2 = strings[i + 1];
            Assert.assertEquals(act1.compareTo(act2), list.compareIndices(i, i + 1));
        }
    }

    @Test
    public void testBagArrayList() throws Exception {
        SegmentedArrayList array = new BagArrayList(new BlockSchema("INT a, DOUBLE b, STRING c"), false);
        final int N = 10000;
        DataBag[] bags = new DataBag[N];
        int counter = 0;
        for (int i = 0; i < N; i++) {
            Tuple[] tuplesInBag = new Tuple[(i % 5) + 1];
            for (int j = 0; j < tuplesInBag.length; j++) {
                tuplesInBag[j] = createTuple(counter, counter * 1.0, Integer.toString(counter));
                counter++;
            }
            bags[i] = createBag(tuplesInBag);
        }
        for (DataBag bag : bags)
            array.add(bag);
        Assert.assertEquals(array.size, N);
        for (int i = 0; i < bags.length; i++) {
            assertBagEqual((DataBag) array.get(i), bags[i]);
        }
    }

    @Test
    public void testNestedSchema() throws Exception {
        ColumnType tupleFieldType =
                new ColumnType("element", DataType.TUPLE, new BlockSchema("STRING name, STRING term, FLOAT value"));
        BlockSchema tupleSchema = new BlockSchema(new ColumnType[] { tupleFieldType });
        ColumnType bagType = new ColumnType("bag", DataType.BAG, tupleSchema);
        BlockSchema schema =
                new BlockSchema(new ColumnType[] { new ColumnType("member_id", DataType.INT), bagType });

        final int N = 10000;
        int counter = 1;
        Tuple[] data = new Tuple[N];
        for (int i = 0; i < N; i++) {
            Tuple[] tuplesInBag = new Tuple[(i % 5) + 1];
            for (int j = 0; j < tuplesInBag.length; j++) {
                tuplesInBag[j] = createTuple("name " + counter,
                                             "term " + counter,
                                             (counter % 3 == 0) ? null : counter * 1.0f);
                counter++;
            }
            DataBag bag = createBag(tuplesInBag);
            data[i] = createTuple(i, bag);
        }

        ColumnarTupleStore store = new ColumnarTupleStore(schema, true);
        for (Tuple t : data)
            store.addToStore(t);

        Assert.assertEquals(store.getNumTuples(), N);
        for (int i = 0; i < N; i++) {
            Tuple actual = store.getTuple(i, null);
            Tuple expected = data[i];
            Assert.assertEquals(actual.get(0), expected.get(0));
            assertBagEqual((DataBag) actual.get(1), (DataBag) expected.get(1));
        }
    }

    private Tuple createTuple(Object... args) throws ExecException {
        Tuple tuple = TupleFactory.getInstance().newTuple(args.length);
        for (int i = 0; i < args.length; i++) {
            tuple.set(i, args[i]);
        }
        return tuple;
    }

    private DataBag createBag(Tuple... tuples) {
        DataBag bag = BagFactory.getInstance().newDefaultBag();
        for (Tuple tuple : tuples)
            bag.add(tuple);
        return bag;
    }

    private void assertBagEqual(DataBag bag1, DataBag bag2) {
        Iterator<Tuple> it1 = bag1.iterator();
        Iterator<Tuple> it2 = bag2.iterator();
        while (it1.hasNext()) {
            Assert.assertTrue(it2.hasNext());
            Tuple tuple1 = it1.next();
            Tuple tuple2 = it2.next();
            Assert.assertEquals(tuple1, tuple2, tuple1.toString() + " != " + tuple2.toString());
        }
        Assert.assertFalse(it2.hasNext());
    }
}
/* Function: al_mutex_destroy

   Description: Destroy a mutex previously initialized with <al_mutex_init>. */
int al_mutex_destroy(al_mutex_t *mutex)
{
#if THDEBUG > 2
    th_printf(" MDestroy:%08x,%08x\n", mutex, *mutex);
#endif
    return pthread_mutex_destroy((pthread_mutex_t *)mutex);
}
You probably don’t think about the environmental effect of your inhaler, but a new report says it might be more damaging than you think. The National Institute for Health and Care Excellence (NICE) said 70% of inhalers used by those with asthma in the UK emit high levels of greenhouse gases. In comparison, in Sweden such inhalers make up 10%. There are different types of inhalers, which all have varying carbon footprints. While a breath-actuated metered dose inhaler (BAI) is widely used in the UK, NICE said more people could be using a dry powder inhaler. A dry inhaler is better for the environment as it contains 25 times fewer pollutants such as carbon dioxide. It is the first time NICE has looked at the carbon footprint of medicine, and it is part of the NHS’s ten-year plan to be more environmentally conscious. The public health body published a report detailing how more people could be using dry inhalers. Asthma affects the airways and can make it difficult to breathe. It may cause symptoms such as chest tightness, wheeziness or coughing. Inhalers are devices that deliver medicine into the lungs to help with the symptoms of asthma. There are different types of medicines available. Your healthcare professional will discuss with you which medicine is recommended for you. Most asthma medicines are available in more than one type of inhaler. NICE says everyone should be able to choose the inhaler they find easiest to use. NICE also says that everyone should have the way they use their inhaler checked regularly. If needed, people should be given advice on how to improve their technique. The metered dose inhaler has propellants called hydrofluorocarbons which deliver medicine quickly to a person having an asthma attack. This lets out 500g of carbon dioxide per dose, and if you take five doses, it is equivalent to the carbon footprint of a car on a nine-mile trip. Dry inhalers only emit 20g of carbon dioxide, but these types are recommended for those with milder attacks. ‘Patients need to talk to health staff about what inhalers are best for them,’ said Professor Gillian Leng from NICE. ‘People who need to use metered dose inhalers should absolutely continue to do so, but if you have the choice of a green option – do think about the environment.’
Rapamycin suppresses axon sprouting by somatostatin interneurons in a mouse model of temporal lobe epilepsy
Purpose: In temporal lobe epilepsy many somatostatin interneurons in the dentate gyrus die. However, some survive and sprout axon collaterals that form new synapses with granule cells. The functional consequences of γ-aminobutyric acid (GABA)ergic synaptic reorganization are unclear. Development of new methods to suppress epilepsy-related interneuron axon sprouting might be useful experimentally.
Reconsidering the Displacement Hypothesis
This study addresses continuing concern over television's displacement of other leisure activities from both substantive and methodological perspectives. It examines past conceptualizations of the mechanism by which television is assumed to displace other activities. Following a critical review of the displacement literature, the authors examine data from an 8-year panel study of the introduction of television to South Africa and use a variety of methodological approaches to illustrate a major source of inconsistency in findings from previous studies. The displacement mechanism is found to be asymmetric in nature; that is, although increases in television viewing force out some other activities, decreases in television viewing do not result in parallel increases in levels of any of these activities. This pattern of findings was most pronounced in the case of radio use and movie attendance. Implications for conceptualization of the displacement process are discussed in relation to these findings.
The Effect of Different Doses of Vitamin D Supplementation on Insulin Resistance in Ovariectomized Rats
Background and Aim: Type 2 diabetes mellitus (T2DM) and vitamin D deficiency are both very common during menopause. Since the effects of different doses of vitamin D supplements on blood sugar, insulin concentration and insulin resistance are unknown, the present study aimed at investigating the effects of different doses of vitamin D supplements on visceral fat, blood sugar, insulin concentration, and insulin resistance in ovariectomized rats. Materials and Methods: In this randomized experimental study, 32 female Wistar rats were divided into 4 equal groups as follows: three groups that received vitamin D supplements (high, moderate, and low dose) and one control group. After 8 weeks of different doses of vitamin D supplementation, plasma concentrations of glucose and insulin and HOMA-IR were measured in the three groups. The obtained data was statistically analyzed by means of dependent t-test and ANOVA at the significance level of P<0.05. Results: After the eight-week intervention, body weight, BMI, waist circumference, visceral fat, insulin, blood glucose and HOMA-IR at high, moderate, and low doses of vitamin D supplementation were significantly lower than those in the control group (P<0.05). A high dose of vitamin D, compared with moderate and low doses, caused a significant reduction in insulin, blood glucose, and HOMA-IR (P<0.001 for all three variables). Conclusion: The findings of the current study showed that a high dose of vitamin D causes significant improvements in FPG, insulin, and insulin resistance evaluated by HOMA-IR. It was also found that adding vitamin D supplements can improve glucose control in a menopausal rat model.
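For reference, HOMA-IR, the insulin-resistance index used throughout this abstract, is derived from fasting glucose and insulin by a standard formula; a minimal sketch in Python (the example numbers are illustrative, not the study's data):

def homa_ir(fasting_glucose_mmol_per_l: float, fasting_insulin_uU_per_ml: float) -> float:
    """Homeostatic Model Assessment of Insulin Resistance.

    Standard formula: (fasting glucose [mmol/L] * fasting insulin [uU/mL]) / 22.5.
    (With glucose in mg/dL the divisor is 405 instead.)
    """
    return (fasting_glucose_mmol_per_l * fasting_insulin_uU_per_ml) / 22.5

# Example: glucose 5.5 mmol/L, insulin 12 uU/mL -> HOMA-IR ~ 2.93
print(homa_ir(5.5, 12.0))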
package de.metahlfabric;

import com.google.gson.*;
import org.hyperledger.fabric.contract.annotation.DataType;
import org.hyperledger.fabric.contract.annotation.Property;

import java.util.ArrayList;

/**
 * A PrivateMetaObject is linked to a {@link MetaObject} and stores the private information.
 *
 * @author <NAME>, <NAME>
 * <p>
 * Copyright 2021 OTARIS Interactive Services GmbH
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
@DataType()
public class PrivateMetaObject {

    /**
     * The private attributes to store
     */
    @Property()
    ArrayList<MetaAttribute<?>> attributes = new ArrayList<>();

    /**
     * Class constructor
     */
    public PrivateMetaObject() {
    }

    /**
     * @return the map of private attributes
     */
    public ArrayList<MetaAttribute<?>> getAttributes() {
        return attributes;
    }

    /**
     * @param attrName  the name of the attribute to add
     * @param attrValue the value of the attribute to add
     */
    public void addAttribute(String attrName, int version, String attrValue, MetaDef.AttributeDataType type)
            throws NumberFormatException, JsonSyntaxException {
        this.deleteAttribute(attrName);
        switch (type) {
        case Boolean:
            attributes.add(new MetaAttribute<>(attrName, version, Boolean.parseBoolean(attrValue)));
            break;
        case Number:
            attributes.add(new MetaAttribute<>(attrName, version, Double.parseDouble(attrValue)));
            break;
        case String:
            attributes.add(new MetaAttribute<>(attrName, version, attrValue));
            break;
        case BooleanArray:
            ArrayList<Boolean> array = new ArrayList<>();
            JsonArray jsonArray = new Gson().fromJson(attrValue, JsonArray.class);
            for (JsonElement number : jsonArray) {
                array.add(number.getAsBoolean());
            }
            attributes.add(new MetaAttribute<>(attrName, version, array));
            break;
        case NumberArray:
            ArrayList<Double> array2 = new ArrayList<>();
            JsonArray jsonArray2 = new Gson().fromJson(attrValue, JsonArray.class);
            for (JsonElement number : jsonArray2) {
                array2.add(number.getAsDouble());
            }
            attributes.add(new MetaAttribute<>(attrName, version, array2));
            break;
        case StringArray:
            ArrayList<String> array3 = new ArrayList<>();
            JsonArray jsonArray3 = new Gson().fromJson(attrValue, JsonArray.class);
            for (JsonElement text : jsonArray3) {
                array3.add(text.getAsString());
            }
            attributes.add(new MetaAttribute<>(attrName, version, array3));
            break;
        }
    }

    /**
     * @param attrName the name of the attribute to delete
     */
    public void deleteAttribute(String attrName) {
        for (MetaAttribute<?> attribute : this.attributes) {
            if (attribute.name.equalsIgnoreCase(attrName)) {
                attributes.remove(attribute);
                return;
            }
        }
    }

    /**
     * @return the object as a json string
     */
    public String toString() {
        return toJSONString();
    }

    /**
     * @return the object as a json string
     */
    public String toJSONString() {
        return new Gson().toJson(this);
    }

    /**
     * @return the json object
     */
    public JsonObject toJSON() {
        return new Gson().fromJson(this.toJSONString(), JsonObject.class);
    }
}
Occupant behaviour and thermal comfort in buildings: Monitoring the end user
Studies indicate that the energy performance gap between real and calculated energy use can be explained for 80% by occupant behaviour. This human factor may be composed of routine and thermoregulatory behaviour. When occupants do not feel comfortable due to high or low operative temperatures and resulting high or low skin temperatures, they are likely to exhibit thermoregulatory behaviour. The aim of this study is to monitor and understand this thermoregulatory behaviour of the occupant. This is a detailed study of two females living in a rowhouse in the city of Heerlen (Netherlands). During a monitoring period of three weeks over a time span of three months the following parameters were monitored: activity level, clothing, micro climate, skin temperatures, and thermal comfort and sensation. Their micro climate was measured at five positions on the body to assess exposed near-body conditions and skin temperature. Every two hours they filled in a questionnaire regarding their thermal comfort and sensation level (7-point scale), clothing, activities and thermoregulatory behaviour. The most comfortable (optimal) temperature was calculated for each person by adopting a biophysical model, a thermoneutral zone model. This study shows unique individual comfort patterns in relation to ambient conditions. An example is given of how this information can be used to calculate the building's energy consumption.
Introduction
Studies indicate that the human factor is responsible for 80% of the performance gap between calculated and realized energy use. Over the last couple of years, it has become easier to calculate building-related energy performance with simulation programs. However, these calculations are focussed on the energy use of buildings, based on fixed user behaviour conditions and reference climate years. Therefore, these calculations lack information about real-life performances. In this way an energy performance gap is inevitable, and a result of this gap is the strong deviation in energy use between almost similar apartments. This is caused by "the human factor", with daily routines and thermoregulatory behaviour as the most important parameters. Examples of thermoregulatory behaviour are moving from one room to another, changing clothes and adjusting the thermostat. These actions are taken to increase comfort levels. Previous research by Fanger indicated that there are a number of parameters that contribute to the thermal environment, such as air temperature, mean radiant temperature, relative air velocity, vapor pressure in ambient air, the human activity level and the clothing worn. This is called the PMV-model and is based on the predicted mean vote (PMV) of the general population. Several studies show a good agreement between the PMV and the actual mean vote; this is particularly found for uniform and steady-state environments. Other studies found differences between the PMV and the actual mean vote due to differences in subpopulations (e.g. males versus females, lean versus obese, elderly and young). In this study an adapted version of the thermoneutral zone (TNZ) model is used, which describes thermal comfort in relation to environmental and skin temperature and is a more individual approach to investigating comfort. This model may be more suitable to clarify the variation in heating demand between similar dwellings by means of differences in thermoregulatory behaviour of occupants.
The area in which people feel neutral in their environment is called the TNZ, wherein the comfort zone acts as centre. The TNZ is defined as 'the range of ambient temperature at which temperature regulation is achieved only by control of sensible (dry) heat loss, i.e. without regulatory changes in metabolic heat production or evaporative heat loss'. This definition only includes physiological mechanisms of the body, e.g. changing the skin temperature to maintain the body's core temperature, and does not include the individual thermoregulatory behaviour of people. ASHRAE concluded that thermal behaviour is for people a major factor in coming to satisfaction with their thermal environment. Thermoregulatory behaviour will therefore probably occur when occupants experience discomfort. Previous research has focused only on experiments in laboratories or artificial environments designed to reflect the real living or working space. No experiments have been conducted with test-subjects in their own home or working space. This lack is caused by the difficulty of controlling all the physical parameters in a dwelling (e.g. temperature, relative humidity, ventilation, air velocity, CO2 concentration). Therefore, during this study the test-subjects were living in a fully monitored dwelling where they were closely observed and monitored to obtain insight in their individual thermoregulatory behaviour. The aim of this study is to provide more insight in the thermal environment of occupants in real-life situations and to explain differences in energy use by these differences in individual thermoregulatory behaviour in similar dwellings, using measured physiological parameters and a biophysical model.
The thermoneutral zone model
The TNZ model includes physiological differences between occupants. From a biological point of view the body wants to use as little energy as possible to ensure its core temperature, which is shown in Fig. 1.
Fig. 1. Schematic view of the thermoneutral zone where the body is able to maintain body temperature without regulatory increases in metabolic rate or sweating. The distance to the center of the thermoneutral zone is hypothesized as a driving factor for thermal behaviour.
The core temperature of a body is very stable and around 37 degrees Celsius. The area in which the body is in a neutral state is called the thermoneutral zone, wherein the comfort zone acts as centre. When the operative temperature increases or decreases, the body has to work harder to maintain its core temperature by regulating its blood vessels to increase or decrease the heat loss of the body. When regulating through dry heat loss is not sufficient, the temperature will be regulated by sweating or shivering. In addition, thermoneutrality can be achieved at a range of body skin temperatures. The skin temperature is determined by the amount of body fat and the amount of produced heat, which is known from the person's metabolism. As a result, the optimal operative temperature can be determined by combining the thermal resistance of the clothing with the relative humidity and the air velocity of the environment. The influence of all these parameters can be derived from these calculations; this makes it possible to calculate the TNZ as a relation between the skin temperature and the operative temperature, see Fig. 2.
Fig. 2. Skin temperature and operative temperature.
As can be seen in Fig. 2, the grey zone indicates the thermoneutral zone.
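Conceptually, the TNZ of Fig. 2 is a region in the (operative temperature, skin temperature) plane, and the distance of a measured pair to the comfort centre is the hypothesized driver of thermoregulatory behaviour. A toy Python sketch of that check; the zone bounds here are made-up placeholders, not the output of the biophysical model:

from dataclasses import dataclass

@dataclass
class ThermoneutralZone:
    """Illustrative TNZ as a rectangle in the (operative T, skin T) plane.

    The real zone is computed from a biophysical model (body fat, metabolism,
    clo-value, humidity, air velocity); these bounds are placeholders.
    """
    t_op_min: float = 18.0    # operative temperature bounds [degC]
    t_op_max: float = 24.0
    t_skin_min: float = 32.5  # skin temperature bounds [degC]
    t_skin_max: float = 35.5

    def contains(self, t_op: float, t_skin: float) -> bool:
        return (self.t_op_min <= t_op <= self.t_op_max
                and self.t_skin_min <= t_skin <= self.t_skin_max)

    def distance_to_centre(self, t_op: float, t_skin: float) -> float:
        """Distance to the comfort centre, hypothesized to drive behaviour."""
        c_op = (self.t_op_min + self.t_op_max) / 2
        c_skin = (self.t_skin_min + self.t_skin_max) / 2
        return ((t_op - c_op) ** 2 + (t_skin - c_skin) ** 2) ** 0.5

zone = ThermoneutralZone()
# A pair like (17.6, 35.2) lies outside this (made-up) zone on the cold side:
print(zone.contains(17.6, 35.2), zone.distance_to_centre(17.6, 35.2))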
The position of the thermoneutral zone in the graph depends on the characteristics of the person, like body fat and metabolism. In addition, the position depends on the clo-value, relative humidity and air velocity. Outside of this zone, the body will need to regulate its temperature more extensively through sweating and shivering, which can be experienced as uncomfortable. The thermoneutral zone model can be used to explain differences in comfort and thermoregulatory behaviour between occupants. To use the thermoneutral zone model, the skin and exposed temperatures of the occupants need to be measured. These combinations of temperatures can then be used to predict the comfort level and the expectation that the occupant exhibits thermoregulatory behaviour.
Energy consumption model
The goal of this study is also to explain differences in energy use due to differences in thermoregulatory behaviour of occupants in similar dwellings. The hypothesis is that a difference in comfortable indoor temperature between occupants might be the reason for the variation in energy demand between similar dwellings. Therefore, real-life measurements are taken into account when calculating the energy use of the dwelling, in order to include the actual differences in setpoint temperatures. For this purpose a correlation model is developed. In the equation Q = Qout - Qin, Q is the total heating demand for a dwelling in kWhth. Qout stands for the heat flows going out of the dwelling, e.g. ventilation, infiltration and transmission. Qin stands for the incoming heat flows by internal heat gain and solar radiation. The development of the model is based on this equation. In addition, this model is developed because it is easily adjustable to different utilization and building characteristics, and it is possible to investigate differences in utilization of the dwelling by, for example, reverse modelling. Furthermore, real-time measurements can be used in the calculation of Q (e.g. in- and outdoor temperatures, ventilation flow, wind speed and energy use of appliances and installations). From previous research it is known that there is a strong linear correlation between the energy demand and the difference between in- and outdoor temperature (ΔT). This means that the energy demand will always increase when the difference between the in- and outdoor temperature increases (a higher ΔT). Fig. 3 shows this principle. The slope and the place of the line in the graph depend on different aspects and the utilization of the dwelling, such as the number of occupants, the ventilation rate, the internal heat load, the efficiency of the energy production and the amount of insulation.
Methods
The two female subjects (age 22 and 20) lived in a single-family rowhouse which was recently renovated to a near zero energy building. There were three measuring weeks in total over a period of three months (Oct–Dec 2015). Data was collected in several ways to get more insight in the thermal environment and the behaviour of occupants in real-life situations.
Data collection
First, general information was collected about the subjects before the measurements started. The test-subjects filled in a general questionnaire regarding illnesses and medicine use. In addition, anthropometric data was collected, e.g. age, sex, height and weight. The female test-subjects were using a birth control pill or IUD and were not measured during their menstruation period to exclude hormonal effects on thermoregulation.
The data was used to calculate the basal metabolic rate (BMR) with the Harris-Benedict equation, and the total body surface area is calculated with the DuBois method. Second, data was collected by energy use measurements of the house, and the following parameters were measured in the house: in- and outdoor temperatures, set-point temperatures, CO2 concentrations, ventilation flows, heat flow of central heating and tap water, electrical power use, and the electrical use of installations (heat pump, boilers and ventilation). Third, data was collected by using different sensors on the subjects' bodies: skin temperature sensors (Fig. 4), an iButton worn as a brooch on the outer side of their clothing to measure the micro climate (exposed temperature), temperature and relative humidity (this was placed with a brooch at the outer layer of the clothing), and an Actiwatch® to measure their activity level. Fourth, data was collected by questionnaires which had to be filled in by the subjects every two hours. The questions were about their perceived thermal comfort and sensation (7-point scale), their clothing, their activity level and thermoregulatory behaviour. The corresponding clo-values were calculated using the results from the questionnaire.
Results
Most of the results presented in this paper are of one female test-subject, test-subject 1. The same analysis is conducted on the data of test-subject 2. During this study the different perceived sensation and comfort votes of the two subjects are handled separately. For both test-subjects no correlations could be found between skin temperatures and comfort or sensation vote (respectively R = 0.0087, p = 0.1947 and R = 0.0007, p = 0.7212). In the PMV-model of Fanger, sensation and comfort are identified as one and the same. Fig. 5 shows the comfort vote in relation to the sensation vote. A significant correlation can be found (R = 0.5000, p = 0.0279). As can be derived from the trend line, test-subject 1 is most comfortable (average comfort vote of 1) at a sensation vote of 1, 'slightly warm'. Test-subject 2 showed the same trend.
Fig. 6. Mean skin temperature against exposed temperature (R = 0.0100, p = 0.1177) of test-subject 1 for all measuring weeks
Fig. 6 shows the mean skin temperature in relation to the exposed temperature, which is the ambient temperature in proximity of the subject (sensors on the outer layer of the clothes). From the more than 2000 measuring points, no real correlations can be found (R = 0.0100, p = 0.1177). However, the density of the measuring points indicates that there are skin temperature and exposed temperature combinations that occurred frequently.
Fig. 7. Contourplot of the skin and exposed temperature combinations at maximum comfort (score of 1 or higher in the questionnaire)
A difference in comfort and thermoregulatory behaviour is shown between the two subjects. In Fig. 8 the contour plots are shown for test-subject 2 (upper graph) and test-subject 1 (lower graph). Both graphs show that the measurement zones of test-subject 2 are moved to the left of the graph compared to the measurements of test-subject 1. This means that test-subject 2 is more comfortable at lower temperatures than test-subject 1.
Fig. 8. Contourplot of the exposed and skin temperatures for test-subject 2 (upper graph) and test-subject 1 (lower graph)
Combining this information with the questionnaire outcomes identifies center a. in Fig. 7 with measurements during the night. The test-subjects wore the sensors continuously (24 hours/day), which means that there are also many measuring points during the night. Center b.
corresponds with standing activities in an indoor environment with regular winter clothing (e.g. jeans and sweater, clo-value ±0.8). Center c. corresponds with sitting/resting activities and lower clo-values (<0.6).
Fig. 9. Contourplot of measurements including the calculated TNZ for test-subject 1, with parameters that should represent the micro climate in bed
Fig. 9 shows the thermoneutral zone with the characteristics of test-subject 1, with parameters that should represent the micro climate in bed, e.g. a low air velocity, a high relative humidity, a low metabolism and a high clo-value for the blanket. As can be seen in the figure, this calculated TNZ corresponds with the measured data (hot spot "a"). Fig. 10 shows a range of calculated thermoneutral zones with the characteristics of this test-subject, with parameters that should represent deviations from a normal indoor climate during the day. A range of TNZs is calculated because the relative humidity, air velocity and metabolism can change every hour. As can be seen in Fig. 10, the calculated thermoneutral zones correspond with the measured data. In addition, we describe two examples in more detail. A short summary of December 13th, 2015 can be found in Table 1.
Going to sleep
Fig. 11 shows the first measuring point of December 13th, 2015 of test-subject 1, at 10:37 hour. She is still in bed, according to her schedule. Relative humidity was 56%, the exposed temperature 17.6 °C, and the weighted average skin temperature 35.2 °C. According to her entries in the questionnaire, the clo-value was 0.48, and the corresponding metabolic rate and air velocity were 0.9 Met and 0.05 m/s respectively. She entered a sensation vote of 1.8 and a comfort vote of 1.07, so she feels warm and this is just comfortable.
Fig. 11. Thermoneutral zone (in red) and comfort zone (red dot) calculated for measuring point 13th December 10:37h for test-subject 1, including the measured weighted mean skin temperature (blue horizontal line) and measured exposed temperature (blue vertical line)
The body can only regulate its temperature in the surrounding environment by regulating its heat loss through dilatation and contraction of blood vessels, which increases or decreases the skin temperature. Therefore, a higher skin temperature suggests that the test-subject feels warm and a lower skin temperature suggests that the test-subject feels cold. In Fig. 11 the skin temperature of test-subject 1 is within the higher end of the calculated thermoneutral zone. This corresponds with her sensation and corresponding comfort vote. The exposed temperature is lower than the TNZ. This indicates that the person is not in heat balance: more heat is lost, and consequently she would probably start feeling colder if she does not change her environment, her clothing or her activity level. A quarter of an hour later she is out of bed, according to her schedule. Fig. 12 shows this measuring point. Her environment, clothing and activity level have not changed since the last measuring point. However, her sensation and comfort vote did change. She feels slightly cool, although close to neutral, and this is just comfortable (sensation vote = -0.06, comfort vote = 1.20). Her weighted average skin temperature has decreased to 33.5 °C. The exposed temperature is still lower than the thermoneutral zone. Her body responded to the relatively cold exposed temperature; blood vessels contracted, which resulted in a lower skin temperature.
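The TNZ and comfort-zone calculations in these examples rest on two standard formulas from the data-collection step: the Harris-Benedict equation for BMR and the DuBois formula for body surface area. A Python sketch (female form of the original Harris-Benedict; the inputs are hypothetical, since the subjects' anthropometrics are not reported):

def bmr_harris_benedict_female(weight_kg: float, height_cm: float, age_years: float) -> float:
    """Basal metabolic rate [kcal/day], original Harris-Benedict equation (female form)."""
    return 655.0955 + 9.5634 * weight_kg + 1.8496 * height_cm - 4.6756 * age_years

def body_surface_dubois(weight_kg: float, height_cm: float) -> float:
    """Body surface area [m^2] via the DuBois & DuBois formula."""
    return 0.007184 * (weight_kg ** 0.425) * (height_cm ** 0.725)

# Illustrative values only:
print(bmr_harris_benedict_female(62.0, 168.0, 21.0))  # ~1461 kcal/day
print(body_surface_dubois(62.0, 168.0))               # ~1.71 m^2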
In addition, her sensation and comfort vote changed negatively, although she still feels neutral. The body is still adapting to the colder exposed temperature; the person is still not in heat balance, more heat is lost. Most likely, she would start feeling colder if she would not change her environment, her clothing or her activity level.
Fig. 12. Thermoneutral zone (in red) and comfort zone (red dot) calculated for measuring point 13th December 10:50h for test-subject 1, including the measured weighted mean skin temperature (blue horizontal line) and measured exposed temperature (blue vertical line)
Optimal indoor temperatures
The comfort zone is likely situated around the center of the thermoneutral zone; the corresponding operative temperature is the most comfortable (optimal) indoor temperature for that person at that moment (Topt). Because parameters such as relative humidity, air velocity, activity level and clothing can vary over time, this will lead to a different comfort zone and thus a different optimal indoor temperature. The thermoneutral zone is calculated from the 57 questionnaire entries of test-subject 1. For all these moments a Topt is calculated. This calculation is a steady-state calculation, and the human body adapts to the exposed temperature with a slight delay over time. However, the average Topt per test-subject can give an insight in the differences in preferred indoor temperatures. For each test-subject the Topt per questionnaire entry was calculated for one week. Per day the average optimal temperature (Topt) was calculated to compare the differences in thermal comfort between the two test-subjects, see Table 2.
Discussion
The calculation of Topt is a steady-state calculation, and the human body adapts to the exposed temperature with a slight delay over time, because the calculation is dependent on the physiology of a person (e.g. gender, weight, height, amount of body fat and metabolism) and his or her clothing. A person with little body fat is more sensitive to variations in ambient temperatures, because of the lack of thermal resistance. Consequently, a person with more body fat is less sensitive to deviations in ambient temperatures. Furthermore, a taller person has a larger body surface, and so will get cold more easily. All these parameters have an influence on the speed at which the body responds to changes in exposed temperatures. Both test-subjects are female and approximately the same age, height and weight. Nevertheless, their calculated optimal temperatures differ. From the questionnaire inputs it is known that test-subject 1 changed to relax wear after returning from school (lower clo-value); in addition, test-subject 2 was more active in the household (higher metabolism). These differences may explain the differences in the optimal calculated temperatures for both test-subjects. However, it is not always possible to change the temperature set-points, for example in office buildings or when multiple people with different comfort levels are living in the same dwelling. In addition to changing setpoint temperatures, people can reduce their sensitivity to variations in temperature by changing the clothing worn. More clothing, i.e. a higher clo-value, will result in a shift of the TNZ to a lower ambient temperature. Less clothing will result in the opposite. This is probably why two women who appear almost similar can differ in their calculated optimal temperatures, while adapting to each other because they live together.
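The per-day averaging step described above can be sketched as follows (timestamps and Topt values are made-up placeholders, not the values in Table 2):

from collections import defaultdict
from statistics import mean

# (day, T_opt [degC]) per questionnaire entry -- illustrative values only.
entries = [
    ("2015-12-13", 21.4), ("2015-12-13", 20.8), ("2015-12-13", 22.1),
    ("2015-12-14", 20.2), ("2015-12-14", 19.9),
]

per_day = defaultdict(list)
for day, t_opt in entries:
    per_day[day].append(t_opt)

daily_t_opt = {day: mean(vals) for day, vals in per_day.items()}
print(daily_t_opt)  # e.g. {'2015-12-13': 21.43..., '2015-12-14': 20.05}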
By combining the energy measurements of the dwelling with the calculated optimal temperatures of both test-subjects and the measured temperature setpoint, it is possible to calculate the dwelling's energy expenditure, using the earlier presented correlation model (Fig. 13). The graph shows the result of one day where both test-subjects 1 and 2 were measured.
Fig. 13. Differences in energy use because of physiological differences – 10th of November 2015. Including the calculated average Topt for test-subject 2 (red) and 1 (blue), the measured T of the living room (grey), the energy demand line depending on the amount of infiltration (dark blue) and the measured (black dot) and calculated (blue dot) energy demand
For each test-subject the optimal temperature (Topt) per questionnaire entry was calculated. Per day the average optimal temperature (Topt) was calculated and subtracted by the outdoor temperature of that day (∆T), to exclude the influence of the weather. On the right, the histograms per day and per test-subject of all the calculated optimal temperatures (subtracted by the outdoor temperature) are shown. On the left, the average calculated Topt are again shown for test-subjects 2 and 1 (red and blue lines). The grey line corresponds with the average indoor living room temperature of that day, subtracted by the corresponding outdoor temperature.
Conclusion
From the results it can be concluded that the gap between measured and calculated energy use will probably not be completely explained by using the TNZ model. Nevertheless, this model can explain differences in individual indoor temperature preferences, which might contribute for a (large) part to the gap. This study highlights the differences in individual thermoregulatory behaviour, comfort and resulting optimal indoor temperatures. When the average calculated optimal temperatures are compared, differences in optimal indoor temperatures can be distinguished. From these calculations, the following conclusions can be drawn: test-subject 2 is more comfortable at lower temperatures compared to test-subject 1; the set-point temperature of the living room was in the middle of these calculated optimal temperatures, as was seen in the measurements. This example shows that the energy use can differ by a factor of 2 between individuals when the average optimal indoor temperatures serve as input in the model. Both test-subjects are female and approximately the same age, height and weight. Several studies suggest that there might be great differences in comfort between men and women, lean and obese, and young and elderly. This study shows that there are also significant differences between two almost similar females and that the energy demand might vary by up to a factor of 2. In addition, the differences between subpopulations might be even greater. This might explain why in some projects the highest measured energy consumption is a factor of 5 higher than the lowest measured energy consumption.
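As an illustration of the correlation model behind Fig. 13 and of the factor-2 observation, a Python sketch with placeholder coefficients and temperatures rather than the paper's measurements:

import numpy as np

# Placeholder daily observations: indoor-outdoor temperature difference [K]
# and measured heating demand [kWh_th]; illustrative values only.
delta_t = np.array([4.0, 7.5, 10.2, 12.8, 15.1])
q_heating = np.array([2.6, 6.3, 9.5, 12.4, 15.0])

# Fit Q = slope * dT + intercept (the linear relation of Fig. 3); the slope
# reflects heat loss (transmission, ventilation, infiltration), the intercept
# the balance of internal and solar gains.
slope, intercept = np.polyfit(delta_t, q_heating, 1)

def daily_demand(t_indoor: float, t_outdoor: float) -> float:
    return max(0.0, slope * (t_indoor - t_outdoor) + intercept)

# Hypothetical average T_opt values for the two subjects on one outdoor day:
t_out = 6.0
q1 = daily_demand(22.0, t_out)  # subject preferring a warmer living room
q2 = daily_demand(19.5, t_out)  # subject comfortable at lower temperatures
print(f"Q1 = {q1:.1f}, Q2 = {q2:.1f}, ratio = {q1 / q2:.2f}")
# The closer t_out gets to T_opt, the larger the relative difference becomes.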
Evaluation of Machine Learning-based Anomaly Detection Algorithms on an Industrial Modbus/TCP Data Set
In the context of the Industrial Internet of Things, communication technology, originally used in home and office environments, is introduced into industrial applications. Commercial off-the-shelf products, as well as unified and well-established communication protocols, make this technology easy to integrate and use. Furthermore, productivity is increased in comparison to classic industrial control by making systems easier to manage, set up and configure. Unfortunately, most attack surfaces of home and office environments are introduced into industrial applications as well, which usually have very few security mechanisms in place. Over the last years, several technologies tackling that issue have been researched. In this work, machine learning-based anomaly detection algorithms are employed to find malicious traffic in a synthetically generated data set of Modbus/TCP communication of a fictitious industrial scenario. The applied algorithms are Support Vector Machine (SVM), Random Forest, k-nearest neighbour and k-means clustering. Due to the synthetic data set, supervised learning is possible. Support Vector Machine and Random Forest perform well with different data sets, while k-nearest neighbour and k-means clustering do not perform satisfactorily.
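The supervised evaluation pipeline implied by this abstract can be sketched with scikit-learn; feature extraction from Modbus/TCP traffic is out of scope here, so X and y stand in for pre-extracted flow features and the synthetic benign/malicious labels (k-means, being unsupervised, would additionally need a cluster-to-label mapping and is omitted):

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC

# Placeholder data: rows stand in for feature vectors extracted from
# Modbus/TCP flows, labels for the synthetic benign (0) / malicious (1) tags.
rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 8))
y = rng.integers(0, 2, size=1000)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

for name, clf in [("SVM", SVC()),
                  ("Random Forest", RandomForestClassifier(random_state=0)),
                  ("k-NN", KNeighborsClassifier())]:
    clf.fit(X_train, y_train)
    print(name)
    print(classification_report(y_test, clf.predict(X_test)))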
GUESS: projecting machine learning scores to well-calibrated probability estimates for clinical decision-making
Motivation
Clinical decision-support-systems (CDSS) have been applied in numerous fields, ranging from cancer survival towards drug resistance prediction. Nevertheless, CDSS typically have a caveat: many of them are perceived as black-boxes by non-experts and, unfortunately, the obtained scores cannot usually be interpreted as class probability estimates. In probability-focused medical applications, it is not sufficient to perform well with regard to discrimination and, consequently, various calibration methods have been developed to enable probabilistic interpretation. The aims of the current study were 1) to develop a tool for fast and comparative analysis of different calibration methods, 2) to demonstrate their limitations for the use on clinical data, and 3) to introduce our novel method GUESS.
Results
We compared the performances of two different state-of-the-art calibration methods, namely histogram binning and Bayesian Binning in Quantiles (BBQ), as well as our novel method GUESS on both simulated and real-world datasets. GUESS demonstrated calibration performance comparable to the state-of-the-art methods and always retained accurate class discrimination. GUESS showed superior calibration performance in small datasets and therefore may be an optimal calibration method for typical clinical datasets. Moreover, we provide a framework (CalibratR) for R, which can be used to identify the most suitable calibration method for novel datasets in a timely and efficient manner. Using calibrated probability estimates instead of original classifier scores will contribute to the acceptance and dissemination of machine learning based classification models in cost-sensitive applications, such as clinical research.
Availability
GUESS as part of CalibratR can be downloaded at CRAN.
Supplementary information
Supplementary data are available at Bioinformatics online.
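GUESS itself is specified in the paper and in CalibratR; as a reference point, the simplest of the compared baselines, histogram binning, can be sketched in Python as follows (equal-width bins; the bin count is an arbitrary choice):

import numpy as np

def fit_histogram_binning(scores, labels, n_bins=10):
    """Map raw classifier scores in [0, 1] to empirical class-1 frequencies per bin."""
    edges = np.linspace(0.0, 1.0, n_bins + 1)
    bin_idx = np.clip(np.digitize(scores, edges) - 1, 0, n_bins - 1)
    # Empirical probability of class 1 in each bin (0.5 prior for empty bins).
    probs = np.full(n_bins, 0.5)
    for b in range(n_bins):
        mask = bin_idx == b
        if mask.any():
            probs[b] = labels[mask].mean()
    return edges, probs

def calibrate(scores, edges, probs):
    idx = np.clip(np.digitize(scores, edges) - 1, 0, len(probs) - 1)
    return probs[idx]

# Toy example: overconfident scores get pulled toward observed frequencies.
rng = np.random.default_rng(1)
scores = rng.uniform(size=500)
labels = (rng.uniform(size=500) < scores**2).astype(float)  # miscalibrated by design
edges, probs = fit_histogram_binning(scores, labels)
print(calibrate(np.array([0.2, 0.9]), edges, probs))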
/**
 * DevExtreme (ui/scroll_view/scrollable.d.ts)
 * Version: 20.1.3
 * Build date: Fri Apr 24 2020
 *
 * Copyright (c) 2012 - 2020 Developer Express Inc. ALL RIGHTS RESERVED
 * Read about DevExtreme licensing here: https://js.devexpress.com/Licensing/
 */
/** Warning! This type is used for internal purposes. Do not import it directly. */
export { dxScrollableOptions as Options } from './ui.scrollable';
The NFL doesn’t really break everyone—surely there’s been the odd kicking specialist who’s made it through a career unscathed. The rest, however, this league does tend to fold, spindle and mutilate. Let us lift our glasses high, then, to the Lazari—the plural of Lazarus—who have endured extraordinary despair and bounced back to reclaim, resuscitate and otherwise resurrect their careers. Overlooked and underappreciated, counted out and cut, they are survivors of the injury list and the waiver wire, possessed of equal measures talent and resilience. They are also, it turns out, more common than you might realize. Sam Bradford was dapper in a muted blue suit with a tasteful windowpane pattern. As he sat beside his locker, an hour after leading the Vikings to their fourth straight victory, on Oct. 3 against the Giants, his rising trouser cuffs revealed a fashion faux pas: Beneath his stylish tan oxfords, he wore no socks. Bradford explained, “It’s the one superstition I’ve kept for my whole career”—a span stretching back to Putnam City North High and through his prolific career at Oklahoma. As the first pick in the 2010 NFL draft, by the Rams, he set a league record for most completions by a first-year quarterback and was named Offensive Rookie of the Year. Tall and smart, with an accurate arm, able to throw with touch and power, the kid radiated success. He had a glow about him ... until he didn’t. After a season-ending ACL tear in 2013 and another in ’14, his name became synonymous with hard luck. He was traded to Chip Kelly’s Eagles and finished last season strong—but it was time, once again, for the other shoe to drop. Kelly was fired; Philadelphia moved up in the draft to snag Carson Wentz at No. 2. That vote of no-confidence provoked a rare fit of pique in the usually even-keeled Bradford, who announced he wanted a trade. Helping Bradford come to peace with being stuck in Philly, sharing the position with the team’s QB of the future, was his Christian faith in general and a passage from the Book of James in particular. “Hang on, I’ve got it right here,” Bradford says, pulling out his phone and reading: Consider it pure joy, my brothers and sisters, whenever you face trials of many kinds. Because you know the testing of your faith produces perseverance. Let perseverance finish its work, so that you may be mature and complete, not lacking anything. Perseverance, it turned out, had not finished its work. Neither had Eagles general manager Howie Roseman. Bradford had only been awake for a couple of minutes on the morning of Sept. 3 when he got a text from Philadelphia coach Doug Pederson: “We need to talk.” “I figured he was calling to let me know they’d made a roster move,” Bradford says with a wry smile—“and sure enough, he was.” The Eagles had traded him to the Vikings (whose starter, Teddy Bridgewater, had torn his left ACL on Aug. 30) in exchange for two draft picks. That was at 8 a.m. Bradford and his wife, Emma, made it onto a 1:30 p.m. flight to Minneapolis. By 4 p.m., Bradford was at the Vikings’ Eden Prairie facility, learning his fourth offense in three years. The season opener was eight days away. While Bradford was familiar with the concepts in the system of his latest offensive coordinator, Norv Turner, the verbiage was new. It helped that the trade reunited Bradford with Shaun Hill, his backup for a season in St. Louis. If you think of a new offense “as a foreign language,” says Hill, “we have that common language that we spoke” as Rams. 
“So we can kind of relate.” The Turners—Norv and his son, Scott, the Vikings’ QBs coach—also “code-worded” some of the more unwieldy, lengthy play names on their call sheet so that “one word equals a whole play,” says Scott. The downside of that shortcut: Everyone has to learn the code. “It puts a little more stress on the other guys.” But everyone in the huddle seems happy to do his part. What we are learning this season is that, while Bradford may have never quarterbacked a team to the playoffs (or, for that matter, a winning season), this may have had less to do with his abilities than his supporting casts in St. Louis and Philadelphia. He is No. 1 in completion percentage (70.4%) and No. 2 in passer rating (109.8), and he’s one of just two qualifying QBs who haven’t thrown an interception. Pro Football Focus rates him as the No. 3 passer in the league. And this has been without the injured Adrian Peterson. No passer gets less from his backs (2.5 yards per carry) than Bradford does. With go-to receiver Stefon Diggs out of the lineup with a groin injury in Week 5 against Houston, the imperturbable Bradford repeatedly found Adam Thielen. The onetime D-II walk-on finished with seven catches for 127 yards and a touchdown in a 31–13 spanking of the Texans, which left Minnesota as the league’s last unbeaten. Bradford has displayed a deep-ball accuracy that Bridgewater lacks, and he has, of late, defibrillated the career of kick returner/wideout/Lazarus Cordarrelle Patterson, a 2013 Pro Bowler. Despite taking numerous hard shots in his four starts—the Vikings lost both tackles to season-long injuries—Bradford “gets up every time,” says left guard Alex Boone. “He’s one of the toughest mother------s I’ve seen.” He’s also “one of the most intelligent quarterbacks I’ve ever been around,” adds six-year veteran tight end Kyle Rudolph, still marveling at Bradford’s ability to lead the Vikings to a 17–14 win over the Packers after just two weeks with the team. Before he could locate his receivers in that game, Bradford first had to locate the team’s newly minted home, U.S. Bank Stadium. After eating his pregame meal with Hill in the team hotel, he had a question for his old friend. “Um, how do you get to the stadium?” “Are you driving?” Hill asked him. Bradford was. “I’ll ride with you.” With Bradford aboard, this team is poised for a long drive through the playoffs. Of course, not all comeback journeys point to the postseason—which isn’t to say they aren’t worth celebrating.

It was June 2015, Tim Cortazzo recalls, a pleasant morning in western Pennsylvania. A former receiver at Toledo, the personal trainer was working with members of the Penn-Trafford High football team when a familiar, rangy figure approached the field. “Terrelle Pryor walks over and says, ‘What’s up?’ and we start talking,” says Cortazzo. It had been eight years since Pryor’s last game at QB at nearby Jeannette High, where he was the 2007 Parade National Player of the Year. After three seasons at Ohio State and three with the Raiders, he’d been signed and cut by the Seahawks, Chiefs and Bengals. By the time Pryor walked onto the field at Penn-Trafford that day, he’d arrived at a momentous and difficult decision. “I think I’m gonna try and switch positions,” he told Cortazzo, who runs FSQ Sports Training in Trafford. “I know you played receiver. Can you help me out?” He and Pryor did drills for 30 minutes or so, until Cortazzo had to stop. He had a class to teach.
Before leaving, Pryor asked him, “What are you doing tomorrow?” They got busy. Freakish athletic ability aside, the 6' 4", 223-pound Pryor was a project at his new position. “He was big, strong and fast,” says Cortazzo, but also “robotic” and “super raw.” He needed to work on everything: his hands, his releases, sinking his hips going into breaks, keeping his feet moving coming out of them. A week or two after starting with Cortazzo, Pryor signed with the Browns. “At that point, he became obsessed with [playing receiver] and started grinding.” To buoy his business, Cortazzo would post occasional videos on social media, dispatches from Pryor’s quest. Not all the feedback was gracious. “I got comments like, ’He’s got no chance,’” says Cortazzo. At one point a discouraged Pryor told his trainer, “Man, there’s not one person in this country who thinks I can do this.” Sharing that opinion, apparently, was then Browns GM Ray Farmer, who cut Pryor on Sept. 10—five days after he’d been informed he made the final roster. “I had some low moments,” Pryor says. “I don’t mean to sound arrogant, but there were [QBs] on the teams I’ve been released from who weren’t better than me. That’s when I realized: This is political; I just didn’t fit in with what [personnel people] were looking for in a QB. It was time to try something else.” Says Cortazzo, “He was back here training with us the day after he was cut.” The Browns ultimately re-signed Pryor last December, but the fact that they drafted four receivers last spring suggests they weren’t exactly banking on him. Pryor used that as inspiration. He complemented his FSQ training with two weeks in Charlotte, where he worked with future Hall of Fame receiver Randy Moss. And in August, Pryor was the talk of Cleveland’s training camp. Following his second score (a 75-yard bomb from Josh McCown) during Cleveland’s intrasquad scrimmage at the Horseshoe in Columbus, the erstwhile Jeannette Jet turned to the cheering crowd and semaphored O-H-I-O. The question, it turns out, is not whether Pryor can cut it as an NFL wideout. The question is, How soon will he make his first Pro Bowl? After logging three catches apiece in his first two games, Pryor hauled in eight balls for 144 yards in Week 3, against the Dolphins. That breakout as a receiver came on a day—irony alert—the Browns also called on him to play nine snaps at QB, where he went 3 of 5 for 35 yards rotating in with third-stringer Cody Kessler. Pryor also carried four times for 21 yards and a touchdown, and was, according to the graders at Pro Football Focus, the best Brown on the field. Six weeks into what is shaping up to be another Stephen King kind of season in Cleveland, Pryor is the lone beam of sunlight. His team-leading 33 grabs for 413 yards and three TDs (and the fact that he’s PFF’s No. 9 pass catcher) are more impressive considering that more than half of those passes have come from the team’s third- and fourth-string passers. Meanwhile, Pryor remains a de facto QB5; he took five snaps at the position in Week 5, rotating in with Charlie Whitehurst. As the praise pours in, Pryor plugs his ears, repeating his mantra that he’s just “scratched the surface” of his abilities, that he’s not satisfied with eight catches for 144 yards—“I want 15 for 270. I want to break records.” Then, following a forlorn pause, he adds, “I want to see the Browns win every game.” In reality, Cleveland will be fortunate to win even four games this season. 
Meanwhile, Pryor heeds the counsel of Moss, who weighs in weekly with advice on how to attack certain corners and who reminds him, “Tunnel vision.” As long as Pryor continues to instruct scout team DBs to rough him up and catches upwards of 800 balls a week, results will come. As will a dramatic pay raise after the season when he becomes a free agent. It’s tough to begrudge a man his good fortune after he’s worked so hard to prove so many people wrong.

It seems fitting that in the months before Damon Harrison resurrected his football career, he worked the graveyard shift. During his first night restocking merchandise at a Walmart in Lake Charles, La., he was warned by his fellow workers: Don’t sit on the floor if you’re stocking the bottom shelves. “Of course, the first night I was there, I sat on the floor,” says Harrison, now a 6' 4", 350-pound nosetackle for the Giants, “and I ended up falling asleep in the middle of the aisle.” Forgiven his rookie mistake, he went on to flourish at Walmart, and his managers eventually offered him a full-time gig. If they’d called five minutes earlier, Harrison says, “I would’ve taken that job. That was gonna be some pretty good money.” But he’d already spoken that morning to Steven Miller, the defensive line coach at William Penn, an NAIA school in Oskaloosa, Iowa. Miller had offered him a scholarship, and Harrison accepted. If Miller had called any later, the NFL would’ve been deprived of both an elite run stuffer and one of the more sublime nicknames in sports. Damon (Snacks) Harrison and football did not get along at first. He was cut in middle school. Twice. When he went out for the freshman team at Lake Charles–Boston High, a coach asked him what position he played. The 6' 0", 200-pound newcomer explained that he liked to run the ball. So the coach put him at running back. “After that practice, man, I was hurting real, real bad,” he recalls. “And then I had to walk home.” During that stroll he decided he was a basketball player. Harrison loved hoops and played four years in high school. “I don’t want to brag on myself,” he says, as a preamble to bragging on himself, “but I could shoot the rock, man.” When a torn left meniscus forced him to the sofa for a month during his junior season, Harrison put on “30 or 40 pounds,” he says. Inspecting his new and more ursine physique, he decided to give football another go as a senior. In preseason workouts, coaches doubted he’d stick it out. “I don’t know why you’re coming out here,” one goaded him. “You’re not gonna be on this team.” He made it as the backup right tackle. When the starter got hurt, Harrison took his job and never gave it back. He began playing on the D-line, too, and was named to the all-Southwest Louisiana team. Still, he generated not a scintilla of interest from any college. Two weeks before graduating, he sent a series of hard-sell emails to a dozen coaches. Because his family had no computer, he sent them from the school library. Two coaches replied. One was Miller, then an assistant at Northwest Mississippi Community College. After taking a look at Harrison’s film, Miller offered him a scholarship. But when Harrison arrived in Senatobia, Miss., the team waffled. Harrison had never spent time in a weight room, which quickly became obvious. “You had the linemen doing sets on the bench with 225 pounds. I was over with the quarterbacks, doing sets with 135.
And I could only do that about five times.” The Rangers were only allowed eight out-of-state players and—surprise!—they were overbooked. Harrison would have to “grayshirt”—not play until the following season. Instead, he put Senatobia in his rearview mirror. Back in Lake Charles, he landed at Walmart and counted himself lucky. He would stock shelves in the pets section until 3 a.m., take his “lunch” break and decamp to cosmetics. Then Miller was hired at William Penn, and he invited Harrison to join him. The coach even drove Harrison to Iowa in his Jeep Cherokee (though right up until the eve of that interminable journey, Harrison thought they were headed to Williamsburg, Va., home of the College of William & Mary). For four seasons, Harrison started every game at D-tackle for the Statesmen. As a senior, in 2012, he got a last-minute invite to a B-list all-star game in Arkansas and made a handful of tackles. A draft guru taking notes that day later wrote of him—Harrison can recite this passage from memory—“Too slow and heavy-legged to ever compete at the NFL level. At best a camp body.” Befitting an athlete whose lower body can now be fairly described as pachydermal, Harrison has the memory of an elephant. On the third day of the draft, the day he was most likely to be selected, Harrison and some friends filled a booth at a Buffalo Wild Wings in Des Moines. Plenty of scouts and coaches had told him that they had their eye on him, but when the seventh round ended, his name hadn’t been called. Sitting in the back of a van on the drive home, Harrison turned off his phone and shed what he describes—a bit angrily—as “angry tears. It had happened again. I was being told I was not good enough. Again.” When he turned his phone back on, Harrison’s agent told him he was going to the Jets as an undrafted free agent. To manage his expectations, he was warned that New York had just re-signed one nosetackle and had drafted another in 2011. He would probably be fighting for a spot on the practice squad. “I hate it when someone tells me what I can and cannot do,” says Harrison, whose stout play in training camp forced the Jets to keep three nosetackles that fall. A year later he was starting. After last season, PFF rated him the NFL’s second-best nosetackle and the single best defender against the run. Last March he signed a five-year, $46.5 million deal ($24 million guaranteed) with the Giants, whose defense has gone from allowing 4.4 yards per carry in 2015 (24th in the NFL) to 3.5 (sixth) in ’16. Had Miller not called in the nick of time, Harrison would have had to work 930 years at Walmart, making $50,000 annually, to equal that contract. Even elephants don’t live that long.

Dennis Pitta remembered. He knew, as he was being carted off the field in Cleveland on Sept. 14, 2014, that he’d dislocated his right hip, just as he’d dislocated it 14 months earlier, in training camp. He knew team doctors would now attempt to pop the head of his femur back into its socket, as they had on the practice field in Owings Mills, Md. This time, he recalls, they waited until the medic cart was “just inside the tunnel,” so the crowd wouldn’t see them manhandling the leg. “I don’t think I’ve experienced anything more painful,” says the seventh-year tight end.
To hear him expound on hip anatomy, on his two surgeries and on the perils of avascular necrosis, which afflicted Bo Jackson but not Pitta, is to realize that his self-description as a “hip specialist” on his Twitter bio is not entirely in jest. After he’d dislocated the same hip twice in just over a year, doctors told him he was done with football. Pitta agreed. Or at least he pretended to agree while he rehabbed from the second injury. “Based on what everybody was telling me,” he says, “I had very little hope of returning.” You couldn’t blame the Ravens for writing him off. Which they did. Baltimore drafted two tight ends in 2015, then added another, free agent Ben Watson, last March. Pitta, for all anyone knew, was rehabbing “to be able to run around and play with my kids and feel normal.” Yet there he was on the practice field last October, catching passes from Joe Flacco. “I was running well and cutting well,” he recalls, “but I didn’t quite feel 100%.” Pitta was shut down for the rest of the season. And then there he was, reporting to training camp in July. And there he was on Sept. 18, back in Cleveland, 728 days after that cart ride. This time, after catching nine passes for 102 yards in a 25–20 Ravens win, he walked off the field. Four weeks later, his 34 catches are second most among NFL tight ends. He’s on pace for 91 grabs, which would smash his career high of 61 in 2012. Like his fellow Lazari, Pitta had made it all the way back. Perseverance had finished its work.
from astropy.io import fits

def read_PSF(self, filepath):
    """Load the point-spread function from the primary HDU of a FITS file."""
    # Copy the array so it remains valid after the file handle is closed,
    # then use a context manager so the handle is always released.
    with fits.open(filepath) as hdul:
        self.PSF = hdul[0].data.copy()
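As written, read_PSF is a method detached from its class. A minimal sketch of a host class and usage follows; the PSFModel name and the psf.fits path are illustrative placeholders, not from the original source:

import numpy as np
from astropy.io import fits

class PSFModel:
    """Thin wrapper that holds a point-spread-function array read from FITS."""

    def __init__(self):
        self.PSF = None

    def read_PSF(self, filepath):
        # Copy so the array outlives the closed (possibly memory-mapped) file.
        with fits.open(filepath) as hdul:
            self.PSF = hdul[0].data.copy()

model = PSFModel()
model.read_PSF("psf.fits")                    # hypothetical file path
print(model.PSF.shape, np.nansum(model.PSF))  # inspect before normalising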
Emerging Problems in Infectious Diseases

Microbiological characteristics of hypermucoviscous Klebsiella pneumoniae isolates from different body fluids

Introduction: Reports of hypermucoviscous Klebsiella pneumoniae (hvKP) isolated from fluids other than blood or abscess are rare. The aim of this study was to compare the clinical and microbiological characteristics of hvKP found in blood or abscess fluid with those isolated from other loci.

Methodology: A total of 24 non-repetitive hvKP isolates were collected from January 2013 to June 2014 from patients with hvKP infections: 15 in Group 1 (fluids other than blood or abscess) and 9 in Group 2 (blood or abscess fluid). Medical records of all patients were reviewed. Capsular polysaccharide (CPS) typing, virulence factor determination, and multilocus sequence typing (MLST) of the hvKP isolates were performed.

Results: Seventeen sequence types (STs) and 6 capsular serotypes were identified. Serotype K2 was the most commonly identified capsular type in both groups. Deletions of pLVPK-derived loci were found in K2 and non-K1/K2 hvKP strains. Two virulence genes, fimH and ycfM, were identified more frequently in Group 2 than in Group 1. There was no difference in the frequency of other virulence genes or serotypes between the two groups. Two imipenem-resistant hvKP isolates (cr-hvKP) were found in non-blood/abscess samples.

Conclusions: hvKP isolated from different body fluids had similar clinical and microbiological characteristics. The identification of cr-hvKP in non-blood/abscess samples should draw attention to the increasingly challenging management of hvKP infection.

The hypermucoviscous (hv) phenotype is a distinguishing feature of hvKP strains. hvKP is considered to be more virulent than hv-negative strains, and the phenotype is commonly found in K. pneumoniae strains that cause community-acquired pyogenic liver abscess (CA-PLA) and bloodstream infection (BSI). More importantly, hvKP strains exhibit a higher tendency to metastatic spread. These two traits contribute to the high mortality rate associated with hvKP infection. hvKP is more resistant to phagocytosis by polymorphonuclear neutrophils, less sensitive to killing by serum complement, and more virulent in animal studies. Previous reports have focused on hvKP infections isolated from abscess and blood fluids. We isolated hvKP from urine and tracheal secretions, evaluated the clinical and microbiological characteristics of these isolates, and compared them to those of isolates obtained from blood and abscess fluid.

Bacterial isolates

All isolates were obtained from patients seen at Peking University Third Hospital (PUTH), a university-affiliated medical center with a 1,498-bed capacity and 79,000 hospital admissions per year. A total of 533 non-repetitive K. pneumoniae clinical isolates were identified by VITEK GN card (bioMérieux, Marcy l'Étoile, France) from January 2013 to June 2014. All samples were stored at −80°C prior to genetic and virulence testing. An hv phenotype ("string") test was performed on all isolates to distinguish hvKP from classic K. pneumoniae (cKP): formation of a viscous string greater than 5 mm in length when a bacteriology inoculation loop was lifted from colonies grown overnight on 5% sheep blood agar at 37°C was regarded as diagnostic of hvKP. The first hvKP isolate from each patient was used for further investigation. hvKP isolated from asymptomatic patients was classified as colonization and was not included in this study.
Infection was considered to be nosocomial if it was diagnosed ≥48 h after admission and the patient had no evidence of clinical infection at the time of admission. Symptomatic infections diagnosed within 48 h of admission were considered to be community acquired. hvKP strains not isolated from blood or abscess fluid formed Group 1; hvKP strains isolated from blood or abscess fluid formed Group 2. The BLAST program at http://www.ncbi.nlm.nih.gov was used for final serotype identification. MLST was performed according to the protocols for K. pneumoniae provided on the MLST website (http://bigsdb.web.pasteur.fr/klebsiella/klebsiella.html). MLST allelic profiles were characterized using the MLST database. Sequence types (STs) were analyzed using eBURST version 3.0. Clonal complexes (CCs) were defined as groups of two or more isolates sharing at least 6 identical alleles.

Statistical analysis

Data were analyzed using the statistical package SPSS 17.0 for Windows. Fisher's exact test or the Chi-square test was used for categorical variables. All statistical tests were two-tailed, and a p value ≤0.05 was considered statistically significant. Descriptive data were reported as mean ± SD.

Ethical approval

The study was approved by the Institutional Review Board of PUTH.

Results

Men older than 60 years were predominantly affected with community-acquired infections. Patients were generally toxic on admission and frequently required invasive monitoring or care. Over 80% of patients recovered and were discharged. The clinical characteristics of the two hvKP groups were similar (Table 2).

Discussion

All hvKP isolates obtained from a large urban hospital during two years were evaluated in our laboratory. The infrequency of this infection led to the identification of only 24 hvKP isolates from 24 patients during the 2-year period. hvKP isolates comprised 7.5% of all K. pneumoniae isolates from our hospital, 60% of which were associated with infection. We identified hvKP more frequently in urine and abscess, and less frequently in blood, tracheal secretion, and bile, than another similarly large hospital in China. Most community-acquired infections were bloodstream infections or abscesses at various sites, similar to previous reports. Overall, 40% (6/15) of hvKP infections in the non-blood/abscess group were defined as hospital-acquired, versus 11.1% (1/9) in the blood/abscess group. This may suggest an elevated risk for hospitalized patients and the potential dissemination of hvKP strains in health care facilities. Colonization with hvKP is thought to be a first step in developing infection. Patients with pyogenic liver abscesses have been reported to have frequent intestinal (81.4%) and/or pharyngeal (39.5%) colonization with hvKP. We found hvKP colonization in 16 patients; colonized fluids included tracheal secretions, urine and superficial secretions. Factors leading from colonization to infection are not well understood. The presence of hvKP in the urine may be a potential marker for bacteremia. Therefore, at the time of hvKP isolation, strict adherence to standard hospital infection control precautions should be reinforced to limit its spread. The hvKP isolates that we evaluated had antibiotic resistance patterns similar to previous reports. An exception was the carbapenem resistance identified in hvKP isolated from the tracheal secretions and urine of patients in Group 1; these two isolates were characterized as K2/ST25 and K2/ST65 with blaKPC-2, respectively.
A study from China revealed 5 K1 cr-hvKP isolates carrying a plasmid-borne blaKPC-2 gene, with a genetic relationship between 3 of them. The development of resistant strains of hvKP demonstrates the need to avoid unnecessary antibiotic use that drives the selection of resistant strains. This problem is emphasized by a cr-hvKP isolate obtained from an elderly man receiving chemotherapy, who had previously been treated for another systemic infection before developing a fatal cr-hvKP infection. The bacterial capsule is an important virulence factor, and serotypes K1 and K2 have been particularly linked to severe bacteraemia and liver abscess. These findings have rarely been reported in fluids other than blood and abscess. In the present study, no difference was found in the distribution of capsular serotypes between the blood/abscess and non-blood/abscess groups. The K. pneumoniae virulence determinant pLVPK is a 219,385-bp plasmid isolated from the invasive K2 strain CG43. The pLVPK-derived terW-rmpA-iutA-silS loci are independent pathogenicity factors for abscess formation. pLVPK derivatives can be extrachromosomal, carrying the repA gene, or exist in a chromosome-integrated form. Deficiency of pLVPK-derived genes was found for the first time in hvKP isolates other than the K1 type. entB, iroN, ybtS, kfuBC, and iutA are part of iron-scavenging systems that contribute to bacterial virulence; similar expression patterns were found in both groups. Both fimH, which encodes the type 1 fimbrial adhesin, and ycfM, which encodes an outer membrane lipoprotein, were more common in Group 2 and could also contribute to bacterial virulence. There were several limitations to this study. The small number of hvKP isolates obtained does not support subgroup analyses. Not all patients were tested for colonization at admission, and there was limited testing of colonization sites. The use of 48 hours as a cut-off for diagnosing nosocomial infections was arbitrary and could have led to an overestimation of nosocomial infections. However, the frequency of community-acquired hvKP infections in our patients was similar to that reported from other centers.

Conclusions

This study is unique in comparing the clinical and microbiological characteristics of hvKP isolates obtained from blood and abscess with those obtained from other fluids. Bacteria isolated from all sites had similar clinical and virulence characteristics. Antibiotic resistance is a developing problem with hvKP.
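The categorical comparisons above (for example, fimH frequency in Group 2 versus Group 1) rest on Fisher's exact test. A minimal sketch follows; the group sizes (15 and 9) come from the paper, but the gene-positive counts are hypothetical placeholders, not study data:

from scipy.stats import fisher_exact

# Rows: gene-positive vs gene-negative counts per group.
# Group sizes (n = 15, n = 9) are from the paper; positive counts are hypothetical.
group1_pos, group1_n = 6, 15   # non-blood/abscess group
group2_pos, group2_n = 8, 9    # blood/abscess group

table = [
    [group1_pos, group1_n - group1_pos],
    [group2_pos, group2_n - group2_pos],
]
odds_ratio, p_value = fisher_exact(table, alternative="two-sided")
print(f"OR = {odds_ratio:.3f}, p = {p_value:.3f}")  # p <= 0.05 counted as significant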
import React from 'react' import styled, { keyframes } from 'styled-components' import CardIcon from '../CardIcon' import Logo from '../../assets/img/logo-icon.svg' interface LoaderProps { text?: string } const Loader: React.FC<LoaderProps> = ({ text }) => { return ( <StyledLoader> <CardIcon> <StyledPBR><img src={Logo} alt="PolkaBridge Launchpad"/></StyledPBR> </CardIcon> {!!text && <StyledText>{text}</StyledText>} </StyledLoader> ) } const spin = keyframes` 0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); } ` const scale = keyframes ` 0% {transform: scale(.8);} 100% {transform: scale(1);} ` const StyledLoader = styled.div` align-items: center; display: flex; flex-direction: column; justify-content: center; ` const StyledPBR = styled.div` font-size: 32px; position: relative; animation: 0.8s ${scale} infinite; ` const StyledText = styled.div` color: ${(props) => props.theme.color.grey[100]}; ` export default Loader
Iran is reporting huge new discoveries of oil that contain "billions" of barrels of reserves, state radio quoted the managing director of the country's National Oil Co. as saying today. Seyfollah Jashnsaz said Iran has discovered seven new oil fields in unspecified locations around the country. "Billions of barrels of oil will be added to the country's existing oil reserves," he said in comments broadcast on state radio. According to Jashnsaz, just one of these oil fields has 9 billion barrels of oil. "Even if we make calculations based on the minimum 12% recovery rate," he said, "it means that 1 billion barrels of oil can be recovered from this field alone." He added that further details of the find will be announced by the country's minister of oil in coming days.
package com.play.withus.fundamentals.lesson8;

// Statics
// "static" is a modifier: it means the method or property belongs to the
// class itself rather than to any instance, so it is shared by all
// instances of that class and can be accessed without instantiating it.
public class ItemSize {
    static final String medium = "M";
    static final String large = "L";
}
It was Conan and the barbarians here last night. When Conan O'Brien, the unknown picked to replace David Letterman as NBC's late-night talk show host, walked into the Rainbow Room atop the Rockefeller Center for a news conference, a pack of about 30 photographers rushed forward, surrounded him and spent the next five minutes jostling, shooting and shouting "Conan, Conan, look this way. Conan, Conan, smile." Among them was renowned photographer Annie Leibovitz.

That's the kind of attention the 30-year-old O'Brien has been getting since NBC announced last week that he would take over the late-night spot in August when Letterman leaves for CBS. "I'm going from unknown to relative unknown overnight," O'Brien said amid all the clicking and whirring. "I want to thank the press for that."

While the news media may be keenly interested in O'Brien, early analyses have not been great. USA Today called O'Brien "Conan Who." And Entertainment Weekly said, "Here's a confidence builder: He [O'Brien] was chosen by the same empty suits who let Letterman get away."

O'Brien was noticeably nervous and tried too hard to be funny once the news conference started. But some of his comments were clever and intriguing; it might be worth giving him a chance to perform before writing him off as the latest knuckleheaded move by NBC.

"My feeling is that I'm not replacing David Letterman," O'Brien said in response to repeated questions about whether he would be borrowing such Letterman staples as "Stupid Pet Tricks." "David Letterman is still going to be around, so I'm not going to do my cheesy version of the David Letterman show. . . . I'm not going to try and copy it or knock it off.

"What I'd like to do is try and do a talk show. When you're doing a show five nights a week, you can't generate all original material. . . . So, I don't want people to think this is going to be this all-new breakthrough thing where I'm going to be underwater and painted blue and the guests are going to be asking me questions or something. It's not going to be that. What it is going to be in some ways is a conventional talk show.

"But what I'd like to do in that talk show format is try some new things, experiment. My feeling about all of this is that it's a little bit of a responsibility. I'm only 30 years old, and not many people of my generation get to do this kind of thing. I'm being given an opportunity. If I go out and do exactly the same thing that everybody else is doing and don't take any chances, I think I'm blowing an opportunity. . . . But I'm definitely not going to try and replace Letterman."

O'Brien said that Letterman had invited him to appear on his show, and that he would do so tonight, calling the invitation "one of the nicest things that's happened to me since this announcement."

Some of O'Brien's best moments came in response to a question about how he felt about being second choice, a reference to reports that NBC first offered the job to Garry Shandling. Shandling reportedly turned down an offer of four years at $20 million. O'Brien said yesterday that he has a five-year contract but declined to give an amount.

"Do I mind? No, I think it's realistic. As I understand it now, in fact, I barely beat out Norman Fell [a character actor on such sitcoms as 'Three's Company']," O'Brien said to much laughter. "I mean, it made perfect sense to me. If I were them, my first choice would not be, 'Let's get Conan. He's a funny guy. He's had no experience. He doesn't have a suit or anything.'
No, I think it's perfectly logical that they went for Shandling first."

O'Brien, a Harvard graduate, has no performing experience except for a few brief appearances in "Saturday Night Live" sketches when he was a writer for the show in the late '80s. Virtually all of his experience is as a writer and producer. In 1991, he joined "The Simpsons" as writer and producer, a job he held until last week.

NBC is trying to do its best to make O'Brien a household name and to make its affiliates feel more comfortable about an unknown taking over a show that generated $67.5 million in ad revenue last year. Virtually all of NBC's top brass were on hand yesterday, standing nervously to the side of the stage at the Rainbow Room and watching O'Brien. The group included NBC President Bob Wright, Entertainment President Warren Littlefield and Lorne Michaels, who had picked O'Brien for the job.

In addition to appearing tonight on "Late Night With David Letterman," O'Brien will be on "Today" this morning. After the news conference last night, he appeared with Tom Snyder on cable channel CNBC.

How did the news conference go from NBC's point of view? Standing near a bank of elevators in 30 Rockefeller Plaza afterward, two senior executives were talking: "Look at it this way," one said. "When CBS introduced Pat Sajak and his late-night show, Annie Leibovitz sure as hell wasn't at the press conference."
package org.springframework.web.context.request.async;

import org.springframework.web.context.request.NativeWebRequest;

public interface DeferredResultProcessingInterceptor {

    default <T> void beforeConcurrentHandling(NativeWebRequest request, DeferredResult<T> deferredResult) throws Exception {}

    default <T> void preProcess(NativeWebRequest request, DeferredResult<T> deferredResult) throws Exception {}

    default <T> void postProcess(NativeWebRequest request, DeferredResult<T> deferredResult, Object concurrentResult) throws Exception {}

    default <T> boolean handleTimeout(NativeWebRequest request, DeferredResult<T> deferredResult) throws Exception {
        return true;
    }

    default <T> boolean handleError(NativeWebRequest request, DeferredResult<T> deferredResult, Throwable t) throws Exception {
        return true;
    }

    default <T> void afterCompletion(NativeWebRequest request, DeferredResult<T> deferredResult) throws Exception {}
}
/** * Call this instead of {@link Bindings#standard()} to inject the custom bindings. */ public static Bindings extendedBindings() { List<BindingWiring> wirings = Lists.newArrayList(Bindings.STANDARD_BINDINGS); wirings.add(new LoggedModel.Wiring()); return new Bindings(wirings); }
Generation and use of functionalised hydrogels that can rapidly sample infected surfaces

This paper outlines our method for developing polymer-linked, contact-lens-type materials for the rapid detection and differentiation of Gram-positive bacteria, Gram-negative bacteria and fungi in infected corneas. It can be applied to both model synthetic and ex-vivo corneal models and has been successfully trialled in an initial animal efficacy study. First, a hydrogel substrate for the swab material is selected; we have demonstrated selective swabs using a glycerol monomethacrylate hydrogel. Alternatively, any commercial material with carboxylic acid functional groups is suitable, but risks non-specific adhesion. This substrate is then functionalised via an N-hydroxysuccinimide reaction with amine groups on the specified highly branched polymer ligand (either individual Gram-negative-, Gram-positive- or fungus-binding polymers, or a combination of all three, can be employed for the desired sensing application). The hydrogel is then cut into swabs suitable for sampling, used, and the presence of Gram-positive bacteria, Gram-negative bacteria and fungi is disclosed by the sequential addition of dyes (fluorescent vancomycin, fluorescein isothiocyanate and calcofluor white). In summary, this method presents:

- A method to produce glycerol monomethacrylate hydrogels that minimize non-specific binding
- Methods of attaching pathogen-binding highly branched polymers to produce selective hydrogel swabs
- A method for disclosing pathogens bound to this swab using sequential dye addition

Introduction

In warm, temperate climates there is an increased incidence of microbial keratitis, an infection of the cornea, making it one of the leading causes of vision loss in many countries. Early and rapid diagnosis is imperative for effective and appropriate treatment. However, the remote locations of treatment centres mean diagnosis is slow, and broad-spectrum antibiotics are often prescribed, contributing to an increase in antibiotic resistance. The aim of this study is to fabricate polymer-linked, contact-lens-type hydrogels and use them for the rapid detection of Gram-positive bacteria, Gram-negative bacteria and fungi in infected corneas. The method is demonstrated using both rabbit and human ex-vivo corneas, has been replicated in animal safety trials, and should be suitable for other skin or wound surface sampling as required. This method of rapid detection and disclosure is of vital importance, as there is approximately a 10-30 times greater incidence of corneal ulceration in developing countries than in more industrialised, developed countries, where work in agricultural settings and a high prevalence of home remedies can exacerbate infectious diseases. The most commonly isolated bacterial species from corneal scrapings include Staphylococcus aureus, Staphylococcus epidermidis, Streptococcus pneumoniae and Pseudomonas aeruginosa, and the major fungal isolates include Fusarium solani, Candida albicans and Aspergillus fumigatus. The key to effective management of disease is early diagnosis and appropriate treatment. However, early diagnosis in these regions of the world is difficult due to the remote locations of treatment centres. The gold standard for identifying the infecting organism is microbial culture. However, this can be slow, because cultures must be performed at a central centre, and the limited number of these centres means that diagnosis is not rapid.
The result of this is that clinicians need to treat the infection immediately to preserve sight, without any scientific indication of the pathogen strain and therefore of the preferred treatment option. Broad-spectrum antibiotics are therefore given until the appropriate treatment, which depends on the infecting organism, is found. This high incidence of inappropriate treatment is associated with increased microbial resistance. Consequently, there is a pressing need for a novel, inexpensive and rapid detection system, suitable for use at remote treatment centres, that would allow early diagnosis and dictate the appropriate treatment course. Several such systems employ fluorescent dyes that specifically target Gram-positive organisms; however, it is a challenge to find fluorescent stains that can provide the desired level of discrimination across the vast array of pathogenic microbial species. Our method achieves this by attaching a highly branched polymer additive, functionalized with vancomycin (van), polymyxin (pmx) and amphotericin (amp) ligands, to a hydrogel sheet that is cut into a size and shape suitable for placement on a human eye. The material must contain a high degree of amine functionality in order to form a strong bond with the binding polymer, but we have demonstrated how any carboxylic-acid-functional contact lens (such as a typical commercial hydrogel contact lens) can be used after a two-step modification, although this may impact both the selectivity of the system and the choice of disclosing dyes. Clinical isolates of S. aureus, P. aeruginosa and C. albicans (1 × 10⁸) were directly incubated with polymer-linked lenses before being washed and imaged using a light or fluorescence microscope, or finely minced so the numbers of viable bacteria could be enumerated. These strains were selected to be representative of the major broad categories of infectious pathogens in wound care. We have outlined this method using both directly synthesized glycerol monomethacrylate (GMMA) hydrogels (Section 1) and commercial contact lenses (Section 2). The first method builds on our published materials and provides the steps needed to prepare highly specified detection of particular bacterial species, whilst Section 2 shows a relatively simple modification of the published methodology that allows functionalization of a vast array of other commercial hydrogel products to create pathogen-sensing materials, at the sacrifice of some specificity, as described in the provided method validation data.

Section 1: fabricated glycerol monomethacrylate hydrogel method

Glycerol monomethacrylate (GMMA) (5 g, 4.660 ml), glycidyl methacrylate (GME) (0.345 g, 0.321 ml) and ethylene glycol dimethacrylate (EGDMA) (0.206 g, 0.196 ml) were degassed by bubbling dry nitrogen through the solution whilst stirring in isopropanol (2 ml) for twenty minutes. 2-Hydroxy-2-methylpropiophenone (HMPP) (55 mg) was added and the solution degassed for a further five minutes before it was extracted using a glass syringe and directly injected into a quartz plate mould separated by a 0.5 mm PTFE gasket. The two quartz plates were laminated with poly(ethylene terephthalate) sheet, adhered to the inner surfaces of the glass, to aid release of the produced polymer sheet. To initiate polymerisation, the mould was irradiated with a 400 W metal halide UV-A lamp for 3 minutes before being turned over and irradiated on the other side for a further 3 minutes. The cured hydrogel sheet was then removed and immersed in isopropanol.
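For readers checking the recipe, the feed above works out to roughly 90:7:3 GMMA:GME:EGDMA on a molar basis. A minimal sketch of that arithmetic follows; the molar masses are standard literature values, not stated in the paper:

masses = {"GMMA": 5.0, "GME": 0.345, "EGDMA": 0.206, "HMPP": 0.055}  # grams, from the recipe above
molar_mass = {"GMMA": 160.17, "GME": 142.15, "EGDMA": 198.22, "HMPP": 164.20}  # g/mol, literature values

moles = {k: masses[k] / molar_mass[k] for k in masses}
monomer_total = sum(moles[k] for k in ("GMMA", "GME", "EGDMA"))  # initiator excluded
for k, n in moles.items():
    print(f"{k}: {n * 1000:.2f} mmol ({100 * n / monomer_total:.1f} mol% relative to monomer feed)")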
The hydrogel sheet was washed a total of five times with fresh isopropanol, being left for at least 1 hour each time, before being added to a 1,3-diaminopropane solution in isopropanol (20% v/v, 250 ml) for 48 hours and inverted halfway through. It was then washed and immersed in isopropanol for 1 hour a further two times. The hydrogel was characterised by measurement of equilibrium water content (EWC = 61%, SD = 4%, n = 12). Fourier transform infrared (FTIR) spectroscopy was used to analyse for residual monomer leaching, and the material was imaged using scanning electron microscopy.

Aminated hydrogels were exposed to HB-PNIPAM-X (50 mg), where X is either van, pmx or amp, dissolved in isopropanol (100 ml). The hydrogel sheets were immersed for 48 hours on a low-speed shaker, with inversion after 24 hours. When the polymers had reacted, the sheet was washed with isopropanol for one hour; the isopropanol was refreshed and the sheet left for a further hour. To deprotect the HB-PNIPAM-pmx (removal of Fmoc groups), 20 ml of piperidine in isopropanol (20% v/v) was added to the hydrogel sheet for 48 hours before it was washed in pure isopropanol for an hour, three further times. Polymer films were characterised by assessing equilibrium water content (EWC) (see Table 1), polymer loading by UV absorbance and vancomycin ELISA, and FTIR. Full characterisation details are shown in the supporting information.

To produce a tri-functional hydrogel, the aminated hydrogel discs (5 mm diameter) were exposed to a mixture of HB-PNIPAM-van (50 mg), HB-PNIPAM-pmx (100 mg) and HB-PNIPAM-amp (60 mg) dissolved in isopropanol (100 ml). Hydrogels containing other amounts are disclosed in the supporting information. These exposed discs are described as triple-functional hydrogels in this work. The hydrogel sheet was left immersed in this mixture for 48 hours on a slow-speed shaker and inverted halfway through. When the polymers had reacted, the sheet was washed with isopropanol for one hour; the isopropanol was refreshed and the sheet left for a further hour. To deprotect the HB-PNIPAM-pmx (removal of Fmoc groups), 20 ml of piperidine in isopropanol (20% v/v) was added to the hydrogel sheet for 48 hours before it was washed in pure isopropanol for an hour, three further times. Prior to use, all hydrogels were washed three times in PBS and then incubated in media, and the hydrogels were characterised via the same methods shown above.

Location: must be carried out in an extracted fume hood.

Procedure (Step 1):
1. Into a round-bottomed flask, combine GMMA (5 g, 4.660 ml), GME (0.345 g, 0.321 ml) and EGDMA (0.206 g, 0.196 ml) with isopropanol (2 ml).
2. Add a magnetic stirrer bar and begin stirring.
3. Degas the solution by bubbling dry nitrogen through it for 20 minutes.
4. Add HMPP (55 mg). Degas for a further 5 minutes.
5. Inject the monomer mixture into a mould consisting of two quartz plates lined with PET sheets, separated by a 0.5 mm PTFE gasket. Follow the instructions in Diagram 1.
6. Irradiate the mould containing the monomer mixture using a 400 W metal halide UV-A lamp for 3 minutes on each side.
7. Remove the cured hydrogel sheet from the mould and place in isopropanol.
8. Wash the hydrogel sheet a total of 5 times in isopropanol, for at least 1 hour per wash.
9. Add the hydrogel to a solution of 1,3-diaminopropane in isopropanol (20% v/v, 250 ml) for 24 hours.
10. Flip the hydrogel sheet over and leave in the diamine solution for a further 24 hours.
11. Remove the hydrogel from the solution and wash twice in pure isopropanol, for a minimum of 1 hour each time.
12. Check for the presence of residual monomer by soaking a sample of the hydrogel in methanol for 12 hours, then carrying out GC analysis on the methanol supernatant.
13. Determine the equilibrium water content of the hydrogel by sampling it (steps outlined separately in the supporting information).

Step 1 pass/fail criteria: films are stable and do not decompose, and the EWC lies between 55 and 70%.

Procedure (Step 2):
1. In a sealable plastic container large enough to fit a hydrogel sheet flat against the bottom, dissolve 50 mg vancomycin-functional polymer, 100 mg polymyxin-functional polymer and 60 mg amphotericin-functional polymer in 100 ml IPA.
2. Place a hydrogel sheet (SOP 24) into the polymer solution.
3. Place the plastic container on top of a shaker and shake for 24 hours at the lowest speed.
4. Remove the gel from the solution and flip it over. Replace on the shaker.
5. Shake at minimum speed for 24 hours.
6. Remove the gel from the polymer solution and place in a container of pure IPA for 1 hour.
7. Repeat the washing in step 6.
8. Deprotect the hydrogel (steps outlined separately below).
9. Measure the equilibrium water content and FTIR spectrum.

Step 2 pass criteria: films are stable and do not decompose, and the EWC lies between 40 and 65%.

[Figure caption] Ex vivo rabbit (A) / human (B) corneas were infected with S. aureus, P. aeruginosa or C. albicans for 24 hours, washed and exposed to a dual-functionalised bacterial hydrogel for 1 hr. Hydrogels with S. aureus were detected using fluorescent vancomycin and hydrogels with P. aeruginosa were detected using FITC; both were blocked using periodic acid and Schiff's reagent prior to staining. Hydrogels with C. albicans were detected using calcofluor white. Images show S. aureus, P. aeruginosa and C. albicans bound to hydrogels removed from infected ex vivo corneas.

The procedure above outlines the creation of a triple-functionalized polymer in which the loading of vancomycin-, polymyxin- and amphotericin-functional material has been optimized to give significant and detectable binding of all three classes of pathogen. The polymer loadings can easily be altered, or just one or two of the three drug-functionalized branched polymers can be specified, to provide increased specificity to the diagnostic.

Hydrogel deprotection protocol, expanded:
1. Add 20 ml of piperidine in isopropyl alcohol (20% v/v) to a sealable container.
2. Add the functionalized hydrogel sheet and ensure that it is fully submerged.
3. Leave overnight.
4. Flip the sheet and leave overnight.

[Figure caption] a) After one hour the lenses were washed 3x in PBS, incubated with EtBr, DAPI or PBS for 30 minutes and imaged using a UV light box. Contact lenses, both with and without bacteria, glowed to the same extent under UV light, suggesting that this method of detection may not be useful due to the high degree of background staining. b) Acrylamide-based hydrogels show less background staining with DAPI than commercially available contact lenses. Non-modified, non-functionalised commercial contact lenses and plain non-modified acrylamide gel were incubated with or without DAPI for 5 minutes, washed 3 times in PBS and viewed under a UV light box. The commercial contact lens stained with DAPI, suggesting high background, whereas the acrylamide gel incubated with DAPI did not show much background staining, suggesting that using a different substrate for the polymer carrier might be advantageous in giving more options for dye selection. c) MTT staining can detect S.
aureus after 1 hr incubation, and P. aeruginosa and C. albicans after overnight incubation. Commercial contact lenses functionalised with vancomycin, polymyxin and amphotericin polymers were incubated with or without 10⁸ S. aureus, P. aeruginosa or C. albicans, respectively, for one hour. Lenses were washed 3 times with PBS and 0.5 mg ml⁻¹ MTT solution was added to the lenses for 1 hr for S. aureus, and overnight for P. aeruginosa and C. albicans.

5. Remove the hydrogel from the solution and place in a container of pure IPA for a minimum of 1 hour to wash the gel.
6. Repeat the washing process two more times.

This is a necessary process to remove any remaining Fmoc protecting groups on the polymyxin-functionalized polymer. It can easily be adapted to the suspended polymer solution if you wish to deprotect the polymer and use it separately from the hydrogel device; however, once deprotected, the polymer cannot later be attached to a surface, as the succinimide reaction will destroy the polymyxin drug functionality. The polymyxin polymer itself is perfectly soluble in methanol, chloroform or water at low temperature after the Fmoc protecting groups have been removed, and so can be separated from the raw polymer material by simple filtration.

Packaging of discs

We have had success storing and shipping these materials internationally by following the procedure below, to ensure that both product sterility and stability are maintained:
1. Soak the hydrogel sheets in ethanol for a minimum of 1 hour.
2. Select the appropriate size of cork borer (3 mm, 5 mm or 10 mm depending on the target profile; for animal trial experiments use the smaller sizes as instructed, for human clinical trials use the 10 mm size).
3. Wipe a cutting board with 70% ethanol solution to disinfect.
4. Use the cork borer to cut the desired number of hydrogel disks from the sheet.
5. Place each hydrogel disk in a separate bijou container containing IPA.
6. Label each bijou container with the sample code, date and number (e.g., 1 of 6, 2 of 6, etc.).

On arrival at the destination, the following steps were undertaken to unpack the materials and prepare them for longer-term storage.

For polymer (powder) samples:
- Place polymers received freeze-dried directly into −20 °C storage until required.
- For use in assays, allow the polymers in glass vials to equilibrate to room temperature before opening (approximately half an hour).
- Do not leave out of the freezer for longer than one hour.

For hydrogel samples on arrival:
- Wash hydrogels, received in polymer solution, in isopropyl alcohol (IPA) twice and store in IPA at 4 °C.
- Before use in biological assays, wash the hydrogels (approx. 3 × 2 cm for 1 experiment) 2 times in IPA then 3 times in PBS (5-minute washes each).
- If additional hydrogel is removed and washed just before an assay and is subsequently not used, return it to a labelled glass vial containing IPA and store at 4 °C, so it is clear which hydrogel has previously been washed in case this affects assays. Make a note of which 'pot' the hydrogel is taken from before each assay. Treat previously washed hydrogel as in step 2 when using it in subsequent experiments.
- Do not leave out of the fridge for longer than half an hour.

Clinical isolates of S. aureus, P. aeruginosa and C. albicans were used. All bacterial and fungal strains were cultured on brain-heart infusion (BHI) agar at 37 °C overnight and then maintained at 4 °C. For use in experiments, one colony was sub-cultured from agar into BHI broth and incubated overnight at 37 °C. Stationary-phase microbes were used in rabbit cornea experiments.
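The EWC pass/fail windows quoted in Steps 1 and 2 reduce to a standard gravimetric calculation (the paper defers full details to its supporting information). A minimal sketch with hypothetical masses:

def equilibrium_water_content(wet_mass_g, dry_mass_g):
    """EWC (%) = 100 * (m_wet - m_dry) / m_wet, the standard gravimetric form."""
    return 100.0 * (wet_mass_g - dry_mass_g) / wet_mass_g

ewc = equilibrium_water_content(0.250, 0.098)  # hypothetical swollen vs dried disc masses
print(f"EWC = {ewc:.0f}%")                     # ~61%, inside Step 1's 55-70% window
assert 55 <= ewc <= 70, "Step 1 pass/fail check"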
For human corneal experiments, on the day of corneal inoculation a fresh broth was inoculated, and exponential-phase bacteria/fungi were used based on predetermined growth curves.

Biological testing of materials

A sample of materials from each batch produced during this study was kept for biological testing, both to ensure sterility (as no means of fully sterilizing the product post-modification could be found that would not negatively impact the functionality of the polymer coating) and to confirm functionality. Some of the standard methods employed are listed below. To evaluate the binding of microorganisms to functionalised polymer hydrogels, the following experiments were conducted: a) in-vitro interaction of microorganisms with individual polymer-linked hydrogels; b) interaction and binding of organisms to hydrogels carrying all three functionalised polymers; c) assessment of the limit of attachment of microbes; d) determination of the time for which the hydrogel needs to be placed on the cornea for optimal attachment; and e) assessment of the safety and efficacy of the triple hydrogel in-vivo in rabbits.

10⁸ fluorescein isothiocyanate (FITC)-labelled S. aureus, P. aeruginosa or C. albicans were incubated in-vitro for 1 hour with vancomycin-, polymyxin- or amphotericin B-functionalised polymers tagged on GMMA hydrogel discs of 5 mm diameter, respectively, or with triple hydrogels (all three agents). Hydrogels were washed 3 times with PBS, then imaged using a fluorescence microscope (Axiovert 200M, Zeiss). 8 fields of view were imaged, and the number of organisms attaching to the hydrogels per field of view was analyzed using ImageJ together with the imaging software AxioVision Rel. 4.8 in the UK and ProgRes CapturePro 2.5 (Jenoptik) in India. The number of organisms bound/attached to the functionalized hydrogels was compared with a non-functionalized hydrogel.

Single and triple functionalised hydrogels were placed for 60 minutes onto rabbit and human corneas that had been infected with 10⁸ S. aureus, P. aeruginosa or C. albicans. Hydrogels were picked up with sterile forceps, washed twice with PBS and stained with fluorescent dyes. Prior to staining with fluorescent vancomycin or FITC, hydrogels were reacted with 0.1% periodic acid (Sigma) for 10 min, washed twice with PBS and then incubated with Schiff's reagent for 10 min before washing twice again. Hydrogels were incubated for 10 minutes with vancomycin BODIPY® FL conjugate (2 µg ml⁻¹; FL-Vanc; ThermoFisher) for visualisation of Gram-positive organisms (S. aureus), with FITC (0.5 mg ml⁻¹) for Gram-negative organisms (P. aeruginosa), and, for visualisation of fungi, with calcofluor white as a 1:1 solution of calcofluor white ready-to-use solution and 10% potassium hydroxide. After incubation, the hydrogels were washed 3x in PBS and viewed under a fluorescence microscope.

To assess the sensitivity of the functionalised hydrogels, increasing numbers of S. aureus, P. aeruginosa or C. albicans were incubated in-vitro with triple-functionalised hydrogels for 1 hour. The hydrogels were washed, and the total ATP content was determined using the ENLITEN® ATP assay kit according to the manufacturer's instructions. In another set of experiments, increasing numbers of each organism were incubated in-vitro with triple-functionalised hydrogels for 1 hour; the hydrogels were washed and then examined with a fluorescence microscope, and the number of organisms per field of view counted.
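A minimal sketch of this per-field quantification, using hypothetical counts (the compilation convention, mean ± SD over 8 fields and at least 3 independent experiments, is stated in the next paragraph):

import numpy as np

# Hypothetical counts of bound organisms in 8 fields of view for
# 3 independent hydrogel experiments (rows = experiments).
counts = np.array([
    [42, 38, 51, 45, 40, 47, 39, 44],
    [36, 41, 44, 38, 43, 40, 37, 42],
    [48, 45, 50, 46, 49, 44, 47, 51],
])
per_gel_mean = counts.mean(axis=1)  # mean count per hydrogel
print(f"{per_gel_mean.mean():.1f} +/- {per_gel_mean.std(ddof=1):.1f} "
      f"organisms per field (n = {counts.shape[0]} experiments)")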
The data were compiled as mean ± SD of 8 fields of view per hydrogel from at least 3 independent experiments. Optimal time measurements for hydrogel placement were carried out using our ex-vivo cornea infection model described earlier. Human corneas were mono-infected with S. aureus, P. aeruginosa or C. albicans. Triple-functionalized hydrogels were placed onto these infected corneas and left in place; a period of 30 minutes was found to be sufficient to bind enough micro-organisms from the ex-vivo cornea to provide a statistically significant outcome under analysis.

A three-step process was developed to identify the three targeted organisms on each contact lens, using fluorescent vancomycin, FITC and calcofluor white respectively.

Detection of S. aureus using fluorescent vancomycin
1. Grow an overnight BHI broth culture of S. aureus at 37 °C.
2. Count the number of cells and adjust the number to 10⁸ in 1 ml PBS.
3. Add 100 µl of bacteria to wounded ex vivo corneas (using scalpel blade no. 22 to make 3 horizontal and 3 vertical slashes across the surface, and a metal ring to ensure a tight seal).
4. Incubate overnight at 37 °C.
5. Wash corneas once with PBS.
6. Cut a hydrogel disc of 1 cm diameter using a cork borer and place onto the surface of the cornea for 1 hour.
7. Remove the hydrogel and place into a 24-well plate containing 1 ml 1% (w/v) periodic acid for 10 min.
8. Wash with copious tap water (rinse in a petri dish containing 50 ml water or under a running tap) for 5 min.
9. Place into a 24-well plate containing 1 ml Schiff's reagent for 10 min.
10. Wash with copious tap water for 5 min.
11. Place into a 24-well plate containing 1 ml of 2 µg ml⁻¹ fluorescent vancomycin.
12. Wash with copious tap water for 5 min.
13. Visualise staining using a fluorescence microscope.
Total time for the procedure is approx. 55 minutes.

Detection of P. aeruginosa using FITC
1. Grow an overnight BHI broth culture of P. aeruginosa at 37 °C.
2. Count the number of cells and adjust the number to 10⁸ in 1 ml PBS.
3. Add 100 µl of bacteria to wounded ex vivo corneas (using scalpel blade no. 22 to make 3 horizontal and 3 vertical slashes across the surface, and a metal ring to ensure a tight seal).
4. Incubate overnight at 37 °C.
5. Wash corneas once with PBS.
6. Cut a hydrogel disc of 1 cm diameter using a cork borer and place onto the surface of the cornea for 1 hour.
7. Remove the hydrogel and place into a 24-well plate containing 1 ml 1% (w/v) periodic acid for 10 min.
8. Wash with copious tap water (rinse in a petri dish containing 50 ml water or under a running tap) for 5 min.
9. Place into a 24-well plate containing 1 ml Schiff's reagent for 10 min.
10. Wash with copious tap water for 5 min.
11. Place into a 24-well plate containing 1 ml of 1 mg ml⁻¹ FITC in 0.05 M sodium carbonate and 0.1 M sodium chloride solution at 4 °C for 1 hour.
12. Wash with copious tap water for 5 min.
13. Visualise staining using a fluorescence microscope.

Detection of C. albicans using calcofluor white
1. Grow an overnight BHI broth culture of C. albicans at 37 °C.
2. Count the number of cells and adjust the number to 10⁸ in 1 ml PBS.
3. Add 100 µl of fungi to wounded ex vivo corneas (using scalpel blade no.
As an alternative to microbiological testing, ELISA-based product control studies were carried out to determine the functionalisation of the hydrogels. There were 3 different protocols, to determine the functionalisation with vancomycin, polymyxin and amphotericin respectively:

Vancomycin ELISA for hydrogels

1. To a high-binding ELISA plate add 100 µl monoclonal mouse anti-vancomycin (1:500 dilution in 35 mM NaHCO3, 15 mM Na2CO3 and 3 mM NaN3 in distilled water) and cover the plate with adhesive plastic
2. Incubate the plate at 4 °C overnight

Example results

These ELISAs can also be modified, with minor changes, to test the concentration of the active drug on the powder polymer additive.

Section 2: commercial contact lens modification

Commercially available single-use contact lenses underwent a three-step modification to attach vancomycin (van)- and polymyxin (pmx)-functionalised highly branched poly(N-isopropylacrylamide). The carboxylic acid groups within the contact lenses were first modified with excess ethanediamine; the free amine groups from the monosubstituted ethanediamine were then reacted, forming amide linkages, with the remaining carboxylic acid groups on the highly branched polymers. Modification of contact lenses was carried out using autoclaved water and in a sterilised environment. Contact lenses (Biomedics 1 day Extra, Ocufilcon D, CooperVision) were prepared for polymer attachment via a two-stage modification using an excess of N-(3-dimethylaminopropyl)-N′-ethylcarbodiimide hydrochloride (EDC) (or N,N′-dicyclohexylcarbodiimide (DCC) in dimethylformamide (DMF)) and N-hydroxysuccinimide (5 × 10⁻⁴ M), then mixed with ethylenediamine (0.017 M) and left to react for 24 hours. These contact lenses were washed to remove the supernatant and then soaked in a dilute solution of partially modified antimicrobial polymer for 24 hours. Contact lenses functionalised with HB-PNIPAM-pmx were treated with 20% piperidine (5 ml) to remove Fmoc blocking groups and washed prior to bacterial detection. The modification of the contact lens can be observed via the changing opacity of the contact lens (with increasing succinimide modification it became entirely opaque, Fig. 3A), a change in the volume of water the hydrogel can absorb (Fig. 3B), and the decrease of FTIR peaks at 1720 and 3300 cm⁻¹ (Fig. 3C, signifying reduction of the methacrylic acid loading at the surface of the contact lens). Contact lens modification was carried out in aqueous solution using EDC and N-hydroxysuccinimide in a 1:0.5 molar ratio (0.0013 moles); 1 ml of solution was added to the contact lens and left for 24 hours, rinsed and then reacted with 0.017 M ethylenediamine in 3 ml aqueous solution at pH 10; a worked stoichiometry sketch follows below.
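As a worked example of the quantities that coupling step implies, the sketch below assumes, since the text does not say so explicitly, that the 0.0013 mol figure refers to EDC and that EDC is used as its hydrochloride salt:

# Worked stoichiometry sketch for the coupling step above. Assumptions
# (not stated explicitly in the text): 0.0013 mol refers to EDC, and
# EDC is used as its hydrochloride salt.
MW_EDC_HCL = 191.70   # g/mol, EDC hydrochloride
MW_NHS = 115.09       # g/mol, N-hydroxysuccinimide
MW_EDA = 60.10        # g/mol, ethylenediamine

mol_edc = 0.0013
mol_nhs = mol_edc * 0.5          # 1 : 0.5 molar ratio
mol_eda = 0.017 * 0.003          # 0.017 M in 3 ml

print(f"EDC:             {mol_edc * MW_EDC_HCL * 1000:.0f} mg")   # ~249 mg
print(f"NHS:             {mol_nhs * MW_NHS * 1000:.0f} mg")       # ~75 mg
print(f"ethylenediamine: {mol_eda * MW_EDA * 1000:.2f} mg")       # ~3.07 mg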
After 24 hours these were rinsed again before mixing with polymer in a dilute aqueous solution. Two separate batches of vancomycin-modified polymer were used (7.3 and 7.5 mg), and others were combined with succinimide-modified polymer with no antimicrobial functionality (5.8 mg). The last batch was mixed with a fluorescein-containing polymer to allow leaching tests of the contact lens to be carried out (5.0 mg).

Clinical isolates of S. aureus and P. aeruginosa (1 × 10⁸) were FITC-labelled and directly incubated with polymer-linked lenses for 1 hour at 37 °C. Lenses were washed and imaged using a light or fluorescence microscope, or finely minced and the numbers of viable bacteria recovered enumerated. Cornea or corneal epithelial cell viability was assessed using Alamar Blue after 48 h exposure to varying concentrations of soluble van and pmx polymer (1–5 mg ml⁻¹). After application and removal of polymer-functionalised lenses, their ability to bind and remove S. aureus or P. aeruginosa from the infected ex vivo corneas was assessed by light microscopy and histology.

Via this protocol we have been able to demonstrate that almost any suitable hydrogel material may be functionalised with the highly branched polymer additive to increase its affinity for bacterial or fungal isolates. However, in our studies we found that, whilst the resultant modified contact lens attached a high number of the desired bacteria, the binding was far less specific than that provided by the GMMA hydrogel. Depending on the target use of the product (the desired specificity), it is nonetheless a viable alternative to fabrication of custom-made hydrogels as outlined in the steps above. If the substrate (hydrogel) material used is sufficiently absorbent to bacteria, then non-specific binding may be observed, as shown in Fig. 4. When the non-specific binding of this system was discovered we suspected it was due to the residual diamine groups, and so work was undertaken to block the diamine by addition of 100% acetic acid (1 ml) and EDC (5 mg) to reduce non-specific bacterial adherence; however, this was not entirely successful (Fig. 5). Use of commercial contact lens materials also complicates the dye staining process used to reveal bacterial or fungal isolates: the formulation used in the tested lenses responded to a range of the dyes used for testing, as shown in Fig. 6.

Accuracy and sensitivity

The work shown in this paper describes a detection technique which requires both accuracy (the ability to correctly disclose positives and to avoid false positives) and as low a detection limit as possible. In all developmental laboratory work and pre-clinical in vivo animal trials carried out on infected rabbit corneas, a 100% detection success rate was achieved within a 30-minute exposure, with no toxic or immunogenic response seen in the animals in the four weeks following testing. In laboratory testing the contact lenses indicated a sensitivity of approximately 10⁴ CFU by luminescence on a plate reader, or even discrimination between 10¹ and 10² CFU when analysed using a microscope. These figures were obtained with the optimised GMMA hydrogel backbone and, as this work has shown, will vary if other hydrogel bases or fluorescent dye systems are employed; a sketch of how such a standard-curve readout might be computed follows below.
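To make the plate-reader readout concrete, the sketch below shows one plausible way to convert a luminescence reading into a CFU estimate via log-log interpolation on an ATP standard curve. The calibration pairs are invented placeholders; a real curve would be generated from the kit's ATP standards on the same instrument.

# Illustrative sketch of reading a CFU estimate off an ATP standard curve.
# The RLU calibration points are invented placeholders.
import math

# (CFU, relative light units) calibration pairs, assumed log-linear
curve = [(1e4, 120.0), (1e5, 1.1e3), (1e6, 9.8e3), (1e7, 1.05e5)]

def estimate_cfu(rlu):
    """Interpolate a CFU estimate from a luminescence reading (RLU)."""
    pts = [(math.log10(c), math.log10(r)) for c, r in curve]
    y = math.log10(rlu)
    for (x0, y0), (x1, y1) in zip(pts, pts[1:]):
        if y0 <= y <= y1:
            return 10 ** (x0 + (y - y0) * (x1 - x0) / (y1 - y0))
    raise ValueError("RLU outside calibrated range (~10^4 CFU detection limit)")

print(f"{estimate_cfu(5.0e3):.2e} CFU")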
Conclusions

This paper describes two methods for rapidly quantifying bacterial or fungal burden on surfaces using a hydrogel contact-lens-style swab and disclosing dyes. The first method has been shown to be highly selective and can be employed by laboratories with the equipment to produce their own hydrogel sheets. The second is a method of modifying any acid-functional hydrogel material (such as a commercial contact lens) to create a similar product, with the knowledge that some specificity may be lost and additional work may be required to adjust the disclosing dyes following swabbing. However, both approaches have been shown to function well at binding and then reporting bacterial burden on ex-vivo corneal surfaces. The main advantage of this method compared to microbiological culture is rapid identification of the infection type within half an hour (Gram-positive, Gram-negative or fungal), which would provide a clinical indicator for tertiary care workers before a prescription is provided. The materials can be fabricated and stored for several months, meaning that following their distribution they can be employed in any remote setting as long as the required dyes (fluorescent vancomycin, FITC and calcofluor white) are provided as a disclosure kit.

Declaration of Competing Interests

We gratefully acknowledge support for this research by the Wellcome Trust, which provided funding for Swift, Pinnock and Shivshetty (Grant 0998800/B/12/Z).
Stephen Colbert consulted a magic goose on his show, "The Colbert Report", to decide whether or not he will hold his own Glenn Beck-style rally: a "Restoring Truthiness" gathering. Glenn Beck held a rally in Washington, D.C., two weeks ago, calling it a "Restoring Honor" rally. Beck said that 1.7 million people turned up; the press reported that about 87,000 people attended. So far Colbert has had 30,000 fans sign a petition for him to hold his "Restoring Truthiness" gathering. The reason Colbert consulted a goose was that during Beck's rally a flock of geese flew over the crowd. Beck called it a "miracle" and said it was "God's flyover". Colbert also pointed out that birds tend to take flight when tens of thousands of people applaud and shout. Colbert called his bird his "God Goose" and then went on to consult a glass of Grey Goose vodka. He still hasn't been able to make a decision on whether his gathering will go ahead.
#!/usr/bin/env node
import commandLineUsage from 'command-line-usage';
import { Section } from 'command-line-usage';
import dotenv from 'dotenv';
import minimist from 'minimist';
import path from 'path';

import { createWorld } from '../processors';
import {
  createMenuBasedRepairFunction,
  createSimpleRepairFunction,
  fail,
  formatScoredSuite,
  handleError,
  RepairFunction,
  scoreSuite,
  succeed,
  FormatScoredSuiteOptions,
} from '../core/test_suite';
import { loadLogicalValidationSuite, writeYAML } from '../test_suite';

function main(): never {
  dotenv.config();
  const args = minimist(process.argv.slice(2));

  if (args.h || args.help) {
    showUsage();
    succeed(false);
  }

  if (args._.length !== 3 && args._.length !== 2) {
    fail('Error: expected two or three command line parameters.');
  }

  const expectedFile = args._[0];
  const observedFile = args._[1];
  const scoredFile = args._[2];

  let dataPath: string | undefined;
  if (!args.s) {
    dataPath = process.env.PRIX_FIXE_DATA;
    if (args.d) {
      dataPath = args.d;
    }
    if (dataPath === undefined) {
      const message =
        'Use -d flag or PRIX_FIXE_DATA environment variable to specify data path';
      return fail(message);
    }
  }

  try {
    evaluate(expectedFile, observedFile, scoredFile, dataPath, args.v === true);
  } catch (e) {
    handleError(e);
  }

  succeed(true);
}

function evaluate(
  expectedFile: string,
  observedFile: string,
  outputFile: string | undefined,
  dataPath: string | undefined,
  verbose: boolean
) {
  console.log('Comparing');
  console.log(`  expected validation suite: ${expectedFile}`);
  console.log(`  observed validation suite: ${observedFile}`);
  console.log(' ');
  if (dataPath) {
    console.log(`Computing repair cost with menu files from ${dataPath}.`);
  } else {
    console.log("Using simple repair costs that don't require menu files.");
  }
  console.log(' ');

  // Load the expected validation suite.
  const expectedSuite = loadLogicalValidationSuite(expectedFile);
  const observedSuite = loadLogicalValidationSuite(observedFile);

  let repairs: RepairFunction;
  let notes: string;
  if (dataPath) {
    // Load the world, which provides the AttributeInfo and ICatalog.
    const world = createWorld(dataPath);
    repairs = createMenuBasedRepairFunction(world.attributeInfo, world.catalog);
    notes = 'Menu-based repairs, createWorld';
  } else {
    repairs = createSimpleRepairFunction();
    notes = 'Simple repairs';
  }

  const scoredSuite = scoreSuite(observedSuite, expectedSuite, repairs, notes);

  const options: FormatScoredSuiteOptions = {
    showDetails: true,
    showPassing: false,
    showFailing: verbose,
    showBySuite: true,
    showMeasures: true,
  };
  const lines: string[] = [];
  formatScoredSuite(lines, scoredSuite, options);
  for (const line of lines) {
    console.log(line);
  }

  if (outputFile) {
    console.log(`Writing scored suite to ${outputFile}`);
    writeYAML(outputFile, scoredSuite);
  }

  console.log('Scoring complete');
  console.log('');

  return succeed(true);
}

function showUsage() {
  const program = path.basename(process.argv[1]);

  const usage: Section[] = [
    {
      header: 'Suite evaluation tool',
      content:
        'This utility computes perfect cart, complete cart, and repair cost metrics.',
    },
    {
      header: 'Usage',
      content: [
        `node ${program} <expected file> <observed file> [output file] [...options]`,
      ],
    },
    {
      header: 'Required Parameters',
      content: [
        {
          name: '<expected file>',
          summary:
            'Path to a LogicalValidationSuite file with the expected carts.',
        },
        {
          name: '<observed file>',
          summary:
            'Path to a LogicalValidationSuite file with the observed carts.',
        },
        {
          name: '<output file>',
          summary:
            'Path where the LogicalScoredSuite file will be written. This file is made by adding a measures field to each step in the observed suite.',
        },
      ],
    },
    {
      header: 'Options',
      optionList: [
        {
          name: 'datapath',
          alias: 'd',
          description:
            `Path to prix-fixe data files used for menu-based repairs.\n
                - menu.yaml
                The {bold -d} flag overrides the value specified in the {bold PRIX_FIXE_DATA} environment variable.\n`,
          type: String,
        },
        {
          name: 's',
          alias: 's',
          description:
            "Use simple repair cost scoring that doesn't require menu files.\n" +
            'The -d option and PRIX_FIXE_DATA are not required when using -s.',
          type: Boolean,
        },
        {
          name: 'verbose',
          alias: 'v',
          description: 'Print out failing test cases',
          type: Boolean,
        },
        {
          name: 'help',
          alias: 'h',
          description: 'Print help message',
          type: Boolean,
        },
      ],
    },
  ];

  console.log(commandLineUsage(usage));
}

main();
// src/test/common/platform/filesystem.unit.test.ts
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
import { expect } from 'chai';
import * as fs from 'fs';
import * as fsextra from 'fs-extra';
import * as TypeMoq from 'typemoq';
import * as vscode from 'vscode';
import { FileSystemUtils, RawFileSystem } from '../../../client/common/platform/fileSystem';
import {
    FileStat, FileType,
    // These interfaces are needed for FileSystemUtils deps.
    IFileSystemPaths, IFileSystemPathUtils, IRawFileSystem,
    ITempFileSystem,
    ReadStream, WriteStream
} from '../../../client/common/platform/types';

// tslint:disable:max-func-body-length chai-vague-errors

function createDummyStat(filetype: FileType): FileStat {
    //tslint:disable-next-line:no-any
    return { type: filetype } as any;
}

interface IPaths {
    // fs paths (IFileSystemPaths)
    sep: string;
    join(...filenames: string[]): string;
}

interface IRawFS extends IPaths {
    // vscode.workspace.fs
    stat(uri: vscode.Uri): Thenable<FileStat>;

    // "fs-extra"
    stat(filename: string): Promise<fs.Stats>;
    lstat(filename: string): Promise<fs.Stats>;
    readdir(dirname: string): Promise<string[]>;
    readFile(filename: string): Promise<Buffer>;
    readFile(filename: string, encoding: string): Promise<string>;
    mkdirp(dirname: string): Promise<void>;
    chmod(filePath: string, mode: string | number): Promise<void>;
    rename(src: string, tgt: string): Promise<void>;
    writeFile(filename: string, data: {}, options: {}): Promise<void>;
    appendFile(filename: string, data: {}): Promise<void>;
    unlink(filename: string): Promise<void>;
    rmdir(dirname: string): Promise<void>;
    readFileSync(path: string, encoding: string): string;
    createReadStream(filename: string): ReadStream;
    createWriteStream(filename: string): WriteStream;
}

suite('Raw FileSystem', () => {
    let raw: TypeMoq.IMock<IRawFS>;
    let oldStats: TypeMoq.IMock<fs.Stats>[];
    let filesystem: RawFileSystem;
    setup(() => {
        raw = TypeMoq.Mock.ofType<IRawFS>(undefined, TypeMoq.MockBehavior.Strict);
        oldStats = [];
        filesystem = new RawFileSystem(
            // Since it's a mock we can just use it for all 3 values.
            raw.object, raw.object, raw.object
        );
    });
    function verifyAll() {
        raw.verifyAll();
        oldStats.forEach(stat => {
            stat.verifyAll();
        });
    }
    function createMockLegacyStat(): TypeMoq.IMock<fsextra.Stats> {
        const stat = TypeMoq.Mock.ofType<fsextra.Stats>(undefined, TypeMoq.MockBehavior.Strict);
        // This is necessary because passing "mock.object" to
        // Promise.resolve() triggers the lookup.
        //tslint:disable-next-line:no-any
        stat.setup((s: any) => s.then)
            .returns(() => undefined)
            .verifiable(TypeMoq.Times.atLeast(0));
        oldStats.push(stat);
        return stat;
    }
    function setupStatFileType(stat: TypeMoq.IMock<fs.Stats>, filetype: FileType) {
        // This mirrors the logic in convertFileType().
if (filetype === FileType.File) { stat.setup(s => s.isFile()) .returns(() => true) .verifiable(TypeMoq.Times.atLeastOnce()); } else if (filetype === FileType.Directory) { stat.setup(s => s.isFile()) .returns(() => false) .verifiable(TypeMoq.Times.atLeastOnce()); stat.setup(s => s.isDirectory()) .returns(() => true) .verifiable(TypeMoq.Times.atLeastOnce()); } else if ((filetype & FileType.SymbolicLink) > 0) { stat.setup(s => s.isFile()) .returns(() => false) .verifiable(TypeMoq.Times.atLeastOnce()); stat.setup(s => s.isDirectory()) .returns(() => false) .verifiable(TypeMoq.Times.atLeastOnce()); stat.setup(s => s.isSymbolicLink()) .returns(() => true) .verifiable(TypeMoq.Times.atLeastOnce()); } else if (filetype === FileType.Unknown) { stat.setup(s => s.isFile()) .returns(() => false) .verifiable(TypeMoq.Times.atLeastOnce()); stat.setup(s => s.isDirectory()) .returns(() => false) .verifiable(TypeMoq.Times.atLeastOnce()); stat.setup(s => s.isSymbolicLink()) .returns(() => false) .verifiable(TypeMoq.Times.atLeastOnce()); } else { throw Error(`unsupported file type ${filetype}`); } } suite('stat', () => { test('wraps the low-level function', async () => { const filename = 'x/y/z/spam.py'; const expected = createDummyStat(FileType.File); raw.setup(r => r.stat(vscode.Uri.file(filename))) // expect the specific filename .returns(() => Promise.resolve(expected)); const stat = await filesystem.stat(filename); expect(stat).to.equal(expected); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.stat(TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); const promise = filesystem.stat('spam.py'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('lstat', () => { function copyStat(stat: FileStat, old: TypeMoq.IMock<fsextra.Stats>) { old.setup(s => s.size) // plug in the original value .returns(() => stat.size); old.setup(s => s.ctimeMs) // plug in the original value .returns(() => stat.ctime); old.setup(s => s.mtimeMs) // plug in the original value .returns(() => stat.mtime); } [ { kind: 'file', filetype: FileType.File }, { kind: 'dir', filetype: FileType.Directory }, { kind: 'symlink', filetype: FileType.SymbolicLink }, { kind: 'unknown', filetype: FileType.Unknown } ].forEach(testData => { test(`wraps the low-level function (filetype: ${testData.kind}`, async () => { const filename = 'x/y/z/spam.py'; const expected: FileStat = { type: testData.filetype, size: 10, ctime: 101, mtime: 102 //tslint:disable-next-line:no-any } as any; const old = createMockLegacyStat(); setupStatFileType(old, testData.filetype); copyStat(expected, old); raw.setup(r => r.lstat(filename)) // expect the specific filename .returns(() => Promise.resolve(old.object)); const stat = await filesystem.lstat(filename); expect(stat).to.deep.equal(expected); verifyAll(); }); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.lstat(TypeMoq.It.isAny())) // We don't care about the filename. 
.throws(new Error('file not found')); const promise = filesystem.lstat('spam.py'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('chmod', () => { test('passes through a string mode', async () => { const filename = 'x/y/z/spam.py'; const mode = '755'; raw.setup(r => r.chmod(filename, mode)) // expect the specific filename .returns(() => Promise.resolve()); await filesystem.chmod(filename, mode); verifyAll(); }); test('passes through an int mode', async () => { const filename = 'x/y/z/spam.py'; const mode = 0o755; raw.setup(r => r.chmod(filename, mode)) // expect the specific filename .returns(() => Promise.resolve()); await filesystem.chmod(filename, mode); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.chmod(TypeMoq.It.isAny(), TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); const promise = filesystem.chmod('spam.py', 755); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('move', () => { test('wraps the low-level function', async () => { const src = 'x/y/z/spam.py'; const tgt = 'x/y/spam.py'; raw.setup(r => r.rename(src, tgt)) // expect the specific filename .returns(() => Promise.resolve()); await filesystem.move(src, tgt); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.rename(TypeMoq.It.isAny(), TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); const promise = filesystem.move('spam', 'eggs'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('readData', () => { test('wraps the low-level function', async () => { const filename = 'x/y/z/spam.py'; const expected = Buffer.from('<data>'); raw.setup(r => r.readFile(filename)) // expect the specific filename .returns(() => Promise.resolve(expected)); const data = await filesystem.readData(filename); expect(data).to.equal(expected); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.readFile(TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); const promise = filesystem.readData('spam.py'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('readText', () => { test('wraps the low-level function', async () => { const filename = 'x/y/z/spam.py'; const expected = '<text>'; raw.setup(r => r.readFile(filename, 'utf8')) // expect the specific filename .returns(() => Promise.resolve(expected)); const text = await filesystem.readText(filename); expect(text).to.equal(expected); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.readFile(TypeMoq.It.isAny(), TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); const promise = filesystem.readText('spam.py'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('writeText', () => { test('wraps the low-level function', async () => { const filename = 'x/y/z/spam.py'; const text = '<text>'; raw.setup(r => r.writeFile(filename, text, { encoding: 'utf8' })) // expect the specific filename .returns(() => Promise.resolve()); await filesystem.writeText(filename, text); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.writeFile(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())) // We don't care about the filename. 
.throws(new Error('file not found')); const promise = filesystem.writeText('spam.py', '<text>'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('appendText', () => { test('wraps the low-level function', async () => { const filename = 'x/y/z/spam.py'; const text = '<text>'; raw.setup(r => r.appendFile(filename, text)) // expect the specific filename .returns(() => Promise.resolve()); await filesystem.appendText(filename, text); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.appendFile(TypeMoq.It.isAny(), TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); const promise = filesystem.appendText('spam.py', '<text>'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('copyFile', () => { type StreamCallbacks = { err(err: Error): void; close(): void; }; function setupMocks(src: string, tgt: string): { r: StreamCallbacks; w: StreamCallbacks } { const callbacks = { //tslint:disable-next-line:no-any r: ({} as any) as StreamCallbacks, //tslint:disable-next-line:no-any w: ({} as any) as StreamCallbacks }; const wstream = TypeMoq.Mock.ofType<fs.WriteStream>(undefined, TypeMoq.MockBehavior.Strict); wstream .setup(s => s.on('error', TypeMoq.It.isAny())) .callback((_e, cb) => { callbacks.w.err = cb; }) .returns(() => wstream.object); wstream .setup(s => s.on('close', TypeMoq.It.isAny())) .callback((_e, cb) => { callbacks.w.close = cb; }) .returns(() => wstream.object); wstream //tslint:disable-next-line:no-any .setup((s: any) => s.___matches) // typemoq sometimes outsmarts itself .returns(() => undefined); raw.setup(r => r.createWriteStream(tgt)) // expect the specific filename .returns(() => wstream.object); const rstream = TypeMoq.Mock.ofType<fs.ReadStream>(undefined, TypeMoq.MockBehavior.Strict); rstream .setup(s => s.on('error', TypeMoq.It.isAny())) .callback((_e, cb) => { callbacks.r.err = cb; }) .returns(() => rstream.object); rstream.setup(s => s.pipe(wstream.object)); raw.setup(r => r.createReadStream(src)) // expect the specific filename .returns(() => rstream.object); return callbacks; } test('wraps the low-level function', async () => { const src = 'spam.py'; const tgt = 'eggs.py'; const cb = setupMocks(src, tgt); // Due to the use of deferred, we must call the handler // registered on the stream in order to make the promise // resolve. const promise = filesystem.copyFile(src, tgt); cb.w.close(); await promise; verifyAll(); }); test('fails if createReadStream fails', async () => { raw.setup(r => r.createReadStream(TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); const promise = filesystem.copyFile('spam', 'eggs'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); test('fails if createWriteStream fails', async () => { const rstream = TypeMoq.Mock.ofType<fs.ReadStream>(undefined, TypeMoq.MockBehavior.Strict); rstream.setup(s => s.on('error', TypeMoq.It.isAny())); raw.setup(r => r.createReadStream(TypeMoq.It.isAny())) // We don't care about the filename. 
.throws(new Error('file not found')); const promise = filesystem.copyFile('spam', 'eggs'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); test('fails if read stream errors out', async () => { const src = 'spam.py'; const tgt = 'eggs.py'; const cb = setupMocks(src, tgt); const promise = filesystem.copyFile(src, tgt); cb.r.err(new Error('oops!')); await expect(promise).to.eventually.be.rejected; verifyAll(); }); test('fails if write stream errors out', async () => { const src = 'spam.py'; const tgt = 'eggs.py'; const cb = setupMocks(src, tgt); const promise = filesystem.copyFile(src, tgt); cb.w.err(new Error('oops!')); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('rmFile', () => { test('wraps the low-level function', async () => { const filename = 'x/y/z/spam.py'; raw.setup(r => r.unlink(filename)) // expect the specific filename .returns(() => Promise.resolve()); await filesystem.rmfile(filename); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.unlink(TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); const promise = filesystem.rmfile('spam.py'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('mkdirp', () => { test('wraps the low-level function', async () => { const dirname = 'x/y/z/spam'; raw.setup(r => r.mkdirp(dirname)) // expect the specific filename .returns(() => Promise.resolve()); await filesystem.mkdirp(dirname); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.mkdirp(TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); const promise = filesystem.mkdirp('spam'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('rmtree', () => { test('wraps the low-level function', async () => { const dirname = 'x/y/z/spam'; raw.setup(r => r.rmdir(dirname)) // expect the specific filename .returns(() => Promise.resolve()); await filesystem.rmtree(dirname); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.rmdir(TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); const promise = filesystem.rmtree('spam'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('listdir', () => { function setupForFileType(filename: string, filetype: FileType) { const lstat = createMockLegacyStat(); if ((filetype & FileType.SymbolicLink) > 0) { lstat .setup(s => s.isSymbolicLink()) // we don't care about any other type here .returns(() => true); const stat = createMockLegacyStat(); // filetype won't be Unknown here. setupStatFileType(stat, filetype - FileType.SymbolicLink); raw.setup(r => r.stat(filename)) // expect the specific filename .returns(() => Promise.resolve(stat.object)); } else { lstat .setup(s => s.isSymbolicLink()) .returns(() => false) .verifiable(TypeMoq.Times.atLeastOnce()); setupStatFileType(lstat, filetype); } raw.setup(r => r.lstat(filename)) // expect the specific filename .returns(() => Promise.resolve(lstat.object)); } test('mixed', async () => { const dirname = 'x/y/z/spam'; const names = [ // These match the items in "expected". 
'dev1', 'w', 'spam.py', 'other' ]; const expected: [string, FileType][] = [ ['x/y/z/spam/dev1', FileType.Unknown], ['x/y/z/spam/w', FileType.Directory], ['x/y/z/spam/spam.py', FileType.File], ['x/y/z/spam/other', FileType.SymbolicLink | FileType.File] ]; raw.setup(r => r.readdir(dirname)) // expect the specific filename .returns(() => Promise.resolve(names)); names.forEach((name, i) => { const [filename, filetype] = expected[i]; raw.setup(r => r.join(dirname, name)) // expect the specific filename .returns(() => filename); setupForFileType(filename, filetype); }); const entries = await filesystem.listdir(dirname); expect(entries).to.deep.equal(expected); verifyAll(); }); test('empty', async () => { const dirname = 'x/y/z/spam'; const expected: [string, FileType][] = []; raw.setup(r => r.readdir(dirname)) // expect the specific filename .returns(() => Promise.resolve([])); const entries = await filesystem.listdir(dirname); expect(entries).to.deep.equal(expected); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.readdir(TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); const promise = filesystem.listdir('spam'); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('readTextSync', () => { test('wraps the low-level function', () => { const filename = 'x/y/z/spam.py'; const expected = '<text>'; raw.setup(r => r.readFileSync(filename, 'utf8')) // expect the specific filename .returns(() => expected); const text = filesystem.readTextSync(filename); expect(text).to.equal(expected); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.readFileSync(TypeMoq.It.isAny(), TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); expect(() => filesystem.readTextSync('spam.py')).to.throw(); verifyAll(); }); }); suite('createReadStream', () => { test('wraps the low-level function', () => { const filename = 'x/y/z/spam.py'; //tslint:disable-next-line:no-any const expected = {} as any; raw.setup(r => r.createReadStream(filename)) // expect the specific filename .returns(() => expected); const stream = filesystem.createReadStream(filename); expect(stream).to.equal(expected); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.createReadStream(TypeMoq.It.isAny())) // We don't care about the filename. .throws(new Error('file not found')); expect(() => filesystem.createReadStream('spam.py')).to.throw(); verifyAll(); }); }); suite('createWriteStream', () => { test('wraps the low-level function', () => { const filename = 'x/y/z/spam.py'; //tslint:disable-next-line:no-any const expected = {} as any; raw.setup(r => r.createWriteStream(filename)) // expect the specific filename .returns(() => expected); const stream = filesystem.createWriteStream(filename); expect(stream).to.equal(expected); verifyAll(); }); test('fails if the low-level call fails', async () => { raw.setup(r => r.createWriteStream(TypeMoq.It.isAny())) // We don't care about the filename. 
.throws(new Error('file not found')); expect(() => filesystem.createWriteStream('spam.py')).to.throw(); verifyAll(); }); }); }); interface IUtilsDeps extends IRawFileSystem, IFileSystemPaths, IFileSystemPathUtils, ITempFileSystem { // fs open(path: string, flags: string | number, mode?: string | number | null): Promise<number>; close(fd: number): Promise<void>; unlink(path: string): Promise<void>; existsSync(path: string): boolean; // helpers getHash(data: string): string; globFile(pat: string, options?: { cwd: string }): Promise<string[]>; } suite('FileSystemUtils', () => { let deps: TypeMoq.IMock<IUtilsDeps>; let stats: TypeMoq.IMock<FileStat>[]; let utils: FileSystemUtils; setup(() => { deps = TypeMoq.Mock.ofType<IUtilsDeps>(undefined, TypeMoq.MockBehavior.Strict); stats = []; utils = new FileSystemUtils( // Since it's a mock we can just use it for all 3 values. deps.object, // rawFS deps.object, // pathUtils deps.object, // paths deps.object, // tempFS deps.object, // fs (data: string) => deps.object.getHash(data), (pat: string, options?: { cwd: string }) => deps.object.globFile(pat, options) ); }); function verifyAll() { deps.verifyAll(); stats.forEach(stat => { stat.verifyAll(); }); } function createMockStat(): TypeMoq.IMock<FileStat> { const stat = TypeMoq.Mock.ofType<FileStat>(undefined, TypeMoq.MockBehavior.Strict); // This is necessary because passing "mock.object" to // Promise.resolve() triggers the lookup. //tslint:disable-next-line:no-any stat.setup((s: any) => s.then) .returns(() => undefined) .verifiable(TypeMoq.Times.atLeast(0)); stats.push(stat); return stat; } suite('createDirectory', () => { test('wraps the low-level function', async () => { const dirname = 'x/y/z/spam'; deps.setup(d => d.mkdirp(dirname)) // expect the specific filename .returns(() => Promise.resolve()); await utils.createDirectory(dirname); verifyAll(); }); }); suite('deleteDirectory', () => { test('wraps the low-level function', async () => { const dirname = 'x/y/z/spam'; deps.setup(d => d.rmtree(dirname)) // expect the specific filename .returns(() => Promise.resolve()); await utils.deleteDirectory(dirname); verifyAll(); }); }); suite('deleteFile', () => { test('wraps the low-level function', async () => { const filename = 'x/y/z/spam.py'; deps.setup(d => d.rmfile(filename)) // expect the specific filename .returns(() => Promise.resolve()); await utils.deleteFile(filename); verifyAll(); }); }); suite('pathExists', () => { test('exists (without type)', async () => { const filename = 'x/y/z/spam.py'; const stat = createMockStat(); deps.setup(d => d.stat(filename)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.pathExists(filename); expect(exists).to.equal(true); verifyAll(); }); test('does not exist', async () => { const filename = 'x/y/z/spam.py'; const err = vscode.FileSystemError.FileNotFound(filename); deps.setup(d => d.stat(filename)) // The file does not exist. .throws(err); const exists = await utils.pathExists(filename); expect(exists).to.equal(false); verifyAll(); }); test('fails if stat fails', async () => { const filename = 'x/y/z/spam.py'; const err = new Error('oops!'); deps.setup(d => d.stat(filename)) // There was a problem while stat'ing the file. .throws(err); const promise = utils.pathExists(filename); await expect(promise).to.eventually.be.rejected; verifyAll(); }); test('matches (type: undefined)', async () => { const filename = 'x/y/z/spam.py'; const stat = createMockStat(); deps.setup(d => d.stat(filename)) // The "file" exists. 
.returns(() => Promise.resolve(stat.object)); const exists = await utils.pathExists(filename); expect(exists).to.equal(true); verifyAll(); }); test('matches (type: file)', async () => { const filename = 'x/y/z/spam.py'; const stat = createMockStat(); stat.setup(s => s.type) // It's a file. .returns(() => FileType.File); deps.setup(d => d.stat(filename)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.pathExists(filename, FileType.File); expect(exists).to.equal(true); verifyAll(); }); test('mismatch (type: file)', async () => { const filename = 'x/y/z/spam.py'; const stat = createMockStat(); stat.setup(s => s.type) // It's a directory. .returns(() => FileType.Directory); deps.setup(d => d.stat(filename)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.pathExists(filename, FileType.File); expect(exists).to.equal(false); verifyAll(); }); test('matches (type: directory)', async () => { const dirname = 'x/y/z/spam.py'; const stat = createMockStat(); stat.setup(s => s.type) // It's a directory. .returns(() => FileType.Directory); deps.setup(d => d.stat(dirname)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.pathExists(dirname, FileType.Directory); expect(exists).to.equal(true); verifyAll(); }); test('mismatch (type: directory)', async () => { const dirname = 'x/y/z/spam.py'; const stat = createMockStat(); stat.setup(s => s.type) // It's a file. .returns(() => FileType.File); deps.setup(d => d.stat(dirname)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.pathExists(dirname, FileType.Directory); expect(exists).to.equal(false); verifyAll(); }); test('symlinks are followed', async () => { const symlink = 'x/y/z/spam.py'; const stat = createMockStat(); stat.setup(s => s.type) // It's a symlink to a file. .returns(() => FileType.File | FileType.SymbolicLink) .verifiable(TypeMoq.Times.exactly(3)); deps.setup(d => d.stat(symlink)) // The "file" exists. .returns(() => Promise.resolve(stat.object)) .verifiable(TypeMoq.Times.exactly(3)); const exists = await utils.pathExists(symlink, FileType.SymbolicLink); const destIsFile = await utils.pathExists(symlink, FileType.File); const destIsDir = await utils.pathExists(symlink, FileType.Directory); expect(exists).to.equal(true); expect(destIsFile).to.equal(true); expect(destIsDir).to.equal(false); verifyAll(); }); test('mismatch (type: symlink)', async () => { const filename = 'x/y/z/spam.py'; const stat = createMockStat(); stat.setup(s => s.type) // It's a file. .returns(() => FileType.File); deps.setup(d => d.stat(filename)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.pathExists(filename, FileType.SymbolicLink); expect(exists).to.equal(false); verifyAll(); }); test('matches (type: unknown)', async () => { const sockFile = 'x/y/z/ipc.sock'; const stat = createMockStat(); stat.setup(s => s.type) // It's a socket. .returns(() => FileType.Unknown); deps.setup(d => d.stat(sockFile)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.pathExists(sockFile, FileType.Unknown); expect(exists).to.equal(true); verifyAll(); }); test('mismatch (type: unknown)', async () => { const filename = 'x/y/z/spam.py'; const stat = createMockStat(); stat.setup(s => s.type) // It's a file. .returns(() => FileType.File); deps.setup(d => d.stat(filename)) // The "file" exists. 
.returns(() => Promise.resolve(stat.object)); const exists = await utils.pathExists(filename, FileType.Unknown); expect(exists).to.equal(false); verifyAll(); }); }); suite('fileExists', () => { test('want file, got file', async () => { const filename = 'x/y/z/spam.py'; const stat = createMockStat(); stat.setup(s => s.type) // It's a File. .returns(() => FileType.File); deps.setup(d => d.stat(filename)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.fileExists(filename); expect(exists).to.equal(true); verifyAll(); }); test('want file, not file', async () => { const filename = 'x/y/z/spam.py'; const stat = createMockStat(); stat.setup(s => s.type) // It's a directory. .returns(() => FileType.Directory); deps.setup(d => d.stat(filename)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.fileExists(filename); expect(exists).to.equal(false); verifyAll(); }); test('symlink', async () => { const symlink = 'x/y/z/spam.py'; const stat = createMockStat(); stat.setup(s => s.type) // It's a symlink to a File. .returns(() => FileType.File | FileType.SymbolicLink); deps.setup(d => d.stat(symlink)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.fileExists(symlink); // This is because we currently use stat() and not lstat(). expect(exists).to.equal(true); verifyAll(); }); test('unknown', async () => { const sockFile = 'x/y/z/ipc.sock'; const stat = createMockStat(); stat.setup(s => s.type) // It's a socket. .returns(() => FileType.Unknown); deps.setup(d => d.stat(sockFile)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.fileExists(sockFile); expect(exists).to.equal(false); verifyAll(); }); }); suite('directoryExists', () => { test('want directory, got directory', async () => { const dirname = 'x/y/z/spam'; const stat = createMockStat(); stat.setup(s => s.type) // It's a directory. .returns(() => FileType.Directory); deps.setup(d => d.stat(dirname)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.directoryExists(dirname); expect(exists).to.equal(true); verifyAll(); }); test('want directory, not directory', async () => { const dirname = 'x/y/z/spam'; const stat = createMockStat(); stat.setup(s => s.type) // It's a file. .returns(() => FileType.File); deps.setup(d => d.stat(dirname)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.directoryExists(dirname); expect(exists).to.equal(false); verifyAll(); }); test('symlink', async () => { const symlink = 'x/y/z/spam'; const stat = createMockStat(); stat.setup(s => s.type) // It's a symlink to a directory. .returns(() => FileType.Directory | FileType.SymbolicLink); deps.setup(d => d.stat(symlink)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const exists = await utils.directoryExists(symlink); // This is because we currently use stat() and not lstat(). expect(exists).to.equal(true); verifyAll(); }); test('unknown', async () => { const sockFile = 'x/y/z/ipc.sock'; const stat = createMockStat(); stat.setup(s => s.type) // It's a socket. .returns(() => FileType.Unknown); deps.setup(d => d.stat(sockFile)) // The "file" exists. 
.returns(() => Promise.resolve(stat.object)); const exists = await utils.directoryExists(sockFile); expect(exists).to.equal(false); verifyAll(); }); }); suite('listdir', () => { test('wraps the raw call on success', async () => { const dirname = 'x/y/z/spam'; const expected: [string, FileType][] = [ ['x/y/z/spam/dev1', FileType.Unknown], ['x/y/z/spam/w', FileType.Directory], ['x/y/z/spam/spam.py', FileType.File], ['x/y/z/spam/other', FileType.SymbolicLink | FileType.File] ]; deps.setup(d => d.listdir(dirname)) // Full results get returned from RawFileSystem.listdir(). .returns(() => Promise.resolve(expected)); const entries = await utils.listdir(dirname); expect(entries).to.deep.equal(expected); verifyAll(); }); test('returns [] if the directory does not exist', async () => { const dirname = 'x/y/z/spam'; const err = vscode.FileSystemError.FileNotFound(dirname); deps.setup(d => d.listdir(dirname)) // The "file" does not exist. .returns(() => Promise.reject(err)); deps.setup(d => d.stat(dirname)) // The "file" does not exist. .returns(() => Promise.reject(err)); const entries = await utils.listdir(dirname); expect(entries).to.deep.equal([]); verifyAll(); }); test('fails if not a directory', async () => { const dirname = 'x/y/z/spam'; const err = vscode.FileSystemError.FileNotADirectory(dirname); deps.setup(d => d.listdir(dirname)) // Fail (async) with not-a-directory. .returns(() => Promise.reject(err)); const stat = createMockStat(); deps.setup(d => d.stat(dirname)) // The "file" exists. .returns(() => Promise.resolve(stat.object)); const promise = utils.listdir(dirname); await expect(promise).to.eventually.be.rejected; verifyAll(); }); test('fails if the raw call promise fails', async () => { const dirname = 'x/y/z/spam'; const err = new Error('oops!'); deps.setup(d => d.listdir(dirname)) // Fail (async) with an arbitrary error. .returns(() => Promise.reject(err)); deps.setup(d => d.stat(dirname)) // Fail with file-not-found. .throws(vscode.FileSystemError.FileNotFound(dirname)); const entries = await utils.listdir(dirname); expect(entries).to.deep.equal([]); verifyAll(); }); test('fails if the raw call fails', async () => { const dirname = 'x/y/z/spam'; const err = new Error('oops!'); deps.setup(d => d.listdir(dirname)) // Fail with an arbirary error. .throws(err); const promise = utils.listdir(dirname); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('getSubDirectories', () => { test('filters out non-subdirs', async () => { const dirname = 'x/y/z/spam'; const entries: [string, FileType][] = [ ['x/y/z/spam/dev1', FileType.Unknown], ['x/y/z/spam/w', FileType.Directory], ['x/y/z/spam/spam.py', FileType.File], ['x/y/z/spam/v', FileType.Directory], ['x/y/z/spam/eggs.py', FileType.File], ['x/y/z/spam/other1', FileType.SymbolicLink | FileType.File], ['x/y/z/spam/other2', FileType.SymbolicLink | FileType.Directory] ]; const expected = [ // only entries with FileType.Directory 'x/y/z/spam/w', 'x/y/z/spam/v', 'x/y/z/spam/other2' ]; deps.setup(d => d.listdir(dirname)) // Full results get returned from RawFileSystem.listdir(). 
.returns(() => Promise.resolve(entries)); const filtered = await utils.getSubDirectories(dirname); expect(filtered).to.deep.equal(expected); verifyAll(); }); }); suite('getFiles', () => { test('filters out non-files', async () => { const filename = 'x/y/z/spam'; const entries: [string, FileType][] = [ ['x/y/z/spam/dev1', FileType.Unknown], ['x/y/z/spam/w', FileType.Directory], ['x/y/z/spam/spam.py', FileType.File], ['x/y/z/spam/v', FileType.Directory], ['x/y/z/spam/eggs.py', FileType.File], ['x/y/z/spam/other1', FileType.SymbolicLink | FileType.File], ['x/y/z/spam/other2', FileType.SymbolicLink | FileType.Directory] ]; const expected = [ // only entries with FileType.File 'x/y/z/spam/spam.py', 'x/y/z/spam/eggs.py', 'x/y/z/spam/other1' ]; deps.setup(d => d.listdir(filename)) // Full results get returned from RawFileSystem.listdir(). .returns(() => Promise.resolve(entries)); const filtered = await utils.getFiles(filename); expect(filtered).to.deep.equal(expected); verifyAll(); }); }); suite('isDirReadonly', () => { const flags = fs.constants.O_CREAT | fs.constants.O_RDWR; setup(() => { deps.setup(d => d.sep) // The value really doesn't matter. .returns(() => '/'); }); test('is readonly', async () => { const dirname = 'x/y/z/spam'; const fd = 10; const filename = `${dirname}/___vscpTest___`; deps.setup(d => d.open(filename, flags)) // Success! .returns(() => Promise.resolve(fd)); deps.setup(d => d.close(fd)) // Success! .returns(() => Promise.resolve()); deps.setup(d => d.unlink(filename)) // Success! .returns(() => Promise.resolve()); const isReadonly = await utils.isDirReadonly(dirname); expect(isReadonly).to.equal(false); verifyAll(); }); test('is not readonly', async () => { const dirname = 'x/y/z/spam'; const filename = `${dirname}/___vscpTest___`; const err = new Error('not permitted'); // tslint:disable-next-line:no-any (err as any).code = 'EACCES'; // errno deps.setup(d => d.open(filename, flags)) // not permitted .returns(() => Promise.reject(err)); const isReadonly = await utils.isDirReadonly(dirname); expect(isReadonly).to.equal(true); verifyAll(); }); test('fails if the directory does not exist', async () => { const dirname = 'x/y/z/spam'; const filename = `${dirname}/___vscpTest___`; const err = new Error('not found'); // tslint:disable-next-line:no-any (err as any).code = 'ENOENT'; // errno deps.setup(d => d.open(filename, flags)) // file-not-found .returns(() => Promise.reject(err)); const promise = utils.isDirReadonly(dirname); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('getFileHash', () => { test('Getting hash for a file should return non-empty string', async () => { const filename = 'x/y/z/spam.py'; const stat = createMockStat(); stat.setup(s => s.ctime) // created .returns(() => 100); stat.setup(s => s.mtime) // modified .returns(() => 120); deps.setup(d => d.lstat(filename)) // file exists .returns(() => Promise.resolve(stat.object)); deps.setup(d => d.getHash('100-120')) // built from ctime and mtime .returns(() => 'deadbeef'); const hash = await utils.getFileHash(filename); expect(hash).to.equal('deadbeef'); verifyAll(); }); test('Getting hash for non existent file should throw error', async () => { const filename = 'x/y/z/spam.py'; const err = vscode.FileSystemError.FileNotFound(filename); deps.setup(d => d.lstat(filename)) // file-not-found .returns(() => Promise.reject(err)); const promise = utils.getFileHash(filename); await expect(promise).to.eventually.be.rejected; verifyAll(); }); }); suite('search', () => { test('found matches 
(without cwd)', async () => { const pattern = `x/y/z/spam.*`; const expected: string[] = [ // We can pretend that there were other files // that were ignored. 'x/y/z/spam.py', 'x/y/z/spam.pyc', 'x/y/z/spam.so', 'x/y/z/spam.data' ]; deps.setup(d => d.globFile(pattern, undefined)) // found some .returns(() => Promise.resolve(expected)); const files = await utils.search(pattern); expect(files).to.deep.equal(expected); verifyAll(); }); test('found matches (with cwd)', async () => { const pattern = `x/y/z/spam.*`; const cwd = 'a/b/c'; const expected: string[] = [ // We can pretend that there were other files // that were ignored. 'x/y/z/spam.py', 'x/y/z/spam.pyc', 'x/y/z/spam.so', 'x/y/z/spam.data' ]; deps.setup(d => d.globFile(pattern, { cwd: cwd })) // found some .returns(() => Promise.resolve(expected)); const files = await utils.search(pattern, cwd); expect(files).to.deep.equal(expected); verifyAll(); }); test('no matches (empty)', async () => { const pattern = `x/y/z/spam.*`; deps.setup(d => d.globFile(pattern, undefined)) // found none .returns(() => Promise.resolve([])); const files = await utils.search(pattern); expect(files).to.deep.equal([]); verifyAll(); }); test('no matches (undefined)', async () => { const pattern = `x/y/z/spam.*`; deps.setup(d => d.globFile(pattern, undefined)) // found none .returns(() => Promise.resolve((undefined as unknown) as string[])); const files = await utils.search(pattern); expect(files).to.deep.equal([]); verifyAll(); }); }); suite('fileExistsSync', () => { test('file exists', async () => { const filename = 'x/y/z/spam.py'; deps.setup(d => d.existsSync(filename)) // The file exists. .returns(() => true); const exists = utils.fileExistsSync(filename); expect(exists).to.equal(true); verifyAll(); }); test('file does not exist', async () => { const filename = 'x/y/z/spam.py'; deps.setup(d => d.existsSync(filename)) // The file does not exist. .returns(() => false); const exists = utils.fileExistsSync(filename); expect(exists).to.equal(false); verifyAll(); }); test('fails if low-level call fails', async () => { const filename = 'x/y/z/spam.py'; const err = new Error('oops!'); deps.setup(d => d.existsSync(filename)) // big badda boom .throws(err); expect(() => utils.fileExistsSync(filename)).to.throw(err); verifyAll(); }); }); });
// Remove - remove entry read from clientContent channel. func (f *fsClient) Remove(isIncomplete, isRemoveBucket bool, contentCh <-chan *clientContent) <-chan *probe.Error { errorCh := make(chan *probe.Error) go func() { defer close(errorCh) for content := range contentCh { name := content.URL.Path if isIncomplete { name += partSuffix } if err := os.Remove(name); err != nil { if os.IsPermission(err) { errorCh <- probe.NewError(PathInsufficientPermission{Path: content.URL.Path}) } else { errorCh <- probe.NewError(err) return } } } }() return errorCh }
Reduced in vivo Ocular Surface Toxicity with Polyquad-Preserved Travoprost versus Benzalkonium-Preserved Travoprost or Latanoprost Ophthalmic Solutions

The study used a validated acute in vivo model to compare a new formulation of travoprost 0.004% ophthalmic solution (travoprost PQ), preserved with polyquaternium-1 (PQ), with commercially available formulations of benzalkonium-chloride (BAK)-preserved travoprost 0.004% ophthalmic solution (travoprost BAK) and BAK-preserved latanoprost 0.005% ophthalmic solution (latanoprost BAK). Adult male New Zealand albino rabbits (n = 36) were randomly divided into 6 groups. Phosphate-buffered saline (PBS), 0.001% PQ, 0.015% BAK, travoprost PQ, travoprost BAK or latanoprost BAK was applied onto rabbit eyes as 1 drop, 15 times at 5-min intervals. The ocular surface reactions were investigated at hour 4 and day 1 using slit-lamp examination; in vivo confocal microscopy (IVCM) of the cornea, limbus and conjunctiva/conjunctiva-associated lymphoid tissue; conjunctival impression cytology; and standard immunohistology in cryosections for detecting CD45+ infiltrating cells and MUC-5AC-labeled cells. PBS, PQ and travoprost PQ did not induce obvious irritation on clinical observation, changes in the microstructure of the whole ocular surface as measured by IVCM analysis, inflammatory infiltration or cell damage as measured by impression cytology, altered goblet cell counts, or numerous CD45+ cells in the cornea. In contrast, all BAK-containing products induced diffuse conjunctival hyperemia and chemosis, abnormal changes in the ocular surface microstructure, significant total ocular surface toxicity scores, damaged epithelial cells, inflammatory cell infiltration and decreased goblet cell density. Travoprost PQ did not elicit ocular surface toxicity when administered to rabbit eyes. These results suggest a greater safety advantage for the ocular surface of patients receiving chronic glaucoma treatment with PQ-preserved drugs.
// Kattis/zapis.cpp (from hardik0899/Competitive_Programming)
#define __USE_MINGW_ANSI_STDIO 0
#include <iostream>
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <queue>
#include <map>
#include <unordered_map>
#include <set>
#include <unordered_set>
#include <stack>
#include <deque>
#include <string.h>
#include <math.h>

using namespace std;

#define PI 4.0*atan(1.0)
#define epsilon 0.000000001
#define INF 1000000000000000000
#define MOD 1000000007

int N;
string open = "[{(", close = "]})", s;
long long dp[210][210];
bool needZeroes[210][210];
string ret;

// Count the valid bracket completions of s[li..ri], modulo 100000.
// needZeroes records whether the true count ever reached 100000, in which
// case the answer must be padded with leading zeros to 5 digits.
long long solveIt(int li, int ri){
    if(dp[li][ri] != -1) return dp[li][ri];
    if(li > ri) return dp[li][ri] = 1;
    dp[li][ri] = 0ll;
    for(int i = li+1; i <= ri; i += 2)
        for(int j = 0; j < 3; j++)
            if(s[li] == open[j] || s[li] == '?')
                if(s[i] == close[j] || s[i] == '?'){
                    if(dp[li][ri]+solveIt(li+1, i-1)*solveIt(i+1, ri) >= 100000)
                        needZeroes[li][ri] = true;
                    dp[li][ri] = (dp[li][ri]+solveIt(li+1, i-1)*solveIt(i+1, ri))%100000;
                }
    return dp[li][ri];
}

int main(){
    //freopen("roboherd.in", "r", stdin); freopen("roboherd.out", "w", stdout);
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    cout << fixed << setprecision(18);
    cin >> N >> s;
    memset(dp, -1, sizeof(dp));
    memset(needZeroes, false, sizeof(needZeroes));
    ret = to_string(solveIt(0, N-1));
    if(needZeroes[0][N-1])
        while(ret.length() < 5) ret = "0"+ret;
    cout << ret << '\n';
    return 0;
}
/** * Broadcast finished download (success or failure). * @return true if a running receiver received the broadcast. */ private boolean broadcastDownloadFinished(String downloadPath, long bytesDownloaded) { boolean success = bytesDownloaded != -1; String action = success ? DOWNLOAD_COMPLETED : DOWNLOAD_ERROR; Intent broadcast = new Intent(action) .putExtra(EXTRA_DOWNLOAD_PATH, downloadPath) .putExtra(EXTRA_BYTES_DOWNLOADED, bytesDownloaded); return LocalBroadcastManager.getInstance(getApplicationContext()).sendBroadcast(broadcast); }
// packages/core/src/entity/asset/asset.entity.ts (from CatalinCsnMaster/vendure)
import { AssetType } from '@vendure/common/lib/generated-types';
import { DeepPartial } from '@vendure/common/lib/shared-types';
import { Column, Entity, JoinTable, ManyToMany } from 'typeorm';

import { ChannelAware, Taggable } from '../../common/types/common-types';
import { HasCustomFields } from '../../config/custom-field/custom-field-types';
import { VendureEntity } from '../base/base.entity';
import { Channel } from '../channel/channel.entity';
import { CustomAssetFields } from '../custom-entity-fields';
import { Tag } from '../tag/tag.entity';

/**
 * @description
 * An Asset represents a file such as an image which can be associated with certain other entities
 * such as Products.
 *
 * @docsCategory entities
 */
@Entity()
export class Asset extends VendureEntity implements Taggable, ChannelAware, HasCustomFields {
    constructor(input?: DeepPartial<Asset>) {
        super(input);
    }

    @Column() name: string;

    @Column('varchar') type: AssetType;

    @Column() mimeType: string;

    @Column({ default: 0 }) width: number;

    @Column({ default: 0 }) height: number;

    @Column() fileSize: number;

    @Column() source: string;

    @Column() preview: string;

    @Column('simple-json', { nullable: true })
    focalPoint?: { x: number; y: number };

    @ManyToMany(type => Tag)
    @JoinTable()
    tags: Tag[];

    @ManyToMany(type => Channel)
    @JoinTable()
    channels: Channel[];

    @Column(type => CustomAssetFields)
    customFields: CustomAssetFields;
}
// Load the initcode into address 0 of the address space mapped by pml4.
// sz must be less than a page.
void
inituvm(pml4e_t *pml4, char *init, uint sz)
{
  char *mem;

  if(sz >= PGSIZE)
    panic("inituvm: more than a page");
  mem = kalloc();
  memset(mem, 0, PGSIZE);
  mappages(pml4, 0, PGSIZE, v2p(mem), PTE_W|PTE_U);
  memmove(mem, init, sz);
}
import setuptools
from powerhub._version import __version__

setuptools.setup(
    name='PowerHub',
    version=__version__,
    author='<NAME>',
    url='https://github.com/AdrianVollmer/PowerHub',
    description='A post exploitation tool based on a web application, '
                'focusing on bypassing endpoint protection and '
                'application whitelisting',
    long_description=open('README.md', 'r').read(),
    long_description_content_type='text/markdown',
    packages=setuptools.find_packages(),
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'powerhub=powerhub.__main__:main'
        ],
    },
    install_requires=[
        'cheroot',
        'cryptography',
        'Flask>=1.0.2',
        'Flask-SocketIO>=3.1.2',
        'flask-sqlalchemy>=2.1',
        'pyOpenSSL',
        'pypykatz>=0.2.2',
        'service_identity',
        'twisted>=18.9.0',
        'watchdog',
        'werkzeug>=0.15',
        'wsgidav>=3.0.0',
    ],
    python_requires='>=3',
    extras_require={
        'tests': ['pytest', 'beautifulsoup4', 'lxml']
    },
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
    ],
)
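# Hypothetical usage sketch (standard setuptools workflow; these commands are
# not stated in the file itself). From the repository root:
#
#   pip install .
#   powerhub --help   # console script declared in entry_points above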
package com.github.daqun.jira.permission;

/**
 * @Description
 * @Date 2019/4/12 11:22
 * @Created by chenq
 */
@Deprecated
public class UserRealm {
}
package com.nettoolkit.internal.request; import com.nettoolkit.exception.ParsingException; import com.nettoolkit.internal.NetToolKitClient; import com.nettoolkit.internal.request.BaseApiRequest; import com.nettoolkit.internal.http.HttpMethod; public abstract class DeleteRequest extends BaseApiRequest { public DeleteRequest(NetToolKitClient client) { super(client); } @Override protected HttpMethod getHttpMethod() { return HttpMethod.DELETE; } @Override protected String serializeParameters() throws ParsingException { return getParameters().toWwwFormUrlencoded(); } }
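/*
 * Hypothetical subclass sketch (illustration only, not part of the library):
 * a concrete DELETE request built on the abstract class above. The getPath()
 * hook is invented for this example; BaseApiRequest's real abstract members
 * are not shown in this file.
 */
public class DeleteWidgetRequest extends DeleteRequest {
    public DeleteWidgetRequest(NetToolKitClient client) {
        super(client);
    }

    // Assumed hook, for illustration only.
    protected String getPath() {
        return "/v1/widgets";
    }
}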
/**
 * Tool for managing / updating DOIs for projects and its releases.
 */
public class DoiUpdateCmd extends AbstractMybatisCmd {
  private static final Logger LOG = LoggerFactory.getLogger(DoiUpdateCmd.class);
  private static final String ARG_KEY = "key";
  private DoiService doiService;
  private DatasetConverter converter;
  private int key;
  private int releaseUpdated = 0;
  private int releaseCreated = 0;
  private int releasePublished = 0;
  private int sourceUpdated = 0;
  private int sourcePublished = 0;

  public DoiUpdateCmd() {
    super("doi", true, "Update all project, release and release source DOIs for the given project dataset key");
  }

  @Override
  public void configure(Subparser subparser) {
    super.configure(subparser);
    subparser.addArgument("--"+ ARG_KEY, "-k")
        .dest(ARG_KEY)
        .type(Integer.class)
        .required(true)
        .help("Dataset key for project to update");
  }

  @Override
  public void execute() throws Exception {
    Preconditions.checkNotNull(cfg.doi, "DOI configs needed to run the updater");
    Preconditions.checkArgument(user != null, "User argument required to run the updater");
    Preconditions.checkArgument(user.hasRole(User.Role.ADMIN), "Admin user required to run the updater");

    // setup
    doiService = new DataCiteService(cfg.doi, jerseyClient);
    Validator validator = Validation.buildDefaultValidatorFactory().getValidator();
    UserDao udao = new UserDao(factory, new EventBus(), validator);
    converter = new DatasetConverter(cfg.portalURI, cfg.clbURI, udao::get);

    try (SqlSession session = factory.openSession(true)) {
      DatasetMapper dm = session.getMapper(DatasetMapper.class);
      key = ns.getInt(ARG_KEY);
      Dataset d = dm.get(key);
      if (d == null) {
        throw NotFoundException.notFound(Dataset.class, key);
      } else if (d.getOrigin() != MANAGED) {
        throw new IllegalArgumentException("Dataset "+key+" is not a project but a "+d.getOrigin()+" dataset");
      }

      // update project DOI
      Dataset project = dm.get(key);
      LOG.info("Update all DOIs for releases of project {}: {}", key, project.getTitle());
      final var latestReleaseKey = dm.latestRelease(d.getKey(), true);
      LOG.info("Latest release of project {} is {}", key, latestReleaseKey);
      updateReleaseOrProject(project, false, null, null, dm);

      // list all releases in chronological order, starting with the very first release
      DOI prev = null;
      for (Dataset release : dm.listReleases(key)) {
        // ignore private releases, only public ones have a DOI
        if (release.isPrivat()) continue;
        final boolean isLatest = Objects.equals(latestReleaseKey, release.getKey());
        updateReleaseOrProject(release, isLatest, project.getDoi(), prev, dm);
        if (release.getDoi() != null) {
          updateReleaseSources(release, isLatest);
          prev = release.getDoi();
        }
      }
    }
  }

  private void updateReleaseOrProject(Dataset release, boolean isLatest, @Nullable DOI project, @Nullable DOI prev, DatasetMapper dm) {
    DOI doi = release.getDoi();
    try {
      if (doi == null) {
        // issue a new DOI!
        doi = doiService.fromDataset(release.getKey());
        release.setDoi(doi);
        dm.update(release); // persist doi
        var attr = converter.release(release, isLatest, project, prev);
        LOG.info("Issue new DOI {} for release {}", doi, release.getKey());
        try {
          doiService.create(attr);
          releaseCreated++;
          doiService.publish(doi);
          releasePublished++;
        } catch (DoiException e) {
          LOG.info("Failed to create DOI {} for release {}. Try to do an update instead", doi, release.getKey(), e);
          updateDOI(doi, release, isLatest, project, prev);
        }
      } else {
        updateDOI(doi, release, isLatest, project, prev);
      }
    } catch (DoiException e) {
      LOG.error("Error updating DOIs for release {} with DOI {}", release.getKey(), doi, e);
    } finally {
      LOG.info("Total releases created={}, updated={}, published={}. Total sources updated={}, published={}",
          releaseCreated, releaseUpdated, releasePublished, sourceUpdated, sourcePublished);
    }
  }

  private void updateDOI(DOI doi, Dataset release, boolean isLatest, @Nullable DOI project, @Nullable DOI prev) throws DoiException {
    var data = doiService.resolve(doi);
    var attr = converter.release(release, isLatest, project, prev);
    LOG.info("Update DOI {} for release {}", doi, release.getKey());
    doiService.update(attr);
    releaseUpdated++;
    if (!release.isPrivat() && data.getState() != DoiState.FINDABLE) {
      doiService.publish(doi);
      releasePublished++;
    }
  }

  private void updateReleaseSources(Dataset release, boolean isLatest) {
    try (SqlSession session = factory.openSession()) {
      LOG.info("Updating DOIs for {}release {} {}", isLatest ? "latest " : "", release.getKey(), release.getAlias());
      var dsm = session.getMapper(DatasetSourceMapper.class);
      for (Dataset source : dsm.listReleaseSources(release.getKey())) {
        final DOI srcDoi = source.getDoi();
        if (srcDoi != null && srcDoi.isCOL()) {
          try {
            var data = doiService.resolve(srcDoi);
            var attr = converter.source(source, null, release, isLatest);
            LOG.info("Update DOI {} for source {} {}", srcDoi, source.getKey(), source.getAlias());
            doiService.update(attr);
            sourceUpdated++;
            if (!release.isPrivat() && data.getState() != DoiState.FINDABLE) {
              LOG.info("Publish DOI {} for source {} {} of public release {}", srcDoi, source.getKey(), source.getAlias(), release.getKey());
              doiService.publish(srcDoi);
              sourcePublished++;
            }
          } catch (DoiException e) {
            LOG.error("Error updating DOI {} for source {} in release {}", srcDoi, source.getKey(), release.getKey(), e);
          }
        }
      }
    }
  }
}
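/*
 * Hypothetical invocation sketch (the CLI wiring lives outside this class;
 * only the "doi" command name and the --key flag are defined above, and the
 * binary name below is a placeholder):
 *
 *   clb-tool doi --key 1010
 *
 * The command must be run with an admin user, which execute() asserts
 * before touching any DOIs.
 */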
package com.exasol.projectkeeper.validators.pom.plugin;

import java.util.Collection;
import java.util.function.Consumer;

import org.w3c.dom.Node;

import com.exasol.projectkeeper.ProjectKeeperModule;
import com.exasol.projectkeeper.ValidationFinding;

/**
 * Validator for lombok-maven-plugin.
 */
public class LombokPluginValidator extends AbstractPluginPomValidator {
    /**
     * Create a new instance of {@link LombokPluginValidator}.
     */
    public LombokPluginValidator() {
        super("maven_templates/lombok-maven-plugin.xml");
    }

    @Override
    public ProjectKeeperModule getModule() {
        return ProjectKeeperModule.LOMBOK;
    }

    @Override
    protected void validatePluginConfiguration(final Node plugin, final Collection<ProjectKeeperModule> enabledModules,
            final Consumer<ValidationFinding> findingConsumer) {
        verifyPluginPropertyHasExactValue(plugin, "executions", findingConsumer);
    }
}
Cote de Pablo, Michael Weatherly | Photo Credits: Sonja Flemming/CBS

Sorry, Tiva fans: Cote de Pablo really will be leaving NCIS this season. And although we expected her character to get written out, is it possible the series will go so far as to kill Ziva?

That question is certainly raised in the sneak peek below as Homeland Security Director Tom Morrow (Alan Dale) says that Ziva is a target. Still, the promo does offer up some hope as Ziva and Tony (Michael Weatherly) are reunited — though that's likely short-lived given De Pablo's impending exit.

In July, it was announced that De Pablo would be leaving NCIS for undisclosed reasons. "I've had eight great years with NCIS and Ziva David," De Pablo said in a statement at the time. "I have huge respect and affection for Mark, Gary, Michael, David, Rocky, Pauley, Brian, Sean, all of the team and CBS. I look forward to finishing Ziva's story."

Will you miss Ziva? How do you think she should leave the series? NCIS returns Tuesday, Sept. 24 at 8/7c on CBS. (Full disclosure: TVGuide.com is owned by CBS.)
A man was shot in the foot and transported to Salem Hospital with non-life threatening injuries.

Video and photo of shooting victim being loaded into ambulance by Jerry Freeman, Salem-News.com.

(SALEM) - A Salem Police officer setting out to shoot a pit bull ended up shooting a man who apparently tried to place himself between the gun and the dog. A Salem Police officer was firing at the pit bull, a spokesman says, when 38-year-old Steven Deleon jumped in, apparently trying to intervene in the situation. The incident occurred at 2020 Park Ave NE in Salem at approximately 3:20 pm on October 20.

Salem Police Officers Darren Buchholz and Travis Brossard were in the residence speaking with people when they were reportedly attacked by a pit bull. Officer Buchholz says he attempted to push the dog away, but when the dog continued to attack he drew his firearm and shot the animal in self defense. Deleon was struck by the gunshot in the foot and was transported to Salem Hospital with non-life threatening injuries.

Officers Buchholz and Brossard are not injured and will both be placed on administrative leave, as is standard procedure, while the incident is being investigated. The Oregon State Police will be coordinating and conducting this investigation. Salem Police Lt. Dave Okada tells Salem-News.com that it is standard procedure for the Salem Police Department to request that the Oregon State Police conduct an independent investigation of incidents of this nature. The dog was shot and killed.

Anyone with information regarding this investigation is asked to call the Oregon State Police at 1-800-452-7888.

To "ShootPitbull Owners"... I own a pit bull. He is a Canine Good Citizen, a therapy dog for children and women in domestic violence shelters. He is almost done with his training to replace my current medical alert dog, since she is now 8 years old and approaching retirement (she is also a doberman... remember how 20 years ago people like you said the same horrible things about her breed?). The dog was IN THEIR HOME. Not running loose. Maybe the cops should learn to grow a pair and stop shooting peoples' pets for every little tiny thing. If the dog had truly attacked them, those officers would be injured at least. I don't believe a word they say. Who gave these cowboys guns and badges? Give them estrogen treatments and find them new jobs flipping burgers someplace. I'm just waiting for the day when a homeowner shoots back and kills one of these creeps.

I want to know something about that pitbull. It wasn't dangerous; his name was Bully and he was really old. I personally have met the dog, and never once did I feel threatened. Trigger happy cops, yeah, you could say that, but what I think is he was more scared of the dog. And you can't get a reliable story from the people that live in that house; all of them are constantly on one.

So brave... so tough... Eliminate all threats around you with overwhelming force. Call yourself a hero, but you know you are a coward.

I agree with Douglas. This report is unprofessional; it goes against the very basics of proper journalism.

This stuff wouldn't happen if people would stop buying these manhood accessories, not caring about or respecting their dangerous nature. At the end of the day, most of these dogs can't be rehabilitated without serious assistance, so what do you do with an out-of-control pitbull? You have to put it down. Let it be a lesson. Don't buy dangerous animals/guns/substances if you are unwilling to take responsibility for them.

They just HAD to shoot the dog.
What a bunch of trigger happy cops. No wonder the community dislikes them so much.
from __future__ import absolute_import

import logging as log

from dask_hivemetastore._thrift_api import (
    get_socket,
    get_transport,
    TBinaryProtocol,
    ThriftClient,
    Table,
    StorageDescriptor,
    SerDeInfo,
    FieldSchema,
    Partition)

import numpy as np
from dask.dataframe import read_csv, read_parquet, read_table, concat, DataFrame

if False:
    from typing import *


class HiveCompatibleFormat(object):
    def __init__(self, input_format, output_format, serde):
        self.input_format = input_format
        self.output_format = output_format
        self.serde = serde


PARQUET = HiveCompatibleFormat(
    input_format=['org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'],
    output_format=['org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'],
    serde=[
        'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe',
        # other deprecated formats are here
        'parquet.hive.serde.ParquetHiveSerDe',
    ]
)

DELIMITED = HiveCompatibleFormat(
    input_format=['org.apache.hadoop.mapred.TextInputFormat'],
    output_format=['org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'],
    serde=[
        "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
        "org.apache.hadoop.hive.serde2.OpenCSVSerde",
    ]
)


def hive_type_to_dtype(hive_typ):
    """
    The type information from hive is stored as a string. These have the following forms

        PRIMITIVE_TYPE
        COMPLEX_TYPE<PARAMS>

    For now we consider only primitive types.
    """
    types = {
        'tinyint': np.int8,
        'smallint': np.int16,
        'int': np.int32,
        'bigint': np.int64,
        'float': np.float32,
        'double': np.float64,
        'decimal': np.object,
        'timestamp': np.datetime64,
        'date': np.datetime64,
        'interval': np.timedelta64,
        'string': np.object,
        'varchar': np.object,
        'char': np.object,
    }
    return types[hive_typ]


class DaskMetaStoreWrapper(object):
    """Utility wrapper class that can extract the minimal required information
    from the hive metastore in order to create a dask dataframe.

    Don't construct this class directly; use :meth:`connect` instead.
    """

    def __init__(self, service):
        # type: (ThriftClient) -> None
        self.service = service

    def status(self):
        from dask_hivemetastore._gen.fb303.ttypes import fb_status
        status = self.service.getStatus()
        return fb_status._VALUES_TO_NAMES[status]

    def _get_table_info(self, tablename, database=None):
        # type: (str, Optional[str]) -> Table
        if database is None:
            database = 'default'
        return self.service.get_table(database, tablename)

    def _get_partitions(self, tablename, database=None, partition_filter=None):
        # type: (str, Optional[str], Optional[str]) -> List[Partition]
        if database is None:
            database = 'default'
        if partition_filter is not None:
            partitions = self.service.get_partitions_by_filter(database, tablename, partition_filter, max_parts=-1)
        else:
            partitions = self.service.get_partitions(database, tablename, max_parts=-1)
        return partitions

    def _remap_path(self, url, extension=''):
        """Utility to remap a url emitted from hive to one understood by dask"""
        return f'{url}/*{extension}'

    def table_to_dask(
            self,
            tablename,              # type: str
            column_subset=None,     # type: Optional[List[str]]
            database=None,          # type: Optional[str]
            partition_filter=None,  # type: Optional[str]
            **kwargs
    ):
        # type: (...) -> DataFrame
        """
        :param tablename: Name of the hive table to load
        :param column_subset: Column names to load, optional
        :param database: Database (schema) that the table is in
        :param partition_filter: For partitioned tables, a filter (as a string) that partitions must adhere to
        :param kwargs: Other arguments that are passed down to the underlying loaders
        :return:
        """
        info = self._get_table_info(tablename, database)
        sd = info.sd  # type: StorageDescriptor
        serde_info = sd.serdeInfo  # type: SerDeInfo
        partition_keys = info.partitionKeys  # type: List[FieldSchema]
        table_params = info.parameters

        if len(info.partitionKeys):
            partitions = self._get_partitions(tablename, database=database, partition_filter=partition_filter)
            # walk the list of partitions and concatenate the resulting dataframes
            dataframes = []
            for partition in partitions:
                sd = partition.sd  # type: StorageDescriptor
                dd = self._hive_metastore_data_to_dask(
                    location=sd.location,
                    serde_info=sd.serdeInfo,
                    metastore_columns=sd.cols,
                    table_params=table_params,
                    column_subset=column_subset,
                    kwargs=kwargs
                )
                # TODO: convert the dtypes of the fields to the correct one.
                for col, part_key in zip(partition_keys, partition.values):
                    params = {col.name: part_key}
                    dd = dd.assign(**params)
                dataframes.append(dd)
            return concat(dataframes)
        else:
            return self._hive_metastore_data_to_dask(
                location=sd.location,
                serde_info=sd.serdeInfo,
                metastore_columns=sd.cols,
                table_params=sd.parameters,
                column_subset=column_subset,
                kwargs=kwargs
            )

    def _hive_metastore_data_to_dask(
            self,
            serde_info,          # type: SerDeInfo
            location,            # type: str
            metastore_columns,   # type: List[FieldSchema]
            table_params,        # type: Mapping[str, str]
            column_subset=None,  # type: Optional[List[str]]
            kwargs=None,         # type: Optional[Dict[str, Any]]
    ):
        # type: (...) -> DataFrame
        kwargs = kwargs or {}
        if serde_info.serializationLib in PARQUET.serde:
            return self._parquet_table_to_dask(location, column_subset, kwargs=kwargs)
        elif serde_info.serializationLib in DELIMITED.serde:
            return self._delimited_table_to_dask(
                location=location,
                serde_info=serde_info,
                metastore_columns=metastore_columns,
                table_params=table_params,
                column_subset=column_subset,
                kwargs=kwargs
            )

    def _parquet_table_to_dask(
            self,
            location,       # type: str
            column_subset,  # type: Optional[List[str]]
            kwargs,         # type: Dict[str, Any]
    ):
        # type: (...) -> DataFrame
        return read_parquet(self._remap_path(location, extension='.parquet'), columns=column_subset, **kwargs)

    def _delimited_table_to_dask(
            self,
            serde_info,         # type: SerDeInfo
            location,           # type: str
            metastore_columns,  # type: List[FieldSchema]
            table_params,       # type: Mapping[str, str]
            column_subset,      # type: Optional[List[str]]
            kwargs,             # type: Dict[str, Any]
    ):
        # type: (...) -> DataFrame
        if serde_info.serializationLib == "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe":
            csv_args = dict(
                escapechar=serde_info.parameters.get('escape.delim'),
                lineterminator=serde_info.parameters.get('line.delim'),
                delimiter=serde_info.parameters.get('field.delim')
            )
        elif serde_info.serializationLib == "org.apache.hadoop.hive.serde2.OpenCSVSerde":
            csv_args = dict(
                delimiter=serde_info.parameters.get("separatorChar", ","),
                quotechar=serde_info.parameters.get("quoteChar", '"'),
            )
        else:
            raise ValueError("Unexpected serde: {}".format(serde_info.serializationLib))
        kwargs.update(csv_args)

        header = int(table_params.get("skip.header.line.count", 0)) - 1
        if header < 0:
            header = None
        dd = read_csv(
            # TODO: Use the filesystem to see what the extensions are
            self._remap_path(location),
            names=[c.name for c in metastore_columns],
            dtype={c.name: hive_type_to_dtype(c.type) for c in metastore_columns},
            header=header,
            **kwargs)
        if column_subset is not None:
            return dd[column_subset]
        else:
            return dd


def connect(host, port=9083, timeout=None, use_ssl=False, ca_cert=None,
            user=None, password=<PASSWORD>, kerberos_service_name='hive_metastore',
            auth_mechanism=None, ifclass=DaskMetaStoreWrapper):
    """Connect to a Hive metastore and return a dask compatibility wrapper."""
    log.debug('Connecting to Hive metastore %s:%s with %s authentication '
              'mechanism', host, port, auth_mechanism)
    sock = get_socket(host, port, use_ssl, ca_cert)
    if timeout is not None:
        timeout = timeout * 1000.  # TSocket expects millis
    sock.setTimeout(timeout)
    transport = get_transport(sock, host, kerberos_service_name,
                              auth_mechanism, user, password)
    transport.open()
    protocol = TBinaryProtocol(transport)
    service = ThriftClient(protocol)
    log.debug('sock=%s transport=%s protocol=%s service=%s',
              sock, transport, protocol, service)
    return ifclass(service)
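# Hypothetical usage sketch (host, table and filter values are placeholders,
# not from the source):
#
#     store = connect('metastore.example.com', port=9083)
#     print(store.status())
#     sales = store.table_to_dask('sales', database='default',
#                                 partition_filter='year=2018')
#     print(sales.head())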
package com.syntun.controller;

import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.multipart.MultipartFile;
import org.springframework.web.servlet.ModelAndView;

import com.syntun.controller.systemlog.SysLogger;
import com.syntun.datasource.DataSourceContextHolder;
import com.syntun.entity.DataTableFiled;
import com.syntun.etl.tools.BaseDao;
import com.syntun.etl.tools.ConnectSql132;
import com.syntun.etl.tools.ConnectSqlUtil;
import com.syntun.etl.tools.ConvertSql;
import com.syntun.etl.tools.InsertData132;
import com.syntun.service.DataTableFiledService;
import com.syntun.util.GenericController;
import com.syntun.util.POIReadExcelTool;

import net.sf.json.JSONArray;
import net.sf.json.JSONObject;

import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.sql.Connection;
import java.sql.Statement;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;

/**
 * Controller for managing data-table field records, creating tables, and
 * bulk-loading field definitions from uploaded Excel sheets.
 */
@Controller
@RequestMapping(value = "/dataTableFiled")
public class DataTableFiledController {
    @Resource(name = "dataTableFiledService")
    DataTableFiledService dataTableFiledService;

    @RequestMapping(value = "/login")
    public ModelAndView login() {
        ModelAndView mv = new ModelAndView();
        DataSourceContextHolder.setDbType("ds_mop");
        List<DataTableFiled> dataTableFiled = dataTableFiledService.login();
        mv.addObject("dataTableFiled", dataTableFiled);
        mv.setViewName("/login");
        return mv;
    }

    @RequestMapping(value = "/getAllList")
    public void getAllList(HttpServletRequest request, HttpServletResponse response) throws Exception {
        String filedName = request.getParameter("filedName");
        HashMap<String, Object> params = new HashMap<String, Object>();
        params.put("filedName", filedName);
        DataSourceContextHolder.setDbType("ds_mop");
        List<DataTableFiled> resultMap = dataTableFiledService.getAllList(params);
        JSONArray json = JSONArray.fromObject(resultMap);
        response.getWriter().print(json);
    }

    @RequestMapping(value = "/getList")
    public void getList(HttpServletRequest request, HttpServletResponse response) throws Exception {
        String filedName = request.getParameter("filedName");
        int page = Integer.parseInt(request.getParameter("page"));
        int limit = Integer.parseInt(request.getParameter("limit"));
        int start = (page - 1) * limit;
        HashMap<String, Object> params = new HashMap<String, Object>();
        params.put("page", start);
        params.put("limit", limit);
        params.put("filedName", filedName);
        DataSourceContextHolder.setDbType("ds_mop");
        int count = dataTableFiledService.getCount(params);
        List<DataTableFiled> result = dataTableFiledService.getList(params);
        GenericController gen = new GenericController();
        List<Object> res = new ArrayList<>();
        for (Object e : result) {
            Object obj = (Object) e;
            res.add(obj);
        }
        String jsons = gen.getResultJson(count, res);
        response.getWriter().print(jsons.toString());
    }

    @RequestMapping(value = "/addRecord")
    @SysLogger(modelName = "添加记录", methodType = "addRecord") // modelName: "add record"
    public void addRecord(HttpServletRequest request, HttpServletResponse response) throws Exception {
        String filedName = request.getParameter("filedName");
        String tableId = request.getParameter("tableId");
        String filedDataType = request.getParameter("filedDataType");
        String defaultValue = request.getParameter("defaultValue");
        String filedDataFun = request.getParameter("filedDataFun");
        HashMap<String, Object> params = new HashMap<String, Object>();
        params.put("filedName", filedName);
        params.put("tableId", tableId);
        params.put("filedDataType", filedDataType);
        params.put("defaultValue", defaultValue);
        params.put("filedDataFun", filedDataFun);
        DataSourceContextHolder.setDbType("ds_mop");
        dataTableFiledService.addRecord(params);
    }

    @RequestMapping(value = "/delRecord")
    @SysLogger(modelName = "删除记录", methodType = "delRecord") // modelName: "delete record"
    public void delRecord(HttpServletRequest request, HttpServletResponse response) throws Exception {
        String filedId = request.getParameter("filedId");
        HashMap<String, Object> params = new HashMap<String, Object>();
        params.put("filedId", filedId);
        DataSourceContextHolder.setDbType("ds_mop");
        dataTableFiledService.delRecord(params);
    }

    @RequestMapping(value = "/delAllRecord")
    @SysLogger(modelName = "批量删除", methodType = "delAllRecord") // modelName: "batch delete"
    public void delAllRecord(HttpServletRequest request, HttpServletResponse response) throws Exception {
        String items = request.getParameter("filedIds");
        List<String> delList = new ArrayList<String>();
        String[] strs = items.split(",");
        for (String str : strs) {
            delList.add(str);
        }
        DataSourceContextHolder.setDbType("ds_mop");
        dataTableFiledService.delAllRecord(delList);
    }

    @RequestMapping(value = "/editRecord")
    @SysLogger(modelName = "修改记录", methodType = "editRecord") // modelName: "edit record"
    public void editRecord(HttpServletRequest request, HttpServletResponse response) throws Exception {
        String filedId = request.getParameter("filedId");
        String filedName = request.getParameter("filedName");
        String tableId = request.getParameter("tableId");
        String filedDataType = request.getParameter("filedDataType");
        String defaultValue = request.getParameter("defaultValue");
        String filedDataFun = request.getParameter("filedDataFun");
        HashMap<String, Object> params = new HashMap<String, Object>();
        params.put("filedId", filedId);
        params.put("filedName", filedName);
        params.put("tableId", tableId);
        params.put("filedDataType", filedDataType);
        params.put("defaultValue", defaultValue);
        params.put("filedDataFun", filedDataFun);
        DataSourceContextHolder.setDbType("ds_mop");
        dataTableFiledService.editRecord(params);
    }

    @RequestMapping(value = "/addTable", produces = "text/plain;charset=utf-8")
    //@SysLogger(modelName = "创建表格", methodType = "addTable") // modelName: "create table"
    public void addTable(HttpServletRequest request, HttpServletResponse response) throws Exception {
        String dataIP = request.getParameter("dataIP");
        String databaseName = request.getParameter("databaseName");
        String tableName = request.getParameter("tableName");
        String items = request.getParameter("columns");
        JSONObject resObj = new JSONObject();
        Connection conn = ConnectSqlUtil.getConn(dataIP, databaseName);
        Statement stmt = conn.createStatement();
        String sql = "CREATE TABLE " + tableName + "(";
        String sqlKey = " PRIMARY KEY (";
        String sqlEnd = " ) ENGINE=MyISAM AUTO_INCREMENT=62 DEFAULT CHARSET=utf8;";
        try {
            //System.out.println(items);
            JSONArray jsonArray = JSONArray.fromObject(items); // convert the String to JSON
            //System.out.println(jsonArray.size());
            for (int i = 0; i < jsonArray.size(); i++) {
                // iterate over the JSONArray, converting each element to a JSON object
                JSONObject job = jsonArray.getJSONObject(i);
                if (job.get("column").equals("")) {
                    //continue;
                    break;
                }
                // column name + data type (length)
                if (job.get("dataType").equals("date") || job.get("dataType").equals("datetime") || job.get("dataType").equals("timestamp") || job.get("dataType").equals("text")) {
                    sql = sql + job.get("column") + " " + job.get("dataType");
                } else {
                    sql = sql + job.get("column") + " " + job.get("dataType") + "(" + job.get("length") + ")";
                }
                // primary key?
                if (job.get("primaryKey").equals("T")) {
                    if (sqlKey.equals(" PRIMARY KEY (")) {
                        sqlKey = sqlKey + job.get("column");
                    } else {
                        sqlKey = sqlKey + "," + job.get("column");
                    }
                }
                // NOT NULL?
                if (job.get("notNull").equals("T")) {
                    sql = sql + " NOT NULL";
                } else if (job.get("notNull").equals("F")) {
                    sql = sql + " DEFAULT NULL";
                }
                // auto-increment?
                if (job.get("autoIncrement").equals("T")) {
                    sql = sql + " AUTO_INCREMENT";
                } else if (job.get("autoIncrement").equals("F")) {
                }
                // default value
                if (!job.get("default").equals("")) {
                    if (job.get("dataType").equals("timestamp")) {
                        sql = sql + " DEFAULT " + job.get("default");
                    } else {
                        sql = sql + " DEFAULT '" + job.get("default") + "'";
                    }
                }
                // column comment
                sql = sql + " COMMENT '" + job.get("comment") + "',";
            }
            sqlKey = sqlKey + ")";
            sql = sql + sqlKey + sqlEnd;
            System.out.println(sql);
            if (0 == stmt.executeUpdate(sql)) {
                resObj.put("code", 0);
                resObj.put("msg", "成功创建表" + tableName); // msg: "table <tableName> created successfully"
            } else {
                resObj.put("code", 1);
                resObj.put("msg", "创建失败" + tableName); // msg: "creation failed"
            }
        } catch (Exception e) {
            //System.out.println(e.getMessage());
            resObj.put("code", 2);
            resObj.put("msg", "创建失败:" + e.getMessage()); // msg: "creation failed: <error>"
        }
        stmt.close();
        ConnectSqlUtil.push(conn);
        ConnectSqlUtil.closeConnection(conn);
        PrintWriter out = response.getWriter();
        out.println(resObj);
        out.flush();
        out.close();
    }

    @RequestMapping(value = "/upload")
    @ResponseBody
    @SysLogger(modelName = "批量插入", methodType = "upload") // modelName: "batch insert"
    public JSONObject upload(MultipartFile file, HttpServletRequest servletRequest) throws IOException {
        JSONObject resObj = new JSONObject();
        // request.getSession().getServletContext().getRealPath("/"); // server path
        SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss");
        Date day = new Date();
        String dateDay = df.format(day);
        String fileName = file.getOriginalFilename();
        String fileName0 = fileName.substring(0, fileName.indexOf("."));
        String fileName1 = fileName.substring(fileName.indexOf("."));
        fileName = fileName0 + "-" + dateDay + fileName1;
        String path = "/home/CrawlerSystem/uplodExcel/dataTableFiled";
        //String path = "D:\\upload";
        File dir = new File(path, fileName);
        if (!dir.exists()) {
            dir.mkdirs();
        }
        file.transferTo(dir);
        String filePath = file.toString();
        System.out.println(filePath);
        String excelPath = "/home/CrawlerSystem/uplodExcel/dataTableFiled/" + fileName;
        //String excelPath = "D:/upload/dataTableFiled/" + fileName;
        //String excelPath = filePath.replace("\\", "/");
        List<String> dataTatal = new ArrayList<String>();
        List<String> fieldList = new ArrayList<>();
        List<HashMap<String, String>> excelData = new ArrayList<HashMap<String, String>>();
        POIReadExcelTool poi = new POIReadExcelTool();
        List<List<String>> list = poi.read(excelPath);
        // read the table's field list from the database
        Connection conn132 = ConnectSql132.getConn();
        List<String> fieldList132 = BaseDao.getFieldLowerCase("xitong.data_table_filed_list", conn132);
        ConnectSql132.push(conn132);
        // number of data rows in the Excel file
        int ex = 0;
        boolean ifRun = true;
        if (list != null) {
            ex = list.size() - 1;
            for (int i = 0; i < list.size(); i++) {
                HashMap<String, String> dateMap = new HashMap<String, String>();
                List<String> cellList = list.get(i);
                if (i == 0) {
                    // walk the first (header) row and convert to lower case
                    for (String field : list.get(i)) {
                        fieldList.add(field.toLowerCase());
                    }
                    // if the sheet and database column counts differ, bail out
                    if (fieldList.size() != fieldList132.size()) {
                        ifRun = false;
                        break;
                    }
                    // if a column header is not among the table's fields, end the loop
                    for (String field : fieldList) {
                        if (!fieldList132.contains(field)) {
                            ifRun = false;
                            break;
                        }
                    }
                    if (!ifRun) {
                        break;
                    }
                } else {
                    for (int j = 0; j <= 5; j++) {
                        if (cellList.get(j).indexOf(".") != -1) {
                            dateMap.put(fieldList.get(j), cellList.get(j).substring(0, cellList.get(j).indexOf(".")));
                        } else {
                            dateMap.put(fieldList.get(j), cellList.get(j));
                        }
                    }
                    excelData.add(dateMap);
                }
            }
            //System.out.println(excelData);
            if (excelData.size() != 0) {
                for (HashMap<String, String> shopMap : excelData) {
                    String sql = ConvertSql.getSql("xitong.data_table_filed_list", fieldList, shopMap);
                    dataTatal.add(sql);
                }
                Thread t1 = new Thread(new InsertData132(dataTatal));
                t1.start();
                boolean isAlice = true;
                // busy-wait until the insert thread finishes
                while (isAlice) {
                    if (!t1.isAlive()) {
                        isAlice = false;
                        resObj.put("code", 0);
                        resObj.put("msg", "上传Excel表格中数据:" + ex + "条"); // msg: "<ex> rows of data in the uploaded Excel sheet"
                        resObj.put("data", "");
                    }
                }
            } else if (!ifRun) {
                resObj.put("code", 1);
                resObj.put("msg", "上传Excel表格字段格式错误"); // msg: "uploaded Excel sheet has a bad field format"
                resObj.put("data", "");
            } else {
                resObj.put("code", 0);
                resObj.put("msg", "上传Excel表格中数据:" + ex + "条");
                resObj.put("data", "");
            }
        } else {
            resObj.put("code", 0);
            resObj.put("msg", "上传Excel表格中数据:" + ex + "条");
            resObj.put("data", "");
        }
        // String st = delExcel(excelPath);
        // if(st.equals("YES")){
        //     String fileNameBak = file.getOriginalFilename() + "." + dateDay;
        //     File dirBak = new File(path,fileNameBak);
        //     if(!dirBak.exists()){
        //         dirBak.mkdirs();
        //     }
        //     //file.transferTo(dirBak);
        // }
        return resObj;
    }

    public String delExcel(String filePath) {
        File file = new File(filePath);
        // if the file at this path exists and is a regular file, delete it directly
        if (file.exists()) {
            if (file.delete()) {
                System.out.println("删除成功!"); // "deleted successfully!"
                return "YES";
            } else {
                System.out.println("删除失败!"); // "delete failed!"
                return "NO";
            }
        } else {
            System.out.println("删除单个文件失败:不存在!"); // "failed to delete file: it does not exist!"
            return "NOerr";
        }
    }
}
from pyspark.sql import SparkSession
from pyspark import SparkContext
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.functions import udf, col, from_json, flatten, explode, desc, count
from datetime import datetime
import argparse


def getChart(df):
    df = df.withColumn("patient_ID", df["patient.patientID"])
    df = df.withColumn("patient_age", df["patient.age"])
    df = df.withColumn("patient_sex", df["patient.sex"])
    df = df.withColumn("year", df["visit.date.year"])
    df = df.withColumn("month", df["visit.date.month"])
    df = df.withColumn("day", df["visit.date.day"])
    df = df.withColumn("hospital_name", df["hospital.hospitalID"])
    df = df.withColumn("treatment_name", df["treatment.name"])
    df = df.select(
        df["patient_ID"],
        df["patient_age"],
        df["patient_sex"],
        df["hospital_name"],
        df["doctor"],
        df["treatment_name"],
        df["year"],
        df["month"],
        df["day"],
    ).orderBy("patient_ID", ascending=False)
    return df


def getReceipt(df):
    df = df.withColumn("hospital_name", df["hospital.hospitalID"])
    df = df.withColumn("hospital_address", df["hospital.address"])
    df = df.withColumn("hospital_contact", df["hospital.contact"])
    df = df.withColumn("year", df["visit.date.year"])
    df = df.withColumn("month", df["visit.date.month"])
    df = df.withColumn("day", df["visit.date.day"])
    df = df.withColumn("patient_ID", df["patient.patientID"])
    df = df.withColumn("treatment_name", df["treatment.name"])
    df = df.withColumn("treatment_price", df["treatment.price"])
    df = df.select(
        df["hospital_name"],
        df["hospital_address"],
        df["hospital_contact"],
        df["year"],
        df["month"],
        df["day"],
        df["patient_ID"],
        df["treatment_name"],
        df["treatment_price"],
        df["payment"],
    ).orderBy("patient_ID", ascending=False)
    return df


if __name__ == "__main__":
    sc = SparkContext("local", "etl")
    spark = SparkSession.builder.appName("etl process").getOrCreate()

    schema = StructType([
        StructField("patient", StructType([
            StructField("patientID", StringType(), True),
            StructField("age", IntegerType(), True),
            StructField("sex", StringType(), True)]), True),
        StructField("doctor", StringType(), True),
        StructField("hospital", StructType([
            StructField("hospitalID", StringType(), True),
            StructField("address", StringType(), True),
            StructField("contact", StringType(), True)]), True),
        StructField("treatment", StructType([
            StructField("name", StringType(), True),
            StructField("price", IntegerType(), True)]), True),
        StructField("visit", StructType([
            StructField("date", StructType([
                StructField("year", IntegerType(), True),
                StructField("month", IntegerType(), True),
                StructField("day", IntegerType(), True)]), True)]), True),
        StructField("payment", StringType(), True)])

    # command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--target", help="target info", default="chart")
    parser.add_argument("-if", "--inputformat", help="input format", default="json")
    parser.add_argument("-of", "--outputformat", help="output format", default="parquet")
    parser.add_argument("-o", "--output", help="output path", default="parquet")
    args = parser.parse_args()

    df = spark.read.format(args.inputformat).load("./data")
    spark.sparkContext.setLogLevel("ERROR")

    print("Original Data")
    df.show(50)
    df.printSchema()

    if args.target == "chart":
        df = getChart(df)
        df1 = df.groupBy("doctor").agg(count("patient_ID")).withColumnRenamed("count(patient_ID)", "patient_num")
    elif args.target == "receipt":
        df = getReceipt(df)
        df1 = df.groupBy("hospital_name").sum("treatment_price").withColumnRenamed("sum(treatment_price)", "profit").\
            withColumnRenamed("hospital_name", "hospital")
        print("Hospital Profit")

    print("Parsed Data")
    df.show(50)
    df.printSchema()

    print("Filtered Data")
    df1.show(50)
    df1.printSchema()

    df.coalesce(1).write.format(args.outputformat).mode("overwrite").save(args.output)
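# Hypothetical invocation sketch (flag names come from the argparse setup
# above; the output path is a placeholder):
#
#   spark-submit etl.py --target receipt --inputformat json \
#       --outputformat parquet --output ./out/receipt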
A key player in the Tenderloin and Mid-Market's pornographic history has passed away. Habib Carouba, the former owner of Market Street Cinema and Campus Theater (now home to Power Exchange), died on Wednesday, October 19th. According to his obituary, "Habib Carouba died as he lived, on his own terms, in his own time, and just as he had hoped: without suffering, struggling or pain." The Daly City resident was an important figure along the Mid-Market corridor and in the Tenderloin during the gritty, bygone days when XXX adult movies were still being shot on film and screened in theaters. Carouba operated both gay and straight cinemas. Filmmaker Mike Stabile, who is making a series of documentaries about the history of the San Francisco adult film industry, told us that Carouba got his start in the business as a bookstore owner. "Habib got started in the business selling magazines and dirty books, when hardcore porn was still illegal," Stabile tells us. "He and his partner, Harold Greenland, were running different bookstores, and moving into abandoned or run-down theaters and turning them into adult movie houses. First the nudist pictures, then slowly pushing the envelope to beaver films (where you could see pubic hair) and eventually hardcore." It was very much an underground business in those days. "Theaters were very vulnerable to raids and vice busts, so it wasn't a crowded market," Stabile recalls. "You had to be willing to take the risk. Before it was entirely legal, [Carouba] would buy movies directly from people like the Mitchell Brothers, who later went on to have their own theater. Eventually, they became competition." Indeed, Carouba had several brushes with the law—some of which are comical in retrospect. For example, when one of his theaters was raided, he asked to call a federal judge as a witness, "because he'd seen the judge at his theater as a patron on his lunch break," Stabile said. "Of course, the case was dismissed." Carouba also told Stabile about the time when mayoral candidate Dianne Feinstein, who'd made fighting pornography one of her pet causes, raided one of his theaters with an army of police officers. "It made headlines, but it was also amazing promotion for his theater," Stabile said. "The next day, there were lines around the block." Stabile's film "Smut Capital of America," which includes an interview with Carouba. Carouba was born in 1932, in the town of Jaffa in what was then Palestine. He emigrated to the U.S. in 1948, ultimately serving in the Army. "I credit them for making a man out of a scrappy little kid from Jaffa," he was quoted as having said in his Chronicle obituary. Carouba learned English working as a busboy and a waiter while he attended business school. In addition to owning adult cinemas, he was also a restaurateur, owning businesses as far away as New York and Hawaii. Many have said that he cut a dashing figure in his suits—Frank Sinatra was his fashion inspiration. Carouba is survived by Alice Clancy Carouba, his wife of 63 years, their children and grandchildren, and mourned by many other relatives, friends and business associates. Stabile says that Carouba made an indelible mark on the San Francisco landscape. "I think that he was one of the true pioneers of adult, not just in San Francisco, but nationally," Stabile said. "At a time when there was next to no sex education, where even married couples didn't know what to do, and when sexuality of all types was filled with shame and fear, he took incredible risks to distribute it. 
People make fun of porn, or get embarrassed, but it really is an important cultural force, particularly for gay men, and a force for good." Stabile urges people to vote No on 60 as a way of honoring Carouba's legacy. You can see an interview with Habib Carouba in Stabile's short documentary Smut Capital of America.
Immunohistochemical identification of Cowdria ruminantium in formalin-fixed tissue sections from mice, sheep, cattle and goats.

An immunohistochemical staining technique, in which a monospecific serum against the major antigenic protein-1 (MAP-1) of Cowdria ruminantium was used, was evaluated for the detection of C. ruminantium in formalin-fixed tissues of experimentally infected mice and field cases of heartwater in sheep, cattle and goats. Mice were infected with the mouse-pathogenic stocks Mara, Kwanyanga, Welgevonden, Nonile, Vosloo, Kmm, Mali and Omatjenne. In all these cases, and in the naturally infected cattle, sheep and goats, Cowdria colonies were identified as clearly defined, brown-staining rickettsial colonies within the cytoplasm of endothelial cells. No positive staining was observed in the control group. This technique was shown to be reliable for detecting infection with C. ruminantium in the formalin-fixed tissues of mice and domestic ruminants.
Additive synthesis is a signal synthesis technique based on the Fourier theorem. This theorem states that any signal can be decomposed into a set of constituent sine waves, and that the sum of the constituents will reconstitute the original. Additive synthesis is classified as a receiver-based synthesis algorithm, but differs from other receiver-based schemes, such as subtractive synthesis and sampling, in that the signal is represented in the spectral (frequency) domain rather than the time domain. There are many benefits to the use of additive synthesis for sound production in computer music applications. These include expressive musical control over fine timbral distinctions, perceptually relevant parameterizations, sample-rate independence of the timbre description, availability of many analysis techniques, high control bandwidth, and multiple dimensions for resource allocation and optimization. The challenge of the additive synthesis technique is the computational intensity of the separately controllable sinusoidal partials. A single low-frequency piano note can require hundreds of time-varying sinusoids for accurate reproduction. Musically effective use of additive synthesis in live performance can require the ability to control many hundreds or even thousands of sinusoidal partials in real time. This computational challenge is addressed by resolving two issues: which hardware architecture to use, and which sinusoid generation algorithm to use on the selected architecture. Digital signal processors or vector processors are a good selection for the data type and associated computational demands. Unfortunately, such architectures do not always support full-range (i.e., floating-point) arithmetic; fixed point may be all that is provided. There is always a large demand for low-cost implementations. Therefore, it is desirable to be able to exploit a relatively inexpensive, moderate-precision arithmetic hardware architecture, such as a 16-bit processor. The sinusoidal partial generation techniques that may be used on a selected hardware architecture can be placed in three classes: those that implement recursive filters, those that use table lookup, and those that work in the transform domain using techniques such as the inverse fast Fourier transform. The transform-domain approach is most advantageous for applications requiring many sinusoids and for which some error in phase and amplitude and some latency is acceptable. The lookup technique is the most widely used for applications requiring a few sinusoids at a very high data rate, such as radio-frequency communications. Recursive oscillators have several advantages, including the inherent fine-grain exposure of data parallelism, the far more limited demand on the memory system compared to table lookups, the lower induced latency compared to a transform-domain approach, the latency flexibility, and the attainable phase accuracy. The primary problem with digital recursive oscillators is managing long-term stability as rounding and truncation errors accumulate. Another problem with recursive oscillators is providing sufficient frequency coefficient resolution. In view of the foregoing, it would be highly desirable to provide an improved technique for processing real-time partials on a general-purpose hardware architecture. Ideally, the technique could be readily implemented on a moderate-precision arithmetic hardware architecture, such as a 16-bit processor.
The technique should address the problem of error accumulation inherent in recursive methods. In addition, the technique should provide sufficient frequency coefficient resolution.
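To make the recursive-oscillator class concrete, the following is an illustrative sketch (added for this edit, not taken from the source) of the simplest such generator, written in Python. The two-pole recurrence y[k] = 2*cos(w)*y[k-1] - y[k-2] produces a sinusoid with a single multiply per sample, and the slow drift it exhibits under rounding is precisely the long-term stability problem described above.

import math

def recursive_sine(freq_hz, sample_rate, n):
    """Generate n samples of sin(w*k) using the two-pole recurrence
    y[k] = 2*cos(w)*y[k-1] - y[k-2], one multiply per sample.
    Rounding error slowly perturbs amplitude and phase over time."""
    w = 2.0 * math.pi * freq_hz / sample_rate
    c = 2.0 * math.cos(w)
    y1, y2 = math.sin(-w), math.sin(-2.0 * w)  # seed with y[-1] and y[-2]
    out = []
    for _ in range(n):
        y0 = c * y1 - y2
        out.append(y0)
        y2, y1 = y1, y0
    return out

# e.g. roughly one period of a 440 Hz partial at a 48 kHz sample rate:
# samples = recursive_sine(440.0, 48000.0, 110)

In a fixed-point 16-bit implementation the same recurrence applies, but the coefficient 2*cos(w) must be quantized, which is the frequency-coefficient-resolution problem the passage raises.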
Beach golf

Beach golf is a variation of golf mainly played on sandy beaches. It was devised with the aim of offering a simplified and more accessible version of golf. The game is played along a distance of about 2 kilometres (1.2 miles), over which the players, two per team, have to reach the final hole with as few strokes as possible, hitting a soft polyurethane ball with a classic golf club.

Rules

Regulations have been created for the beach golf event with the aim of anticipating the various situations that can arise. The main rule states that people and things constitute natural obstacles on the path. The game takes place along a path of 2 km of sandy coastline on the beach, where 40 teams of two players each (a pro and a beginner) challenge each other to arrive at the final hole with the fewest possible strokes. The game track is not bounded by any type of border and is punctuated by beach umbrellas and bathers, which become natural obstacles throughout the competition. Because of the crowded place where the event takes place, the ball used is made of soft polyurethane foam to prevent injury to anyone who is accidentally hit during the game. The route is accompanied along its whole length by music and entertainment that involve the audience in an original way, making it an active part of the field of play. The golfer's hitting area is bordered by a protection zone of about 5 meters, held up by three girls who accompany the team along the way throughout the game.

Team

Each team is composed of two players: a low-handicap player and a high-handicap one. The mean of the two handicaps, rounded down to the next lower whole number (e.g. a mean of 17.5 gives a team handicap of 17), is the team's handicap, which shows how many strokes of improvement the team can benefit from. Each turn is played with only one ball. One player may substitute for the other, at the cost of stroke and distance. Penalty: loss of stroke and distance.

Game

It is a stroke-play competition held in two rounds. Two types of balls are used: a soft rubber one, totally harmless (69 mm in diameter), which is used along the track, and a regular one on the green. There is no time limit. Players have the opportunity to stop for a lunch break; however, teams must respect their assigned starting time.

Track

The track runs along the shore of any beach. When beach golf is held in Pescara, play starts from Paolucci Square and arrives at the Stadium of the Sea, close to Cascella's Ship. The track has a length of 1600 meters and a width that takes in the whole beach.

The starting area

The starting area is where the first ball is hit. It consists of a synthetic rug of 1.50 x 1.50 m. The ball is placed on the appropriate tee, and both players choose a tee shot, placing the balls in the best possible way. The honor is given (noblesse oblige) to the lower-handicap player. Any missed or barely touched ball is counted as a stroke along the path. If the ball falls from the tee without being touched, it can be repositioned before the shot. Each team starts three minutes after the previous one. The permitted delay is ten minutes after the departure of the next team. A laggard team starts last with a penalty of five strokes.

Equipment

The equipment used consists of a classic golf club, expanded-foam balls and a protection belt.

Clubs

Preferably of the sand wedge or wedge type.

Ball

The ball is made of polyurethane foam, with a diameter of 7 cm and a weight of approximately 35 g. Its composition makes it harmless, allowing play anywhere.

Protection belt

This is held by the Caddy Girls and has a very important function: to guarantee the safety of bathers, players and anyone who is on the game track. This is what makes beach golf the only sport in the world where spectators are on the pitch and involved as an active part of the game.

Caddy Girls

Caddy Girls act both as game judges and as hostesses during the event. Their role is to ensure that the game takes place in a proper and safe way. The Caddy Girls hold a protection belt around the area of action, aiming to allow the golfer to play in peace among the bathers, beach chairs and umbrellas. The BGSA has planned a special calendar featuring the beach golf Caddy Girls.
Single Cigar Price and Availability in Communities With and Without a Cigar Packaging and Pricing Regulation

Single cigars are available for sale throughout the tobacco retail environment, are often sold for prices as low as 49 cents, and are available in flavors that appeal to youth. Since 2012, 151 municipalities in Massachusetts have enacted a minimum cigar packaging and pricing regulation that increases the price of a single cigar to a minimum of $2.50 and the price of multi-packs of 2 cigars to a minimum of $5.00. We used pricing data collected from retailers across the state to measure the effect of the regulation on price and availability of single cigars over the long term. From 2014 through 2018, the statewide average price of single cigars increased from $1.35 to $1.64, concurrent with a decrease in statewide availability. Prices of single cigars were higher in communities with the regulation but also rose over time in communities without the regulation. The increased price and decreased availability of single cigars may reduce youth exposure and access to these products.

Introduction

Following the 2009 Family Smoking Prevention and Tobacco Control Act, which banned the sale of candy-flavored, fruit-flavored, and other flavored cigarettes, the largest cigarette manufacturers purchased existing cigar brands and produced cigars that were available in a variety of youth-attractive flavors, individually packaged in bright colors, and sold for as low as 49 cents each. From 2006 through 2010, revenue from flavored cigar sales nearly doubled among retailers in the greater Boston area, and by 2010, more than 100 different flavors of cigars were on the market. Data for this same period show a rise in use of cigars and cigarillos by Massachusetts youth. The retail environment is a major source of exposure and access to tobacco for youth, and policies that increase price and reduce availability of tobacco products in the retail environment are effective in curbing youth use. In 2012, Boston became the first municipality in Massachusetts to implement a cigar packaging and pricing regulation (CPPR) that raises the minimum price at which single cigars or cigarillos could be sold. Studies conducted in Minneapolis and Boston demonstrated high retailer compliance with similar regulations. Ours is the first study to examine statewide single cigar price and availability of 3 cigar brands over a 5-year period. Each year, the Massachusetts Tobacco Cessation and Prevention Program (MTCP) engages with local enforcement agents and a contracted data collection vendor to visit a large representative sample of tobacco retailers in Massachusetts and administer a survey that obtains the price and availability of different tobacco products. In odd-numbered years, the Massachusetts Youth Risk Behavior Survey (MYRBS) is administered to a representative sample of high schools in Massachusetts to collect data on youth tobacco use, including cigars. We used data from both surveys to examine single-cigar availability and price over a 5-year period in Massachusetts and statewide trends in youth cigar use during the same period.

Purpose and Objectives

Marketing of cigars, cigarillos, and little cigars closely follows the historic pattern of tobacco industry marketing practices: use of social media, celebrity endorsements, targeted advertisements to youth and African-American populations, and increased availability in communities of color.
Cigars and cigarillos are often cheaper than cigarettes, which may make them more accessible to youth, low-socioeconomic populations, and communities of color, populations all demonstrated to be price-sensitive to tobacco. MYRBS surveillance data show that in 2011, high school youth's use of cigars (14.3%) surpassed their use of cigarettes (14%) for the first time. Later surveys indicated that approximately 15% of youth reported that they obtained their tobacco directly or indirectly at a retail store. In Massachusetts, each municipality (of 351 total) has the authority to pass health regulations, including point-of-sale tobacco control policies. CPPR requires tobacco retailers to price single cigars for a minimum of $2.50 and multi-packs of 2 or more cigars for a minimum of $5.00, although each municipality has the option to amend policy language. Violations result in tiered fines, with multiple violations resulting in permit suspension. MTCP-funded Massachusetts Board of Health programs and trade associations - the Massachusetts Municipal Association, the Massachusetts Association of Health Boards, and the Massachusetts Health Officers Association - provide technical assistance for municipalities that consider passing tobacco control policies, including model regulation language and community mobilization at local hearings. Funded Massachusetts Board of Health programs provide retailer education and enforcement, allowing for a stable infrastructure that ensures high retailer compliance. Although some municipalities do not directly receive MTCP funds, enforcement is promoted and conducted in these municipalities, with MTCP-funded technical assistance provided by the Massachusetts Health Officers Association. State and federal policies that raise the price of cigarettes have been successful in reducing youth use of cigarettes through minimum price laws, excise taxes, minimum packaging, and the prohibition of certain flavors. However, lowering prices is one tactic historically used by the tobacco industry to increase demand among price-sensitive populations, including youth. Research has demonstrated that increases in cigarette prices have been associated with a reduction in youth use. Like flavored cigarettes, flavored cigars have been promoted by the industry as starter products among youth, using flavors to mask the harsh tobacco taste. National data indicate that flavored cigars and cigarillos account for more than a third of cigar sales and half of cigarillo sales. A reduction in availability of single cigars may also address youth access, exposure, and use of flavored tobacco products.

Evaluation Methods

Pricing survey. The pricing survey collects retailer data such as establishment name, address, store type (eg, gas station, convenience store), and whether the retailer is part of a chain or independently owned. The survey measures price and availability of 3 major cigarillo brands: Dutch Master, Black and Mild, and Garcia y Vega Game, chosen because of their prevalence in Massachusetts. All prices presented in this article are pre-tax prices to allow for comparison across brands.

Pricing survey sampling. MTCP engages with 2 groups of data collectors to conduct the pricing survey. Local enforcement agents conduct the surveys in 100% of retailers in 186 municipalities (with and without CPPR) where enforcement work is funded. In the remaining unfunded communities with at least 1 retailer present, MTCP contracts with JSI Research and Training Institute, Inc. (JSI) to perform data collection.
MTCP maintains a database of all active tobacco retailers in the state from which a simple random sample of retailers in both funded and unfunded regions is drawn each quarter of the year (3-month periods). Because randomization occurs on the retailer level and not the municipal level, a representative sample of retailer data is available for each quarter throughout the year.

Massachusetts Youth Risk Behavior Survey. Every odd year, the Massachusetts Department of Elementary and Secondary Education and the Massachusetts Department of Public Health conduct the MYRBS to monitor trends of health risk behaviors among high school students. Through a random selection process, a representative sample of schools across the state is chosen to participate; within each school, classes from grades 9 to 12 are randomly selected to be surveyed. Student participation is voluntary. Surveys are administered by the Center for Survey Research at the University of Massachusetts Boston, which also prepares data for analysis, including weighting the data according to Centers for Disease Control and Prevention (CDC) protocol. Respondents are asked about their cigar use: "During the past 30 days, on how many days did you smoke cigars, cigarillos, or little cigars?" with response options that ranged from "0 days" to "all 30 days." Respondents were considered current users if they indicated use in the past 30 days.

Data analysis. For each year, the mean price of each brand and an aggregate mean price for all 3 cigar brands combined were calculated overall for the state and for communities with and without the CPPR. Single-cigar availability was also calculated overall for the state by individual cigar brand and aggregated for communities with and without the CPPR. Data were weighted by region and store type to account for the variation in completion rates (retailers successfully surveyed) in funded and unfunded regions, because data collectors in MTCP-funded communities are likely to have established relationships with retailers. Because of the nature of policy implementation, the CPPR within individual municipalities passed and took effect at different points over the 5 years. Individual municipalities typically provided an adequate amount of time for retailers to comply, ranging from 3 months to 1 year, so the policy effective date was used to classify whether or not a community had the regulation at the time of data collection. Communities were classified as either having a CPPR or not, despite individual variations in policy that may be present in a small subset of municipalities. At the time of this study, only aggregated numbers were available, so statistical testing or modeling could not be completed.

Results

The average price of single cigars in Massachusetts increased steadily each year from 2014 through 2018, from $1.35 to $1.64 (Table), and availability of single cigars decreased statewide. In 2014, single cigars were available in 49% of retailers across the state. By FY 2018, single cigars were available in only 21% of retailers. The price of single cigars was higher in communities with the regulation than in communities without it (Table). In communities with the CPPR, the price of single cigars (aggregated) ranged from $2.24 to $2.41. Over time, prices of single cigars increased in communities without the regulation. The price of Garcia y Vega Game single cigars increased from under a dollar ($0.89) to $1.22 by FY 2018 in communities without the CPPR.
Over time, the availability of single cigars decreased in communities with a CPPR. From 2014 to FY 2018, the availability of single cigars (aggregated) decreased from 28% to 14% in communities with the regulation. Trends over time suggest that the availability of single cigars also decreased in communities without the regulation. Although the overall availability of Black and Mild cigars remained steady, availability of both Dutch Master and Garcia y Vega Game single cigars dropped substantially across the state (Dutch Master, from 50% to 12%; Garcia y Vega, from 42% to 6%). MYRBS data indicated that from 2011 through 2017, current use of cigars decreased from 14.3% to 6.7% (Figure). Implications for Public Health Data for Massachusetts show an increase in the price of single cigars in several municipalities over the 5-year period. This study is the first to show that over time, with increasing policy coverage across the state, the price of single cigars increased, and their availability decreased, even in communities that had not implemented the policy. The substantial statewide coverage of the CPPR may reduce youth access to and youth use of cigars or cigarillos. However, other factors may affect cigar use, because youth may instead be switching to other popular nicotine products, such as e-cigarettes. Other tobacco policies passed at the municipal level, such as age restrictions, restrictions on sales of flavored tobacco products, and bans on the sale of tobacco in pharmacies, may also affect youth access and use. This study has several limitations. We presented aggregated pricing and availability data, which do not allow for statistical testing; thus, we cannot directly attribute the observed outcomes to the policy. Data were unavailable before 2012, when the first CPPR was passed in Massachusetts, so we did not have a true baseline period. We used pre-tax prices for comparison purposes, and the final price may be different because of coupons or taxes. Data collection was switched from calendar year to fiscal year, leaving a gap in 2015 data. Future analysis should use individual-level retailer data to ascertain the effect of the CPPR, controlling for other tobacco control policies, community demographics, variation in policy language, and funding status. Tobacco industry influence remains pervasive in the point-of-sale retail environment, in which youth are exposed to a variety of flavored tobacco products, advertisements, and low prices. A comprehensive approach to addressing tobacco industry tactics by adopting policies like the CPPR, alongside other point-of-sale policies, such as restrictions on the sale of flavored tobacco products, may increase price and reduce exposure, access, and ultimately youth use.
Metagenomic Analysis of Bacterial Communities of Antarctic Surface Snow
The diversity of bacteria present in surface snow around four Russian stations in Eastern Antarctica was studied by high-throughput sequencing of amplified 16S rRNA gene fragments and shotgun metagenomic sequencing. Considerable class- and genus-level variation between the samples was revealed, indicating the presence of inter-site diversity of bacteria in Antarctic snow. Flavobacterium was a major genus in one sampling site and was also detected in other sites. The diversity of flavobacterial type II-C CRISPR spacers in the samples was investigated by metagenome sequencing. Thousands of unique spacers were revealed, with less than 35% overlap between the sampling sites, indicating an enormous natural variety of flavobacterial CRISPR spacers and, by extension, a high level of adaptive activity of the corresponding CRISPR-Cas system. None of the spacers matched known spacers of flavobacterial isolates from the Northern hemisphere. Moreover, the percentage of spacers with matches to the Antarctic metagenomic sequences obtained in this work was significantly higher than to sequences from the much larger publicly available environmental metagenomic database. The results indicate that despite the overall very high level of diversity, Antarctic Flavobacteria comprise a separate pool that experiences pressures from mobile genetic elements different from those present in other parts of the world. The results also establish analysis of metagenomic CRISPR spacer content as a powerful tool to study the diversity of bacterial populations.
INTRODUCTION Snow covers about 35% of the Earth's surface, permanently or for varying times during the year, and is thus a major climatic and ecological system. It directly affects climate, moisture budget, and sea level, and also serves as an interface between different ecosystems (Pomeroy and Brun, 2001; Zhang, 2005). Snow ecosystems are characterized by harsh conditions such as low temperatures, low atmospheric humidity, low liquid water availability, and high levels of radiation (Cowan and Tow, 2004). The amount of microorganisms on surface snow varies from 10^2 cells per milliliter of melted snow at the South Pole to 10^2-10^5 in high mountain and Arctic snow. Bacterial diversity in Arctic and alpine snow has been intensively investigated during the last few decades. Bacteria of several phylogenetic groups have been detected; most belonged to Alphaproteobacteria, Betaproteobacteria, Gammaproteobacteria, Firmicutes, Bacteroidetes, and Actinobacteria. Recently, a metagenomic study of Arctic spring snow suggested that snow bacteria can be adapted to photochemical reactions and oxidative stress in addition to cold stress, and therefore may form specific communities. Microorganisms on surface snow in Antarctica have also been analyzed. Representatives of Proteobacteria, Bacteroidetes, Cyanobacteria, and Verrucomicrobia have been detected in different sampling sites. Antarctic snow microbial communities have been found to be metabolically active based on measurements of radioactive thymidine and leucine incorporation. Microbial activity on the surface snow of Dome C was also suggested by the presence of exopolysaccharide-like debris on DAPI-stained filters and by scanning electron microscopy.
Also, evidence of active microbial life in the coastal snow of Antarctica was gained during analysis of the bacterial composition of "red snow," which was dominated by a green alga producing the pigment astaxanthin. Comparative metagenomic analysis of Antarctic snow has not been undertaken so far. Availability of such data, particularly from multiple sampling sites, could reveal the presence of particular snow-specific communities or, conversely, point to the introduction of snow microorganisms through eolian effects. Here, we performed amplicon library and metagenomic analysis of bacterial sequences from Antarctic snow collected around four Russian stations in Eastern Antarctica. The results reveal very considerable variation between the sites and show clear evidence of deposition of marine bacteria at stations close to open water. We also performed metagenomic analysis of CRISPR spacers in a Flavobacterium common in Antarctic snow. The results revealed, surprisingly, a staggering diversity of CRISPR spacers that is distinct from the limited known diversity of flavobacterial spacers from the Northern hemisphere, suggesting that the diversity of flavobacterial CRISPR spacers is generated and maintained locally in response to specific genetic parasites. Study Sites Samples were collected during the austral summer from the vicinity of four coastal Russian Antarctic stations (Progress, Druzhnaja, Mirnii, and Leningradskaja) as described previously. All stations are located on the coastal part of Eastern Antarctica (Figure 1). The distance between stations ranges from ∼150 km between Progress and Druzhnaja to ∼3000 km between Progress and Leningradskaja. The stations vary in indicators of climatic conditions, such as average temperature, humidity, and wind speed, as shown in Table 1. Total DNA Extraction, Amplification of 16S rRNA Genes, and Sequencing Samples of total DNA were prepared as described previously. PCR of a bacterial 16S rRNA gene fragment (V3-V4 region) was performed with two universal primers, 341F (5'-CCTACGGGNGGCWGCAG-3') and 805R (5'-GACTACHVGGGTATCTAATCC-3'), under the general conditions described by Herlemann et al. 2 ng of total DNA was used as a template for each PCR reaction. To avoid biases during PCR amplification, 10 replicates of each PCR reaction were performed for every sample and mixed prior to further manipulation. Amplicons were visualized on 1% ethidium bromide-stained agarose gels and purified using the Promega gel extraction kit according to the manufacturer's instructions. Negative controls (an aliquot of 10 µl of Milli-Q water subjected to concentration and DNA purification for each sample) resulted in no visible amplification products, confirming that our sample collection and processing techniques were essentially free of contamination. Paired-end sequencing was carried out on the Illumina MiSeq platform with the MiSeq reagent kit v.2 (Illumina, USA) as described previously. Sequencing of Metagenomic DNA Libraries For metagenomic sequencing, 100 ng of total DNA from each sample was used to prepare libraries as described previously. Paired-end sequencing was carried out on the Illumina MiSeq platform with the MiSeq reagent kit v.2 (Illumina, USA). Analysis of 16S rRNA Gene and Metagenomic Libraries Reads produced by sequencing of 16S rRNA amplicons were subjected to basic trimming.
First, sequences were demultiplexed and trimmed by quality with a Phred score ≥ 20 and no ambiguous bases admitted using CLC Genomics Workbench 7.0 software (CLC Bio, Aarhus, Denmark), and sequences longer than 100 bp were taken for further processing. Homopolymers longer than 8 nt were removed using the NGS QC Toolkit HomoPolymerTrimming.pl Perl script (Patel and Jain, 2012), and chimeric sequences were removed using the Ribosomal Database Project (RDP) chimera check pipeline. Phylotyping and statistical analysis were performed using the RDP classifier via the supervised taxonomic method with an 80% confidence threshold cutoff, as this approach allows rapid and extensive community comparison. Raw reads from shotgun metagenomic sequencing were trimmed by quality with a Phred score ≥ 20 and no ambiguous bases admitted. Adapters were trimmed using CLC Genomics Workbench software (CLC Bio, Aarhus, Denmark); reads longer than 50 bp were subjected to further analysis. Trimmed sequences were submitted to the MG-RAST database. Reads were taxonomically and functionally annotated by similarity searching against the M5NR database and the Subsystems database, respectively, with default parameters (maximum e-value cutoff of 10^-5, minimum identity cutoff of 60%, and minimum alignment length cutoff of 15). To specifically search for viral sequences in the metagenomic libraries, sequences were submitted to the Metavir online tool, where they were blasted against the Viral RefSeq database (NCBI). The affiliated sequences obtained were filtered from bacterial homologs using a supplementary pipeline: first, they were blasted against the nucleotide (nt) database using the standalone blastn application, and viral sequences were then extracted using MEGAN 5.10.1 software. Statistical Analysis Several measures of alpha diversity were used to estimate the diversity of bacteria in the samples. The species richness estimators S_Chao1 and S_ACE (Kemp and Aller, 2004b) and the Shannon and Simpson community diversity indices were calculated using RDP analysis tools. Coverage of the 16S rRNA libraries was calculated according to Good's formula: C = 1 - (N/individuals), where N is the number of sequences that occurred only once (Kemp and Aller, 2004a). Identification and Analysis of CRISPR Arrays To construct a set of CRISPR arrays for each metagenomic dataset, we used the CRASS algorithm with its default parameters: repeat lengths of 23-47 bp, spacer lengths of 26-50 bp, and a minimum of three spacers per array. Spacer and repeat sequences were compared with the nucleotide (nt) database using the BLAST+ tool installed on the Galaxy platform with default parameters for short input sequences (word size 7, gap open 5, gap extend 2, reward 2, penalty -3, e-value 0.01). Repeat sequences from identified CRISPR arrays were classified using the CRISPRmap tool. The search for cas genes was performed using the MG-RAST Subsystems annotation tool. To amplify CRISPR arrays of Flavobacterium psychrophilum from total DNA samples, the primers Flavo_F (CAAAATTGTATTTTAGCTTATAATTACCAAC) and Flavo_R (TACAATTTTGAAAGCAATTCACAAC) were used. Amplification reactions were carried out with Taq DNA polymerase under the following conditions: initial denaturation for 5 min at 95°C, followed by 28 cycles of 30 s at 95°C, 30 s at 55°C, and 40 s at 72°C, and a final extension at 72°C for an additional 2 min. Amplicons were visualized on 1% ethidium bromide-stained agarose gels, and DNA fragments of 200-1000 bp in length were purified from the gel and sequenced on the Illumina MiSeq platform as described above.
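Conceptually, the extraction of spacers from such repeat-spacer amplicons (described in the next paragraph) reduces to locating consecutive copies of the known repeat in each read and keeping the sequence in between. A minimal Python sketch of that idea, with a short stand-in repeat and a toy read; the actual pipeline used the R packages named below:

import re

# Stand-in 6-nt repeat for illustration; the real F. psychrophilum repeat is 46 bp.
REPEAT = "GTTTAG"

def extract_spacers(read, repeat=REPEAT):
    """Return the sequences found between consecutive copies of the repeat."""
    starts = [m.start() for m in re.finditer(repeat, read)]
    return [read[starts[i] + len(repeat):starts[i + 1]]
            for i in range(len(starts) - 1)]

# Toy read laid out as repeat-spacer-repeat-spacer-repeat.
read = ("GTTTAG" + "ACGTACGTACGTACGTACGTACGTACGT"
        + "GTTTAG" + "TTTTGGGGCCCCAAAATTTTGGGGCCCC" + "GTTTAG")
print(extract_spacers(read))  # prints the two 28-nt spacers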
Raw reads were demultiplexed and trimmed by quality with a Phred score ≥ 20 and no ambiguous bases admitted using CLC Genomics Workbench 7.0 software (CLC Bio, Aarhus, Denmark). Spacers from the amplified CRISPR arrays were bioinformatically extracted using the DNAStringSet function of the IRanges package in R. To decrease the number of spacers and to avoid overestimating diversity because of sequencing errors, spacers were clustered using a k-means algorithm. The maximum number of substitutions corresponding to biologically similar spacers within one cluster was equal to 5. Coverage and the diversity estimates S_Chao and S_ACE for the total set of spacers or clusters in each sample were calculated with the estimateD function of the vegan package in R. Centers of spacer clusters (consensus sequences calculated as the arithmetic mean for each nucleotide position over all spacers within a cluster) were compared against the nucleotide collection (nt) and environmental collection (env_nt) databases, as well as against a custom-made database containing sequences from the Antarctic shotgun metagenomic libraries of the present work, with the BLASTn algorithm, using the default parameters for short input sequences mentioned above and an e-value cutoff of 0.01. Sequences with <5 mismatches were considered positive hits. Metagenomic sequences containing protospacers were blasted against the nt and nr databases with default parameters for the BLASTn algorithm and an e-value cutoff of 0.001 using the BLAST+ tool installed on the Galaxy platform. PAM searches were performed with the CRISPRTarget online tool. Eight nucleotides upstream and downstream of each protospacer were extracted and used for PAM logo searches with the WebLogo online tool (http://weblogo.berkeley.edu/logo.cgi). Data Access The data of 16S rRNA high-throughput sequencing were made publicly available. Metagenomic Analysis of 16S rRNA Sequences from Antarctic Snow Samples Earlier, we studied the bacterial diversity of surface snow from two Russian Antarctic stations, Leningradskaja and Druzhnaja, by analyzing individual 16S rRNA gene fragments cloned after PCR amplification of DNA from melted snow samples collected during the 54th and 55th Russian Antarctic expeditions. For the present work, we used high-throughput sequence analysis of 16S rRNA amplicons from the Leningradskaja and Druzhnaja 55th-expedition samples analyzed previously and also included samples collected at the Progress and Mirnii stations at the same time. The microbial diversity at the two latter stations was not analyzed before; however, the biological activity of snow collected at Mirnii was at least 10 times higher than in the Leningradskaja and Druzhnaja samples. For Progress, bioactivity levels were low (4.4 pmol/(h·l) of thymidine incorporation and 33.1 pmol/(h·l) of L-leucine incorporation) and comparable to those in the Leningradskaja and Druzhnaja samples. DNA concentration was estimated by NanoDrop absorbance measurements, yielding estimates of 1, 1, 2, and 14 ng/µl for the Druzhnaja, Leningradskaja, Progress, and Mirnii samples, respectively. To assess bacterial diversity in the snow samples, a fragment of the bacterial 16S rRNA gene was amplified from total DNA, followed by Illumina paired-end high-throughput sequencing (HTS). The overall sequencing statistics are presented in Table S1. Results of phylogenetic analysis of 16S rRNA sequences from the Leningradskaja and Druzhnaja samples generated by HTS and by Sanger sequencing of cloned libraries were first compared.
Overall, comparisons of the class-level distributions revealed by both methods are in very good agreement with each other (Figure 2B; Pearson coefficient of correlation 0.99 for the Druzhnaja sample and 0.95 for Leningradskaja). Yet, for both stations, HTS analysis revealed increased relative abundance (or even appearance) of several minor classes, including Flavobacteriia, Alphaproteobacteria, Sphingobacteriia, Cytophagia, and Actinobacteria. The 16S rRNA gene sequences recovered by HTS from the four stations fell into 34 classes based on RDP classification. 3.4, 3.9, 4.5, and 4.3% of 16S rRNA gene reads from, respectively, the Druzhnaja, Leningradskaja, Mirnii, and Progress samples could not be affiliated to any known bacterial class by the RDP classification tool. Overall, the most abundant classes were Alphaproteobacteria, Betaproteobacteria, Gammaproteobacteria, Sphingobacteriia, Flavobacteriia, Cytophagia, Actinobacteria, Chloroplast/Cyanobacteria, and Bacilli. While Betaproteobacteria were dominant in the Leningradskaja, Druzhnaja, and Mirnii samples, Flavobacteriia were the major class in the Progress sample, constituting 40% of all sequences (Figure 2A). In fact, the latter sample was clearly very different in composition from the first three based on Pearson correlation analysis at the class level (Figure 2C). Deeper taxonomic affiliation analysis at each site was next performed. 28, 20, 14, and 35% of 16S rRNA gene reads from, respectively, Druzhnaja, Progress, Mirnii, and Leningradskaja could not be affiliated to any known genus by the RDP tool. Results of the analysis of the remaining reads are shown in Figure 3A, where the abundances of the 20 most prevalent genera are presented. The genus detected at the highest abundance in any given sample was Flavobacterium, which comprised 39% of the sequences in the Progress library, followed by Hydrogenophaga (14%) and Ralstonia (7%). In the Druzhnaja sample, 16S rRNA genes from Janthinobacterium were dominant (27%), followed by Ralstonia (15%) and Pseudomonas (11%). In the Leningradskaja sample, 16S rRNA genes from Caulobacter (12%), Acinetobacter (10%), and Comamonas (9%) were most abundant. These genera were also the most abundant in the clone library analysis, and in fact the abundances of genera at the Druzhnaja and Leningradskaja stations, as revealed by the clone library and HTS approaches, were highly correlated (Pearson correlation coefficients of 0.8 and 0.9, respectively; data not shown). In Mirnii, 16S rRNA gene sequences of Ralstonia (31%), Bacillariophyta (chloroplast-containing diatoms; 24%), and Rudaea (8%) were the most dominant. There was no correlation of genus abundance or presence between samples from the four different stations: the Pearson correlation coefficient varied from 0.1 for Progress and Leningradskaja to 0.4 between Mirnii and Druzhnaja (Figure 3B). Shotgun Metagenomic Analysis of Antarctic Snow DNA Samples DNA samples from the four stations were also subjected to shotgun metagenomic sequencing. A summary of the data obtained from the four snow samples is shown in Table S2. Sequences that passed the QC criteria were processed with the best-hit classification algorithm of the MG-RAST software using the M5NR database for phylogenetic affiliation. The results are summarized in Table 2.
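The station-to-station comparisons above, and those that follow, reduce to Pearson correlations between abundance vectors. A minimal sketch with invented abundance values, not the study's data:

import math

def pearson(x, y):
    """Pearson correlation coefficient between two equal-length abundance vectors."""
    n = len(x)
    mx, my = sum(x) / n, sum(y) / n
    cov = sum((a - mx) * (b - my) for a, b in zip(x, y))
    sx = math.sqrt(sum((a - mx) ** 2 for a in x))
    sy = math.sqrt(sum((b - my) ** 2 for b in y))
    return cov / (sx * sy)

# Hypothetical relative abundances (%) of the same four genera at two stations.
station_a = [39, 14, 7, 2]
station_b = [1, 2, 31, 24]
print(round(pearson(station_a, station_b), 2))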
The percentage of archaeal sequences in the shotgun metagenomic libraries was consistently low at all stations (<0.2% of all sequences), and these sequences were not further analyzed; no archaeal sequences were obtained previously in the 16S rRNA clone libraries of the Druzhnaja and Leningradskaja samples. Viral sequences were extracted from the metagenomic data with the Metavir tool and were also rare. Eukaryota were well represented in the Mirnii library (15% of all sequences). Samples from the other stations contained far fewer eukaryotic sequences (∼1% or less). More than half of the eukaryal sequences from Mirnii were from Bacillariophyta, suggesting that the "cyanobacterial" sequences present in the amplified 16S rRNA gene samples from this station were actually of chloroplast origin. The Mirnii and Progress stations are located within 1-5 km of open water, while Druzhnaja and Leningradskaja are located much farther from open water. Protein-coding sequence reads from the snow metagenomes were classified to metabolic functions based on the Subsystems database using the MG-RAST software. The most abundant functional groups were related to housekeeping functions, such as clustering-based subsystems (functional coupling evidence but unknown function; 14-16%), carbohydrate metabolism (9%), amino acid biosynthesis (8%), and protein metabolism (6.5-8.5%). Stress-response-related genes constituted 2.3-2.9% of all annotated reads, and within this group there was a high proportion of oxidative stress genes (43-44%). Genes of photosynthesis and respiration were clearly more abundant at the Mirnii station, where chloroplast/cyanobacterial sequences were common. Recently, principal component analysis of the relative abundance of annotated reads of functional subsystems from Arctic surface snow metagenomes was presented, and the conclusion was drawn that snow samples grouped together and were well separated from other ecosystem metagenomes. We repeated this analysis, including our Antarctic snow metagenome data. When the Antarctic samples were substituted for the Arctic samples used in the previous analysis, clear ecosystem clustering similar to the earlier reported result was obtained (Figure 4A), seemingly indicating commonalities among microbial communities of Antarctic snow. However, when the Arctic snow metagenomic samples were also included, the Antarctic samples became indistinguishable from soil and Antarctic microbial mat metagenomes; the free ocean water samples remained tightly clustered and separate, while the Arctic snow samples became very dispersed (Figure 4B). Analysis of CRISPR-Cas Sequences in Antarctic Metagenomes The CRISPR-Cas systems of adaptive prokaryotic immunity are widespread in bacteria (Marraffini and Sontheimer, 2010) and are highly dynamic, allowing one, in principle, to monitor the structure of bacterial populations in the environment. We searched for cas genes and fragments of CRISPR arrays in the sequences from our shotgun metagenomic libraries. The cas genes of all three CRISPR-Cas system types were found. Specifically, fragments of cas1, cas2, cas3, and csn1 (cas9), as well as cas4b and cmr1-6 genes, were detected. These reads constituted less than 0.03% of all sequences. Fragments of CRISPR arrays were also identified in every library. Some identified repeats matched previously described ones, for example, a 46-bp repeat from the type II CRISPR-Cas system of Flavobacterium psychrophilum, found in Progress and Druzhnaja, and a different type II 36-bp repeat matching Flavobacterium columnare in the Leningradskaja and Progress samples.
Repeats from a type I-F CRISPR-Cas system of Yersinia pseudotuberculosis were found in Druzhnaja, Leningradskaja, and Progress (Table S3). CRISPRmap, an automated tool for the classification of prokaryotic repeats based on sequence and structure conservation, has been reported to classify as many as 30-40% of repeat sequences from human microbiome samples. In contrast, in the case of the Antarctic samples, out of a total of 40 distinct repeats identified, only one could be matched with a known family (six could be matched with a known structural motif), indicating that the variety of existing adaptive immunity systems is greatly underexplored. When spacers extracted from the identified Antarctic CRISPR arrays were analyzed, no matches with spacers of previously known CRISPR arrays were detected. Further, when the entire collection of 570 unique spacers recovered from the Antarctic snow metagenomic libraries was compared against the NCBI nucleotide collection (nt), only a single hit, for a spacer associated with the F. columnare-like 36-bp repeat, was found. This spacer exactly matched a fragment of the 16S rDNA sequence of another representative of the Flavobacterium genus, Flavobacterium sp. 136G (NCBI accession number KM021132.1), contrary to the general observation that CRISPR spacers target DNA of mobile genetic elements. CRISPR interference in type II systems requires a functional protospacer adjacent motif (PAM) located downstream of the protospacer. The PAM sequence of the F. columnare type II CRISPR-Cas system is not known. Analysis of 43 spacers from the CRISPR array of a sequenced F. columnare genome (NCBI accession number CP003222.2) revealed four matches with protospacers in flavobacterial phage FCL-2. Sequences adjacent to these protospacers contained a TAA trinucleotide five nucleotides downstream of each protospacer. Both the downstream location of the putative PAM and its separation from the protospacer by a string of non-conserved nucleotides are typical for type II CRISPR-Cas systems. The putative PAM sequence was absent downstream of the Flavobacterium sp. 136G 16S rDNA sequence matching the spacer identified from the metagenomic data. Thus, this particular 16S rDNA-targeting spacer may not be functional (see, however, below). Three spacers associated with the F. psychrophilum 46-bp repeat were found in both the Progress and Druzhnaja samples. The rest of the spacers were unique to each station. Since flavobacterial rRNA was present in samples from all stations, we were interested in assessing the diversity of F. psychrophilum spacers at each site. To this end, PCR primers matching the 46-bp repeat were designed and used to amplify spacers from each snow community DNA (Figure 5A). By design, the procedure allows amplification of spacers associated with the 46-bp repeat; however, the information about the order of the spacers in the CRISPR arrays is lost. Amplification products were detected in samples from three stations: Progress, Druzhnaja, and Leningradskaja. The amplified material was subjected to Illumina sequencing. A total of ∼870,000 spacers with an average length of 30 ± 2 nucleotides was obtained (in published F. psychrophilum genomes, spacers are 28-31 nucleotides long). We next clustered the spacers in each sample, combining spacers that differed from each other by fewer than 5 nucleotides into the same cluster. After clustering, 2759 unique spacer clusters remained in Leningradskaja, 2584 in Druzhnaja, and 3822 in Progress (Table 3); the true variety of spacers in the samples was thus 1.5-2.5 times higher than the number of clusters obtained.
It therefore follows that the diversity of CRISPR spacers associated with the F. psychrophilum 46-bp repeat (and, by extension, of F. psychrophilum) in Antarctic snow is extremely high. When spacers from the three stations were compared to each other, only 58 clusters (0.7% of the total) were common to all three stations (Figure 5B). The percentage of clusters unique to each station varied from 66% for Druzhnaja to 92% for Leningradskaja. The Druzhnaja spacer set was most similar to that of Progress (about 30% of spacers in common), with a much smaller (<7%) overlap with the Leningradskaja set. The overlap of the Progress and Leningradskaja sets was just 3%. Ninety-five percent of all spacers were located within 14, 29, and 21% of the clusters from Progress, Leningradskaja, and Druzhnaja, respectively, i.e., were highly overrepresented. Bacteria with such spacers must be highly abundant in the samples. Alternatively, overrepresented spacers may be shared between many strains. A small fraction (1-3%) of self-complementary spacers derived from the same protospacer was observed. Such paired spacers have been reported before for Streptococcus agalactiae, Sulfolobus solfataricus, and Escherichia coli (Erdmann and Garrett, 2012). In most cases when self-complementary spacers were observed, one spacer in the pair belonged to an overrepresented group. A high number of such paired spacers was shared between two or more stations (up to 92% of the self-complementary spacers in the Druzhnaja station sample were also found at other stations). Many reads corresponded to amplified fragments that contained two spacers and, therefore, harbored a copy of an "internal" repeat, whose sequence, by design, could not be affected by the primers used during the amplification step (Figure 5A). Analysis of such reads revealed different repeat variants (Table S5). Similar cases of nearly identical repeat sequences have been described previously for other organisms, for example, E. coli (Touchon and Rocha, 2010) and H. volcanii. The most abundant variant constituted 65.6% of all "internal" repeat sequences and matched the published F. psychrophilum repeat consensus used to design the oligonucleotide primers for amplification. The second variant had one mismatch from the consensus in the 6th position and constituted 34% of all "internal" repeats. Two other repeat variants had, in addition to the 6th-position mismatch, changes in the 13th or the 21st positions and were minor (0.2 and 0.1% of all "internal" repeats, respectively). The relative proportions of the repeat variants were the same in libraries from the three Antarctic sites analyzed. In sequenced F. psychrophilum genomes, a variant repeat with one mismatch from the consensus in the 18th position constitutes 4% of all repeat sequences. This variant is absent from the Antarctic samples. When cluster consensus sequences from each station were compared to the NCBI nucleotide database using the BLASTn algorithm, a very large number of matches with likely irrelevant (i.e., eukaryotic) sequences was found. We therefore limited comparisons to a custom database containing all known sequences of Flavobacterium and their phages. None of the Antarctic spacers matched any of the 117 unique spacers associated with the 46-bp repeat from fourteen sequenced F. psychrophilum strains available in GenBank (our clustering procedure combined these 117 spacers into 97 clusters). Ten Antarctic spacer clusters matched flavobacterial phages FCL-2, 6H, 11b, or 1/32, while 38 matched Flavobacterium chromosomes (Table S6).
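The grouping of near-identical spacers that underlies these counts can be illustrated with a minimal sketch. The paper used k-means clustering, so the greedy Hamming-distance grouping below (threshold of five substitutions, matching the similarity criterion described in the Methods) is an illustrative simplification, not the authors' pipeline:

def hamming(a, b):
    """Number of mismatches between two equal-length spacer sequences."""
    return sum(x != y for x, y in zip(a, b))

def greedy_cluster(spacers, max_subs=5):
    """Assign each spacer to the first cluster whose representative is within
    max_subs substitutions; otherwise start a new cluster."""
    reps, clusters = [], []
    for s in spacers:
        for i, r in enumerate(reps):
            if len(s) == len(r) and hamming(s, r) <= max_subs:
                clusters[i].append(s)
                break
        else:  # no existing cluster was close enough
            reps.append(s)
            clusters.append([s])
    return clusters

spacers = ["ACGTACGTACGTACGTACGTACGTACGTAC",
           "ACGTACGTACGTACGTACGTACGTACGTAA",   # one mismatch from the first
           "TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT"]
print(len(greedy_cluster(spacers)))  # 2 clusters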
Interestingly, one cluster consensus sequence (leningradskaja_747) had multiple hits in various flavobacterial genomes (F. indicum, F. psychrophilum, F. columnare, and F. branchiophilum). Inspection of the genomic sites that matched this spacer revealed that they are composed of non-coding 125-bp-long imperfect palindromic repeats that are spread throughout the F. indicum (30 copies) and F. psychrophilum (5 copies) genomes and are present in single copies in F. columnare and F. branchiophilum (Figure 6A). Analysis of the distribution and genetic neighborhoods of these repeats in the F. indicum and F. psychrophilum genomes (data not shown) revealed that they are clustered in regions containing multiple repeated genes of unknown function, transposase genes, and restriction-modification system genes (Figure 6B). We also analyzed CRISPR cassettes from all F. psychrophilum isolates available in GenBank. Twelve spacers matching flavobacterial phages 6H and 1/32 were identified among the 117 unique spacers present in the F. psychrophilum strains sequenced to date. When the flanking sequences of these protospacers were compared to each other, a likely PAM, NNATAT, downstream of the protospacers was detected (Figure 6C). Neither the 10 protospacers in the genomes of flavophages nor the 38 protospacers in flavobacterial genomes matching Antarctic spacers contain such (or any other) adjacent conserved motif. We next compared the consensus sequences of the Antarctic spacer clusters with the metagenomic reads obtained in this work, as well as with sequences from the metagenomic env_nt database. A total of 117 hits to the env_nt database and 511 hits to the Antarctic reads was obtained. When the origin of the 511 Antarctic metagenomic reads that contained sequences matching F. psychrophilum spacers was investigated, 62% of the reads could not be identified by either nt or nr database searches. Of the remaining 38% of reads (corresponding to 194 cluster consensus sequences), 87 originated from flavobacterial chromosomes, 21 from Flavobacterium phage 11b or plasmids, 49 from other phages (mostly Cellulophaga phage phi10:1), and 37 from other eubacterial genomes. Twelve and 18 additional hits to Flavobacterium chromosomes and flavophages, respectively, were obtained when reads with no matches to the nt database were analyzed against the nr database. Among the matching sequences in the env_nt database, there were four Flavobacterium chromosomes and 12 bacteriophages of various hosts. When the flanking sequences of protospacers identified in the Antarctic metagenomic sequences were compared to each other, an area of strong conservation 3-6 nucleotides downstream of the protospacer (NNAAAG) was detected (Figure 6D). This sequence is different from the putative PAM motif detected during searches with spacers from published F. psychrophilum genomes (NNATAT, above, Figure 6C), but the location of the conserved positions is the same. No conservation in flanking sequences was detected for protospacers identified in metagenomic reads from the env_nt database. Neither of the putative PAM motifs is associated with protospacers from the 125-bp-long imperfect palindromic repeats (above).
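The PAM detection described above boils down to extracting fixed-length flanks around each protospacer hit and looking for positionally conserved bases. A minimal sketch with a toy sequence (the study used CRISPRTarget and WebLogo on real metagenomic hits):

from collections import Counter

def downstream_flanks(genome, spacer, flank=8):
    """Collect the `flank` nucleotides immediately downstream of every
    occurrence of the protospacer (plus strand only, for simplicity)."""
    flanks, i = [], genome.find(spacer)
    while i != -1:
        end = i + len(spacer)
        if end + flank <= len(genome):
            flanks.append(genome[end:end + flank])
        i = genome.find(spacer, i + 1)
    return flanks

def position_consensus(flanks):
    """Most common base at each flank position, a crude stand-in for a
    WebLogo-style conservation plot."""
    return [Counter(col).most_common(1)[0][0] for col in zip(*flanks)]

genome = "TTGACGTACGTAAATAGGCCTTAACGTACGTAAATAGCC"
print(position_consensus(downstream_flanks(genome, "ACGTACGT")))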
DISCUSSION In this work, we significantly extended the previous analysis of the surface snow microbiota around Russian research stations in Eastern Antarctica by (i) increasing the number of stations analyzed, (ii) using high-throughput sequencing to analyze 16S rRNA genes, (iii) performing metagenomic analysis of the snow microbiome, and (iv) analyzing the diversity of CRISPR spacers of flavobacteria common in Antarctic snow. The analysis presented in this work was more extensive than the previous limited analysis using cloned 16S rRNA gene fragments (∼50,000 sequences per sample compared to ∼120 sequences analyzed using the clone library approach). Yet, for the two stations where direct comparisons are possible, Druzhnaja and Leningradskaja, a very good correlation between the class- and genus-level composition of microbial sequences in the samples was revealed, indicating that the limited sampling of the clone libraries did not introduce significant biases in the representation of major classes and genera. Moreover, when rRNA gene sequences were extracted from the metagenomic reads and the class-level phylogenetic complexity was compared with that of the amplified 16S rRNA genes, a good match was also observed (Pearson coefficient values between 0.94 and 0.98), indicating that our conditions of PCR amplification of 16S rRNA gene fragments did not introduce significant biases. HTS analysis revealed increased abundance (or even appearance) of several minor classes, including Flavobacteriia, Alphaproteobacteria, Sphingobacteriia, Cytophagia, and Actinobacteria, in both stations. These minor classes appeared at the expense of Betaproteobacteria, which nevertheless remained the major class in both samples. This result is an expected consequence of the much deeper coverage obtained with HTS. Principal component analysis of the relative abundance of annotated reads of functional subsystems from the Antarctic surface snow metagenomes revealed some clustering, which, however, was found to be very sensitive to the inclusion of additional environmental samples in the analysis. As expected, and recently confirmed by experimental data, there is a much greater overlap in shared genes revealed by metagenomic DNA analysis than by transcriptomic and proteomic analyses of samples from different ecosystems. Such a large overlap may explain the observed instability of the results of the principal component analysis of functional subsystems in the Antarctic metagenomic data. Additional studies will be needed to confirm whether there is a characteristic set of gene functions in snow communities. Spoligotyping, a procedure based on comparisons of spacer sets in different strains of the same bacterial species, is commonly used for epidemiological tracing of pathogens. We reasoned that F. psychrophilum CRISPR arrays, if present in all four sampled Antarctic sites, might allow us to compare the diversity of the resident F. psychrophilum populations and establish relationships between them. An efficient procedure was elaborated to amplify spacer sets from environmental DNA, and k-means clustering allowed us to parcel the very large number of spacers generated after PCR amplification into a manageable number of spacer clusters. Still, a very high number of spacer clusters was observed in the samples, which is an unexpected result, since a recent report indicated that the F. psychrophilum CRISPR-Cas system is inactive and that the spacer content of CRISPR arrays is identical in F. psychrophilum isolated in geographically remote locations at different times.
Spacer sets present in the three different Antarctic sites where successful amplification using F. psychrophilum CRISPR repeat-specific primers was achieved differed significantly from each other, with only a very minor portion of spacers common to all three sites. The larger number of common spacers between Druzhnaja and Progress agrees with the geographical proximity of these stations. Curiously, this similarity based on common CRISPR spacers was not supported by phylogenetic analysis of the bacterial communities based on 16S rRNA genes, according to which Druzhnaja was more similar to the Leningradskaja station. Despite the very large number of F. psychrophilum spacers uncovered in our work, no matches with spacers present in F. psychrophilum isolates from the Northern hemisphere available in GenBank were observed. Moreover, comparisons with environmental metagenomic data revealed that the Antarctic shotgun metagenome from our work, which is orders of magnitude smaller than the combined metagenomes stored in the env_nt database, contains several times more hits to the Antarctic F. psychrophilum spacers revealed during HTS analysis of amplified CRISPR spacers. This result suggests that Antarctic F. psychrophilum tend to acquire spacers locally. Evidence of genetically different pools of viruses at Southern Ocean and Northern hemisphere sampling sites (including Vancouver Island in British Columbia, Monterey Bay, California, and Scripps Pier in San Diego, California) was recently obtained. The presence of such separate pools in flavophages could be responsible for the observed variations in spacer content (see, however, below). The CRISPR-Cas systems of Antarctic F. psychrophilum and of strains isolated in the Northern hemisphere may even have evolved different PAM specificities, since the putative PAMs revealed by comparisons of protospacers matching the spacers known from the two regions differ. Such a result is not without precedent, since varying preferences for PAM selection during spacer acquisition were previously noted for type I-E CRISPR-Cas system variants from different E. coli strains and for the type I-B CRISPR-Cas system of Haloferax volcanii. The presence of different, non-overlapping sets of CRISPR repeat polymorphisms in our Antarctic samples and in known F. psychrophilum CRISPR arrays also supports the existence of local variations. The original theoretical insights about the immune function of CRISPR-Cas systems came after the observation of matches between spacer sequences and protospacers in bacteriophage and plasmid sequences specific to a bacterial host. Later, self-targeting spacers were also identified, and a regulatory function of such spacers was proposed. Analysis of the F. psychrophilum repeat-associated spacers suggests that, at least for the Antarctic spacer set, targeting of bacteria related to the host is the most common scenario. Such targeting could help prevent genetic exchange between species within the genus, although the biological significance of such a restriction is unclear. Previous analysis has revealed a loss of synteny within Flavobacterium spp. genomes, likely due to the presence of numerous repeats (e.g., insertion sequences and rhs elements). Our analysis revealed an interesting case of a CRISPR spacer with multiple hits in various flavobacterial genomes. The matching sequence was part of a non-coding 125-bp-long imperfect palindromic repeat that is spread throughout the F. indicum and F.
psychrophilum genomes and is also present in single copies in F. columnare and F. branchiophilum. The location and number of these repeats differ in different isolates of F. psychrophilum, suggesting that they are subject to horizontal transfer. The 125-bp repeat is distinct from both IS and rhs elements; however, it may play a similar role in promoting flavobacterial genome plasticity. Targeting of this element by the CRISPR-Cas system may help control the spread of such elements and is in line with an emerging theme that CRISPR-Cas systems serve as one of the mechanisms of endogenous gene regulation. Our analysis of the Antarctic spacers has an important caveat in that we determined the identity of spacers associated with a particular repeat and cannot exclude that such repeats (and spacers) come from other, non-F. psychrophilum arrays. We consider this scenario unlikely since, at least at the Progress station, where rRNA gene sequences from F. psychrophilum are most abundant, the spacer variety is also the largest. Besides, the largest number of spacers with matches to metagenomic sequences matched Flavobacterium chromosomes, which also strengthens the link between the spacers identified by our approach and the Flavobacterium genus.
AUTHOR CONTRIBUTIONS AL collected samples, performed experiments, analyzed data, and prepared figures; SM performed clustering and PCA analyses and prepared figures; SS performed heatmap analysis; ML performed NGS sequencing; VK collected samples and organized expedition fieldwork; KS designed the research, supervised the project, analyzed data, and wrote the paper.
FUNDING This work was supported by the Ministry of Education and Science of the Russian Federation (No. 14.B25.31.0004) and by the Russian Science Foundation (No. 14-14-00988). The funders had no role in study design, data collection and interpretation, or the decision to submit the work for publication.
SUPPLEMENTARY MATERIAL The Supplementary Material for this article can be found online at: http://journal.frontiersin.org/article/10.3389/fmicb.2016.00398
The Role of a Photoresist Film on Reverse Gas Plasma Etching of Chromium Films The role of a photoresist film on reverse gas plasma etching of chromium photomask plates has been studied. The variation of etching profiles has been observed using SEM techniques. It is speculated that a WO3 layer on the chromium film forms a masking layer to the etching and that the WO3 layer can be removed by decomposition of the photoresist film in the plasma. A study of the relation between photoresist thickness and etching time has shown that there is an optimum photoresist thickness for each WO3 concentration in the chromium film.
from docutils import nodes
from sphinx.errors import SphinxError

# Namespace in which palette expressions are evaluated; the module is expected
# to populate it (e.g. with names from bokeh.palettes) before the role is used.
_globals = {}


def bokeh_palette(name, rawtext, text, lineno, inliner, options=None, content=None):
    # Evaluate the role's text as a Python expression that yields a palette.
    try:
        exec("palette = %s" % text, _globals)
    except Exception as e:
        raise SphinxError("cannot evaluate palette expression '%r', reason: %s" % (text, e))
    p = _globals.get('palette', None)
    # A palette must be a list or tuple of color strings.
    if not isinstance(p, (list, tuple)) or not all(isinstance(x, str) for x in p):
        raise SphinxError("palette expression '%r' generated invalid or no output: %s" % (text, p))
    # Shrink the swatch width as the palette grows so the rendered row stays compact.
    w = 20 if len(p) < 15 else 10 if len(p) < 32 else 5 if len(p) < 64 else 2 if len(p) < 128 else 1
    # PALETTE_DETAIL is a module-level template rendering the color swatches.
    html = PALETTE_DETAIL.render(palette=p, width=w)
    node = nodes.raw('', html, format="html")
    return [node], []
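For context, a docutils role like this one is typically registered in a Sphinx extension's setup() hook. The role name and the swatch template below are assumptions for illustration (the real module's PALETTE_DETAIL and registration may differ):

from jinja2 import Template

# Assumed swatch template: one colored inline block per palette entry.
PALETTE_DETAIL = Template(
    '<span>{% for c in palette %}'
    '<span style="background-color:{{ c }};width:{{ width }}px;display:inline-block;">&nbsp;</span>'
    '{% endfor %}</span>'
)

def setup(app):
    # Hypothetical role name; after this, docs could write
    # :bokeh-palette:`["#ff0000", "#00ff00", "#0000ff"]` inline.
    app.add_role("bokeh-palette", bokeh_palette)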
In the medical profession, a physician may wish to confirm a change in a diseased or affected part after a lapse of time. In a case in which a CT (Computed Tomography) apparatus is used to make this confirmation, for example, images of the same diseased or affected part need to be extracted from groups of CT images captured at different points in time. The CT apparatus generates images of the body sliced from the parietal region in a direction toward the toes. In a case in which the position of a desired body part does not change within the body, and the position of the image of this desired body part in the sequence of images from the parietal region or the toes can be specified in the group of CT images captured at another point in time, the images at the specified position in the sequence of images can be extracted from the groups of CT images captured at different points in time, so as to obtain the images of this desired body part. An example of the related art is described in International Publication Pamphlet No. WO2009/150882. However, the position of a tip end part (the side farther away from the bronchial tube) of a lung, or the like, moves up and down depending on the movement of the diaphragm at the time of breathing, for example, and the slice position of the image capturing the desired body part may change. For this reason, it is difficult to determine, within the sequence of images captured at another point in time, the position corresponding to the image of the desired body part found in the sequence of images captured at one point in time. A technique that compares features of images may be used to search, from the group of CT images captured at the other point in time, for the body part having features similar to those of the desired body part included in the group of CT images captured at the one point in time. In this case, it is necessary to detect the features from the plurality of sliced images, and to collate the sliced images captured at the different points in time in order to find a match. For this reason, it takes a long processing time to search for and find the image of the body part similar to the desired body part, and the search for the image of the body part having features similar to those of the desired body part may not be completed within a desired length of time.
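To make the feature-comparison idea concrete, the sketch below finds, for a reference slice in one CT series, the most similar slice in another series using normalized cross-correlation; the array shapes and data are hypothetical, not taken from any apparatus described here:

import numpy as np

def best_matching_slice(ref_slice, volume):
    """Index of the slice in `volume` (slices, H, W) most correlated with ref_slice."""
    r = (ref_slice - ref_slice.mean()) / (ref_slice.std() + 1e-9)
    best_i, best_score = -1, -np.inf
    for i, s in enumerate(volume):
        z = (s - s.mean()) / (s.std() + 1e-9)
        score = float((r * z).mean())  # normalized cross-correlation
        if score > best_score:
            best_i, best_score = i, score
    return best_i

# Toy volumes: 20 slices of 64x64; series B is series A shifted by 3 slices.
rng = np.random.default_rng(0)
a = rng.random((20, 64, 64))
b = np.roll(a, 3, axis=0)
print(best_matching_slice(a[10], b))  # expected: 13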
def smallestDifference(arrayOne, arrayTwo):
    # Find the pair of numbers (one from each array)
    # whose absolute difference is closest to zero.
    # Return an array containing these two numbers.
    arrayOne.sort()
    arrayTwo.sort()

    smallest_difference = float('inf')
    list_smallest_difference = []

    # For each value in arrayTwo, scan arrayOne from both ends at once; the two
    # indices sweep inward, so every element of arrayOne gets checked. This is
    # an O(n * m) brute-force scan (the sorts are not strictly required for it).
    for value in arrayTwo:
        left_idx = 0
        right_idx = len(arrayOne) - 1

        while left_idx <= right_idx:
            if abs(arrayOne[left_idx] - value) < smallest_difference:
                list_smallest_difference = [arrayOne[left_idx], value]
                smallest_difference = abs(arrayOne[left_idx] - value)

            if abs(arrayOne[right_idx] - value) < smallest_difference:
                list_smallest_difference = [arrayOne[right_idx], value]
                smallest_difference = abs(arrayOne[right_idx] - value)

            left_idx += 1
            right_idx -= 1

    return list_smallest_difference
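Since both arrays are sorted anyway, the scan above can be replaced by the standard linear two-pointer walk, which advances only the pointer at the smaller value; a sketch of that alternative:

def smallest_difference_two_pointer(array_one, array_two):
    # Walk both sorted arrays once, always advancing the pointer at the
    # smaller value, since that is the only move that can shrink the gap.
    array_one.sort()
    array_two.sort()
    i = j = 0
    best_pair, best_diff = [], float('inf')
    while i < len(array_one) and j < len(array_two):
        a, b = array_one[i], array_two[j]
        if abs(a - b) < best_diff:
            best_diff, best_pair = abs(a - b), [a, b]
        if a < b:
            i += 1
        elif b < a:
            j += 1
        else:
            return [a, b]  # a difference of zero cannot be beaten
    return best_pair

print(smallest_difference_two_pointer([-1, 5, 10, 20, 28, 3], [26, 134, 135, 15, 17]))
# [28, 26]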
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.graphics.RectF;
import android.util.AttributeSet;
import android.util.DisplayMetrics;
import android.util.Log;
import android.view.View;

/**
 * Created by Administrator on 2016/1/19.
 */
public class CustomMonitorMenu extends View {

    public final int R1 = 100;
    public final int R2 = 8;
    // Per-step difference in cos y.
    public float width;
    public float height;
    public float screenWidth;
    public float screenHeight;
    public int startAngle = 235; // start angle of the arc (was 30 initially)
    public float initialAngle = 0.1f; // current indicator angle (30 in the original design); treated as degrees in onDraw
    public int sweepAngle = 70; // sweep angle of the arc, in degrees
    public double newX;
    public double newY;
    // Target position that the small ball is moved to.
    public Paint mPaint2;
    public Paint mPaint3;
    public Paint mPaint4;
    public boolean flag = false;
    public Paint mPaint;
    public Context mContext;
    public String Sangle = "0";
    public String Eangle = "120";

    /**
     * Callback interface for reporting the current angle.
     */
    public interface ICoallBack {
        void getAngle(int angle);
    }

    /**
     * Callback instance.
     */
    ICoallBack icallBack = null;

    /**
     * Custom event hook of this custom view.
     */
    public void setEvent(ICoallBack iBack) {
        icallBack = iBack;
    }

    public CustomMonitorMenu(Context context) {
        super(context);
        this.mContext = context;
        this.setLayerType(View.LAYER_TYPE_SOFTWARE, null);
        initView();
        // setWillNotDraw(false);
    }

    public CustomMonitorMenu(Context context, AttributeSet attrs) {
        super(context, attrs);
        this.mContext = context;
        this.setLayerType(View.LAYER_TYPE_SOFTWARE, null);
        initView();
        // setWillNotDraw(false);
    }

    public void setindex(int initAng, int viewW) {
        int viewX = (int) (viewW - screenWidth);
        // Guard against division by zero when the view is exactly as wide as the screen.
        if (viewX != 0) {
            initialAngle = Math.abs(initAng) * sweepAngle / viewX;
        }
        if (initialAngle >= 0 && initialAngle <= 70) {
            initialAngle = 35 - initialAngle;
        }
        Log.e("hgz------->", " initAng = " + Math.abs(initAng) + " initialAngle = " + initialAngle + " viewX = " + viewX);
        invalidate();
    }

    private void initView() {
        mPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
        mPaint.setColor(Color.BLACK);
        mPaint.setAntiAlias(true);
        mPaint.setStrokeWidth(2.0f);
        mPaint.setStyle(Paint.Style.STROKE);

        mPaint2 = new Paint(Paint.ANTI_ALIAS_FLAG); // anti-aliased
        mPaint2.setColor(Color.BLACK);
        mPaint2.setStrokeWidth(2.0f);
        mPaint2.setStrokeJoin(Paint.Join.ROUND); // round joins so drawn lines look smooth
        mPaint2.setStrokeCap(Paint.Cap.ROUND);

        mPaint3 = new Paint();
        mPaint3.setColor(Color.BLACK);
        mPaint3.setStrokeWidth(2.0f);
        mPaint3.setStyle(Paint.Style.STROKE);
        mPaint3.setAntiAlias(true);

        mPaint4 = new Paint(Paint.ANTI_ALIAS_FLAG);
        mPaint4.setStrokeWidth(1);
        mPaint4.setTextSize(16);
        mPaint4.setStyle(Paint.Style.STROKE);
        mPaint4.setColor(Color.BLACK);
        // The next line would center the text horizontally; drawText would then
        // need to be passed targetRect.centerX().
        // mPaint4.setTextAlign(Paint.Align.CENTER);

        // Determine the center of the arc from the screen dimensions.
        DisplayMetrics metric = new DisplayMetrics();
        ((MainActivity) mContext).getWindowManager().getDefaultDisplay().getMetrics(metric);
        // Screen width and height.
        screenWidth = metric.widthPixels;
        screenHeight = metric.heightPixels;
        // Coordinates of the arc center.
        width = metric.widthPixels / 2.0f;
        height = metric.heightPixels / 8.0f;
        Log.e("aaa--->center coordinates:", width + "---" + height);
    }

    @Override
    protected void onDraw(Canvas canvas) {
        super.onDraw(canvas);
        RectF oval1 = new RectF(width - 100, height - 100, width + 100, height + 100);
        canvas.drawArc(oval1, startAngle, sweepAngle, false, mPaint); // the small arc
        Log.e("ooo--->", "arc center coordinates: width " + width + " height " + height);
        // getNewLocation(initialAngle); // would move the small ball to its new position

        // Tick mark at the top of the arc and the two end labels.
        canvas.drawLine(width, (height - R1), width, (height - R1) + 10, mPaint3);
        canvas.drawText(Sangle, (float) (width - Math.sin(35 * Math.PI / 180) * R1) - 20, (float) (height - Math.cos(35 * Math.PI / 180) * R1) + 10, mPaint4);
        canvas.drawText(Eangle, (float) (width + Math.sin(35 * Math.PI / 180) * R1) + 8, (float) (height - Math.cos(35 * Math.PI / 180) * R1) + 10, mPaint4);
        // canvas.drawCircle((float) newX, (float) newY, R2, mPaint2);

        // Short cross strokes at both ends of the arc.
        canvas.drawLine((float) (width - Math.sin(35 * Math.PI / 180) * R1), (float) (height - Math.cos(35 * Math.PI / 180) * R1), (float) (width - Math.sin(35 * Math.PI / 180) * R1) - 5.0f, (float) (height - Math.cos(35 * Math.PI / 180) * R1) - 6.0f, mPaint3);
        canvas.drawLine((float) (width - Math.sin(35 * Math.PI / 180) * R1), (float) (height - Math.cos(35 * Math.PI / 180) * R1), (float) (width - Math.sin(35 * Math.PI / 180) * R1) + 5.0f, (float) (height - Math.cos(35 * Math.PI / 180) * R1) + 6.0f, mPaint3);
        canvas.drawLine((float) (width + Math.sin(35 * Math.PI / 180) * R1), (float) (height - Math.cos(35 * Math.PI / 180) * R1), (float) (width + Math.sin(35 * Math.PI / 180) * R1) + 5.0f, (float) (height - Math.cos(35 * Math.PI / 180) * R1) - 6.0f, mPaint3);
        canvas.drawLine((float) (width + Math.sin(35 * Math.PI / 180) * R1), (float) (height - Math.cos(35 * Math.PI / 180) * R1), (float) (width + Math.sin(35 * Math.PI / 180) * R1) - 5.0f, (float) (height - Math.cos(35 * Math.PI / 180) * R1) + 6.0f, mPaint3);

        // canvas.drawCircle((float) (width - Math.sin(35 * Math.PI / 180) * R1), (float) (height - Math.cos(35 * Math.PI / 180) * R1), R2, mPaint2);
        // Draw the indicator ball at the current angle.
        canvas.drawCircle((float) (width - Math.sin(initialAngle * Math.PI / 180) * R1), (float) (height - Math.cos(initialAngle * Math.PI / 180) * R1), R2, mPaint2);
        Log.e("hgz", "ball coordinates: " + (float) (width - Math.sin(initialAngle * Math.PI / 180) * R1) + " " + (float) (height - Math.cos(initialAngle * Math.PI / 180) * R1));
        // canvas.drawCircle((float) (width + Math.sin(35 * Math.PI / 180) * R1), (float) (height - Math.cos(35 * Math.PI / 180) * R1), R2, mPaint2);
    }
}