content
stringlengths
7
2.61M
#!/usr/bin/env python3
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Utility to access ChromeOS Netboot firmware settings."""

import argparse
import pprint
import socket
import struct
import sys

from cros.factory.utils import fmap


# Values to encode netboot settings.
CODE_TFTP_SERVER_IP = 1
CODE_KERNEL_ARGS = 2
CODE_BOOT_FILE = 3
CODE_ARGS_FILE = 4

SETTINGS_FMAP_SECTION = 'SHARED_DATA'


class Image:
  """A class to represent a firmware image.

  Areas in the image should be accessed using the [] operator which takes
  the area name as its key.

  Attributes:
    data: The data in the entire image.
  """

  def __init__(self, data):
    """Initialize an instance of Image.

    Args:
      self: The instance of Image.
      data: The data contained within the image.
    """
    try:
      # FMAP identifier used by the cros_bundle_firmware family of utilities.
      obj = fmap.fmap_decode(data, fmap_name='FMAP')
    except struct.error:
      # FMAP identifier used by coreboot's FMAP creation tools.
      # The name signals that the FMAP covers the entire flash unlike, for
      # example, the EC RW firmware's FMAP, which might also come as part of
      # the image but covers a smaller section.
      obj = fmap.fmap_decode(data, fmap_name='FLASH')
    self.areas = {}
    for area in obj['areas']:
      self.areas[area['name']] = area
    self.data = data

  def __setitem__(self, key, value):
    """Write data into an area of the image.

    If value is smaller than the area it's being written into, it will be
    padded out with NUL bytes. If it's too big, a ValueError exception
    will be raised.

    Args:
      self: The image instance.
      key: The name of the area to overwrite.
      value: The data to write into the area.

    Raises:
      ValueError: 'value' was too large to write into the selected area.
    """
    area = self.areas[key]
    if len(value) > area['size']:
      raise ValueError('Too much data for FMAP area %s' % key)
    # Pad with NULs so the area keeps its fixed size in the image.
    value = value.ljust(area['size'], b'\0')
    self.data = (self.data[:area['offset']] +
                 value +
                 self.data[area['offset'] + area['size']:])

  def __getitem__(self, key):
    """Retrieve the data in an area of the image.

    Args:
      self: The image instance.
      key: The area to retrieve.

    Returns:
      The data in that area of the image.
    """
    area = self.areas[key]
    return self.data[area['offset']:area['offset'] + area['size']]


class Settings:
  """A class which represents a collection of settings.

  The settings can be updated after a firmware image has been built.

  Attributes of this class other than the signature constant are stored in
  the 'value' field of each attribute in the attributes dict.

  Attributes:
    signature: A constant which has a signature value at the front of the
      settings when written into the image.
  """
  signature = b'netboot\0'

  class Attribute:
    """A class which represents a particular setting.

    Attributes:
      code: An enum value which identifies which setting this is.
      value: The value the setting has been set to.
    """

    def __init__(self, code, value):
      """Initialize an Attribute instance.

      Args:
        code: The code for this attribute.
        value: The initial value of this attribute.
      """
      self.code = code
      self.value = value

    @staticmethod
    def padded_value(value):
      """Return value padded with NULs to a multiple of 4 bytes."""
      value_len = len(value)
      pad_len = ((value_len + 3) // 4) * 4 - value_len
      return value + b'\0' * pad_len

    def pack(self):
      """Pack an attribute into a binary representation.

      Args:
        self: The Attribute to pack.

      Returns:
        The binary representation.
      """
      if self.value:
        value = self.value.pack()
      else:
        value = b''
      # The header records the unpadded length; the payload is padded.
      value_len = len(value)
      padded_value = self.padded_value(value)
      format_str = '<II%ds' % len(padded_value)
      return struct.pack(format_str, self.code, value_len, padded_value)

    def __repr__(self):
      return repr(self.value)

    @classmethod
    def unpack(cls, blob, offset=0):
      """Returns a pair of (decoded attribute, decoded length)."""
      header_str = '<II'
      header_len = struct.calcsize(header_str)
      code, value_len = struct.unpack_from(header_str, blob, offset)
      offset += header_len
      value = blob[offset:offset + value_len]
      # Advance past the padding as well, so offset lands on the next
      # attribute header.
      offset += len(cls.padded_value(value))
      if value:
        setting = IpAddressValue if code == CODE_TFTP_SERVER_IP else BytesValue
        value = setting.unpack(value)
      return cls(code, value), offset

  def __init__(self, blob):
    """Initialize an instance of Settings.

    Args:
      self: The instance to initialize.
    """
    # Decode blob if possible.
    decoded = {}
    if blob.startswith(self.signature):
      offset = len(self.signature)
      format_items = '<I'
      items, = struct.unpack_from(format_items, blob, offset)
      offset += struct.calcsize(format_items)
      for unused_i in range(items):
        new_attr, new_offset = self.Attribute.unpack(blob, offset)
        offset = new_offset
        decoded[new_attr.code] = new_attr

    def GetAttribute(code):
      return decoded.get(code, self.Attribute(code, None))

    attributes = {
        'tftp_server_ip': GetAttribute(CODE_TFTP_SERVER_IP),
        'kernel_args': GetAttribute(CODE_KERNEL_ARGS),
        'bootfile': GetAttribute(CODE_BOOT_FILE),
        'argsfile': GetAttribute(CODE_ARGS_FILE),
    }
    # Bypass __setattr__/__getattr__ machinery when installing the dict.
    self.__dict__['attributes'] = attributes

  def __setitem__(self, name, value):
    self.attributes[name].value = value

  def __getattr__(self, name):
    return self.attributes[name].value

  def pack(self):
    """Pack a Settings object into a binary representation.

    The packed binary representation can be put into an image.

    Args:
      self: The instance to pack.

    Returns:
      A binary representation of the settings.
    """
    value = self.signature
    value += struct.pack('<I', len(self.attributes))
    for attr in self.attributes.values():
      value += attr.pack()
    return value


class BytesValue:
  """Class for setting values that are stored as bytes strings."""

  def __init__(self, val):
    """Initialize an instance of BytesValue.

    Args:
      self: The instance to initialize.
      val: The value of the setting.
    """
    if isinstance(val, str):
      val = bytes(val, 'ascii')
    self.val = val

  def pack(self):
    """Pack the setting by returning its value as a bytes string.

    Args:
      self: The instance to pack.

    Returns:
      The val field as a bytes string.
    """
    return self.val

  @classmethod
  def unpack(cls, val):
    return cls(val)

  def __bytes__(self):
    return self.val

  def __repr__(self):
    return repr(self.val.strip(b'\0'))


class IpAddressValue(BytesValue):
  """Class for IP address setting value."""

  def __init__(self, val):
    """Initialize an IpAddressValue instance.

    Args:
      self: The instance to initialize.
      val: A string representation of the IP address to be set to.
    """
    in_addr = socket.inet_pton(socket.AF_INET, val)
    super().__init__(in_addr)

  @classmethod
  def unpack(cls, val):
    return cls(socket.inet_ntop(socket.AF_INET, val))

  def __str__(self):
    return socket.inet_ntop(socket.AF_INET, self.val)

  def __repr__(self):
    return repr(str(self))


def DefineCommandLineArgs(parser):
  """Defines arguments in command line invocation.

  Args:
    parser: an argparse.ArgumentParser instance.
  """
  parser.add_argument('--input', '-i', required=True,
                      help='Path to the firmware to modify; required')
  parser.add_argument('--output', '-o',
                      help='Path to store output; if not specified we will '
                      'directly modify the input file')
  parser.add_argument('--tftpserverip',
                      help='Set the TFTP server IP address (defaults to DHCP-'
                      'provided address)')
  parser.add_argument('--bootfile',
                      help='Set the path of the TFTP boot file (defaults to '
                      'DHCP-provided file name)')
  parser.add_argument('--argsfile',
                      help='Set the path of the TFTP file that provides the '
                      'kernel command line (overrides default and --arg)')
  parser.add_argument('--board',
                      help='Set the cros_board to be passed into the kernel')
  parser.add_argument('--factory-server-url',
                      help='Set the Factory Server URL')
  parser.add_argument('--arg', '--kernel_arg', default=[], dest='kernel_args',
                      metavar='kernel_args', action='append',
                      help='Set extra kernel command line parameters (appended '
                      'to default string for factory)')


def NetbootFirmwareSettings(options):
  """Main function to access netboot firmware settings."""
  print('Reading from %s...' % options.input)
  with open(options.input, 'rb') as f:
    image = Image(f.read())

  settings = Settings(image[SETTINGS_FMAP_SECTION])
  print('Current settings:')
  pprint.pprint(settings.attributes)

  # pylint: disable=unsubscriptable-object
  if options.tftpserverip:
    settings['tftp_server_ip'] = IpAddressValue(options.tftpserverip)
  if options.bootfile:
    settings['bootfile'] = BytesValue(options.bootfile + '\0')
  if options.argsfile:
    settings['argsfile'] = BytesValue(options.argsfile + '\0')
  # pylint: enable=unsubscriptable-object

  kernel_args = ''
  if options.board:
    kernel_args += 'cros_board=' + options.board + ' '
  if options.factory_server_url:
    kernel_args += 'omahaserver=' + options.factory_server_url + ' '
  kernel_args += ' '.join(options.kernel_args)
  kernel_args += '\0'
  # pylint: disable=unsubscriptable-object
  settings['kernel_args'] = BytesValue(kernel_args)
  # pylint: enable=unsubscriptable-object

  new_blob = settings.pack()
  output_name = options.output or options.input
  # If output is specified with different name, always generate output.
  do_output = output_name != options.input

  if new_blob == image[SETTINGS_FMAP_SECTION][:len(new_blob)]:
    print('Settings not changed.')
  else:
    print('Settings modified. New settings:')
    pprint.pprint(settings.attributes)
    image[SETTINGS_FMAP_SECTION] = new_blob
    do_output = True

  if do_output:
    print('Generating output to %s...' % output_name)
    with open(output_name, 'wb') as f:
      f.write(image.data)


def main(argv):
  """Main entry for command line."""
  parser = argparse.ArgumentParser(description=__doc__)
  DefineCommandLineArgs(parser)
  options = parser.parse_args(argv)
  NetbootFirmwareSettings(options)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
Joe the Plumber filed his first gripping dispatch on the conflict in Gaza from Sderot, Israel. And I'm sure that, like so many correspondents before him, he went to war in search of truth rather than for, say, publicity. Far be it from Joe to glamorize war or lament its notable lack of resemblance to the films of John Wayne or Oliver Stone. No, he's a fan of a different kind of film: newsreels. He wants to watch this war the way they used to, in the comfort of his local cinema, surrounded by wholesome examples of the American obesity epidemic. He wants stentorian voiceovers, marching in line, singing songs in unison. He doesn't want to hear a bunch of pointy-headed correspondents hemming and hawing about children being slaughtered and all that crap. In fact, Joe wants all the war reporters axed. Removed from the lines. Taken out of the theater. (Not the movie theater; they can still go there.) He doesn't want the public to be exposed to a bunch of confusing facts and figures. He wants the media "abolished" from the war altogether. Yet he's there?
Lactose repressor protein modified with dansyl chloride: activity effects and fluorescence properties. Chemical modification using 5-(dimethylamino)naphthalene-1-sulfonyl chloride (dansyl chloride) has been used to explore the importance of lysine residues involved in the binding activities of the lactose repressor and to introduce a fluorescent probe into the protein. Dansyl chloride modification of lac repressor resulted in loss of operator DNA binding at low molar ratios of reagent/monomer. Loss of nonspecific DNA binding was observed only at higher molar ratios, while isopropyl beta-D-thiogalactoside binding was not affected at any of the reagent levels studied. Lysine residues were the only modified amino acids detected. Protection of lysines-33 and -37 from modification by the presence of nonspecific DNA correlated with maintenance of operator DNA binding activity, and reaction of lysine-37 paralleled operator binding activity loss. Energy transfer between dansyl incorporated in the core region of the repressor protein and tryptophan-201 was observed, with an approximate distance of 23 A calculated between these two moieties.
Multiple sclerosis in Australia: socioeconomic factors. The data from an epidemiological study on multiple sclerosis in Australia have been analysed to determine the relation between the prevalence of the disease and educational level, and the association between level of disability and employment status. There was a significantly higher frequency of multiple sclerosis in those who left school at an older age and achieved a higher educational level. The explanation of this finding remains speculative and may be related to genetic or environmental factors. The study confirmed the recognised association between moderate-severe disability and divorce-separation and lower rates of employment.
Losing My Edge Background In an interview with the music site "ireallylovemusic", James Murphy (the leader of the group) explained his inspiration for the song: When I was DJing, playing Can, Liquid Liquid, ESG, all that kind of stuff, I became kind of cool for a moment, which was a total anomaly. And when I heard other DJs playing similar music I was like: 'Fuck! I'm out of a job! These are my records!' But it was like someone had crept into my brain and said all these words that I hate. Did I make the records? Did I fuck! So, I started becoming horrified by my own attitude. I had this moment of glory though. People would use me to DJ just to get them cool. They'd be like 'It's the cool rock disco guy' and this was really weird. And to be honest I was afraid that this new found coolness was going to go away and that's where 'Losing My Edge' comes from. It is about being horrified by my own silliness. And then it became a wider thing about people who grip onto other people's creations like they are their own. There is a lot of pathos in that character though because it's born out of inadequacy and love. — James Murphy In another interview, Murphy elaborated further on the birth and the release of the song: So I started going to different types of things and meeting different people, and started throwing parties. And all of a sudden, I was kind of cool, I'd always just been a total... not even an outsider, just sort of a nobody, a sort of invisible, sad and kind of shy. And all of a sudden I was DJing, and felt cool, throwing parties. And then one night I went to go see a band, and somebody else was playing the records that I was playing. Nobody else was playing the records that I was playing, that was sort of my thing! And I got really mad, and I got really defensive. And I was like 'What the heck, that's mine! What the--who the hell is this? 
Some 22-year-old...', and I got really embarassed [sic] by being like 'These aren't your records, you didn't write them, you just... play them, you just own them, you can't be proud of yourself for owning them' but I was mad at the same time because I was like 'I know that kid was at one of my parties...'. It was this really dense conflict that I couldn't resolve, and that's where 'Losing My Edge' came from. I didn't have a good answer. I was angry, but I was also pathetic for being angry. There wasn't really a right or wrong, you know? I was right and wrong, and this kid was right and wrong, and everybody there was right and wrong. It felt really dense, and really easy to write from. Easy to make something from. So I made that song, and everybody thought it was terrible. I remember playing it to people, and they'd give you this face (makes a face of surprise and doubt), you know, when they don't want to say anything! And they'd ask me about technical stuff like 'Oh, what are the drums?' and I was like 'OK, you don't like this'. And Phil Mossman, who was the original LCD guitar player, was the only person that was like 'I love this, I really love this, it's very funny'. So we put it out, and the people from the label, my two partners Tim and Jonathan afterwards said 'We just thought you were making a big mistake, you're gonna look like an idiot, and we just feel bad for you'. It was the B-side to 'Beat Connection' until the last minute. I thought 'No, that should be the A-side, that should be the one I should sink or swim with.' Composition "Losing My Edge" is a dance-punk and alternative dance song. It features a rhythm similar to "Change" by Killing Joke. The track mentions the following artists, genres and music venues: acid house, Can, Suicide, Captain Beefheart, Daft Punk, CBGB, Paradise Garage, Larry Levan, The Peech Boys, Modern Lovers, Niagara, Detroit techno, Yaz, and ends with "But have you seen my records?" 
and a list of the following: This Heat, Pere Ubu, Outsiders, Nation of Ulysses, Mars, Trojans, Black Dice (referred to as 'The Black Dice'), Todd Terry, the Germs, Section 25, Althea & Donna, Sexual Harassment, a-ha, Dorothy Ashby, PIL, The Fania All-Stars, The Bar-Kays, The Human League, The Normal, Lou Reed, Scott Walker, Monks, Niagara, Joy Division, Laurent Garnier, The Creation, Sun Ra, Scientists, Royal Trux, 10cc, Rammelzee, Eric B. and Rakim, The Index, Basic Channel, Soulsonic Force ("Just hit me!"), Juan Atkins, Manuel Göttsching, David Axelrod, Electric Prunes, Gil Scott-Heron, The Slits, Faust, Mantronix, Pharoah Sanders, The Fire Engines, Swans (referred to as 'The Swans'), Soft Cell (referred to as 'The Soft Cell') and The Sonics (repeated four times).
The Alternative Oxidase AOX Does Not Rescue the Phenotype of tko25t Mutant Flies A point mutation in the Drosophila gene coding for mitoribosomal protein S12 generates a phenotype of developmental delay and bang sensitivity. tko25t has been intensively studied as an animal model for human mitochondrial diseases associated with deficiency of mitochondrial protein synthesis and consequent multiple respiratory chain defects. Transgenic expression in Drosophila of the alternative oxidase (AOX) derived from Ciona intestinalis has previously been shown to mitigate the toxicity of respiratory chain inhibitors and to rescue mutant and knockdown phenotypes associated with cytochrome oxidase deficiency. We therefore tested whether AOX expression could compensate the mutant phenotype of tko25t using the GeneSwitch system to activate expression at different times in development. The developmental delay of tko25t was not mitigated by expression of AOX throughout development. AOX expression for 1 d after eclosion, or continuously throughout development, had no effect on the bang sensitivity of tko25t adults, and continued expression in adults older than 30 d also produced no amelioration of the phenotype. In contrast, transgenic expression of the yeast alternative NADH dehydrogenase Ndi1 was synthetically semi-lethal with tko25t and was lethal when combined with both AOX and tko25t. We conclude that AOX does not rescue tko25t and that the mutant phenotype is not solely due to limitations on electron flow in the respiratory chain, but rather to a more complex metabolic defect. The future therapeutic use of AOX in disorders of mitochondrial translation may thus be of limited value. ABSTRACT A point mutation in the Drosophila gene coding for mitoribosomal protein S12 generates a phenotype of developmental delay and bang sensitivity. 
tko 25t has been intensively studied as an animal model for human mitochondrial diseases associated with deficiency of mitochondrial protein synthesis and consequent multiple respiratory chain defects. Transgenic expression in Drosophila of the alternative oxidase (AOX) derived from Ciona intestinalis has previously been shown to mitigate the toxicity of respiratory chain inhibitors and to rescue mutant and knockdown phenotypes associated with cytochrome oxidase deficiency. We therefore tested whether AOX expression could compensate the mutant phenotype of tko 25t using the GeneSwitch system to activate expression at different times in development. The developmental delay of tko 25t was not mitigated by expression of AOX throughout development. AOX expression for 1 d after eclosion, or continuously throughout development, had no effect on the bang sensitivity of tko 25t adults, and continued expression in adults older than 30 d also produced no amelioration of the phenotype. In contrast, transgenic expression of the yeast alternative NADH dehydrogenase Ndi1 was synthetically semi-lethal with tko 25t and was lethal when combined with both AOX and tko 25t. We conclude that AOX does not rescue tko 25t and that the mutant phenotype is not solely due to limitations on electron flow in the respiratory chain, but rather to a more complex metabolic defect. The future therapeutic use of AOX in disorders of mitochondrial translation may thus be of limited value. Drosophila provides a useful animal model for human genetic diseases (Lloyd and Taylor 2010;Lu and Vogel 2009), including those associated with mitochondrial dysfunction (, Palladino 2010. Prominent among the latter are the many diseases caused by deficiency or malfunction of components of the machinery of mitochondrial protein synthesis (). 
These can be caused by point mutations of mitochondrial DNA (mtDNA), by large mtDNA deletions, or by nuclear gene lesions, and can involve interactions with environmental factors, including some antibiotics. Although their clinical phenotypes vary, a common thread is deficiency of multiple respiratory chain complexes, including ATP synthase, which include mtDNA-encoded subunits. The resulting metabolic crisis then produces a developmental and physiological disease condition, which can be widespread, severe, and often fatal. We have previously investigated a Drosophila model of such diseases; tko 25t carries a (recessive) point mutation in the gene for mitoribosomal protein S12 (;). tko 25t flies exhibit developmental delay, sensitivity to seizures induced by mechanical stress ("bang sensitivity"), and a set of linked phenotypes that share features with human mitochondrial disease, including hearing impairment and sensitivity to antibiotics that impair mitochondrial protein synthesis (). At the molecular level, tko 25t shows decreased abundance of mitoribosomal small subunits, multiple respiratory chain and ATP synthase deficiency (), and altered gene expression indicative of a metabolic shift toward glycolytic lactate production and anaplerotic pathways (). The phenotype of tko 25t flies can be partially suppressed by segmental duplication of the mutant gene in its natural chromosomal milieu ), by cybridization to specific suppressor cytoplasmic (mtDNA) backgrounds (), or by overexpression of spargel (), the Drosophila homolog of PGC1-a, proposed to function as a master regulator of mitochondrial biogenesis (Scarpulla 2011). In other studies, we found that toxic inhibition of complex III (cIII) by antimycin or cIV by cyanide, or phenotypes resulting from mutations or knockdown of cIV subunits or the cIV assembly factor Surf1 in Drosophila, could be mitigated by concomitant expression of the mitochondrial alternative oxidase (AOX) from Ciona intestinalis ). 
AOX is widespread in eukaryotes, being found in plants, fungi, and many animal phyla, although not in arthropods or vertebrates (). It provides a nonproton-translocating bypass of the cytochrome segment of the mitochondrial respiratory chain, maintaining electron flow under conditions in which it would be inhibited by high membrane potential, toxic inhibition, or insufficient capacity of cIII and/or cIV. tko 25t flies exhibit multiple respiratory chain deficiency, including profoundly decreased activity of both cIII and cIV (). However, whereas lactate dehydrogenase can theoretically compensate, at least in part, for the lack of cI (), ubiquinone-linked dehydrogenases, such as succinate dehydrogenase (complex II, cII), require the cytochrome chain for onward electron transfer to oxygen to reoxidize ubiquinol. Thus, even though it cannot directly support ATP production, AOX expression in tko 25t should facilitate intermediary metabolism, leading to an amelioration of the mutant phenotype if that phenotype is due to limitations on electron flow through cIII and cIV. We therefore set out to test whether expression of Ciona AOX in Drosophila at different times in the life-cycle could correct the major organismal phenotypes of tko 25t, namely bang sensitivity and developmental delay. MATERIALS AND METHODS Flies, maintenance, and behavioral assays Drosophila lines were as described previously (;;a). Flies were maintained at 25°on standard medium with supplements, as previously described ), including RU486 (Mifepristone), with indicated time to eclosion and bang sensitivity at 25°measured as previously described (). RNA isolation and analysis RNA extraction and QRTPCR were performed as previously described (). RNA isolations were performed in triplicate from batches of 40 males or 30 virgin females. For QRTPCR, cDNA was synthesized using High-Capacity cDNA Reverse-Transcription kit (Life Technologies, Carlsbad, CA). 
Analysis used a StepOnePlus instrument (Life Technologies) with the manufacturer's SYBR Green PCR reagents and customized AOX primers and normalization to RpL32 RNA as previously described ). Metabolic assays ATP levels in adult female flies were measured as previously described (), along with ATP standards. Mitochondrial reactive oxygen species (ROS) production was measured essentially according to Ballard et al. as hydrogen peroxide produced in whole-body mitochondrial extracts from 2-to 5-d-old females using a substrate mix of 5 mM pyruvate, 5 mM proline, 20 mM sn-glycerol-3-phosphate, and 1 mM ADP. RESULTS Transgenic expression of AOX in Drosophila using an inducible driver We previously documented the amount of expression of AOX at the RNA level in transgenic flies containing single and double copies of the UAS-AOX transgene activated by different ubiquitously acting drivers (). In the same study, using the drug-inducible tubulin-GeneSwitch driver (tub-GS), we determined the minimal level of the inducing drug RU486 (10 mM) that would sustain maximal AOX expression throughout development when flies were cultured in drug-containing food. To be able to induce and sustain AOX expression at different times during adult life, we first conducted further tests using the tub-GS driver ( Figure 1). Expression of AOX was induced in 1-d-old adults using different concentrations of RU486 and was measured 24 hr later using UAS-AOX-bearing flies with no driver or with the highly active da-GAL4 driver as controls ( Figure 1A). Even without drug, the tub-GS driver supported AOX expression at a three-fold to 10-fold higher level than in the absence of any driver. As observed previously using various drivers (), expression in males was always approximately three-fold higher than in females, which is probably a feature of the standard UAS transgenic construct and/or dosage compensation elements associated with the linked mini-white marker gene. 
RU486 even at low doses increased expression at least 10-fold further, and expression reached a plateau at a drug concentration of 100 mM. To be sure of fully activating expression, we thereafter routinely used 200 mM RU486 as the activating condition. Next, we determined the kinetics of induced expression and the effects of sustained drug exposure or its withdrawal (Figure 1, B and C). AOX expression already reached a plateau level after 1 d of drug exposure in females ( Figure 1B) and males ( Figure 1C); thereafter, it remained constant if flies were maintained on drug-containing food. If drug was withdrawn by switching to drug-free food at day five, then expression decreased to a new plateau level by 1 d later. However, this level was two-fold to three-fold higher than that of flies never exposed to drug. Flies endowed with UAS-AOX and tub-GS were cultured continuously on RU486-containing food for many weeks and remained phenotypically indistinguishable from flies grown on drug-free food. Adult-specific induction of AOX does not rescue bang sensitivity of tko 25t Bang sensitivity is generally considered to arise from a functional defect of nerve conduction during high-frequency stimulation in the giant fiber pathway (Pavlidis and Tanouye 1995;Lee and Wu 2002;;). Bang-sensitive mutants with an underlying mitochondrial defect, including kdn (citrate synthase) and sesB 1 (adenine nucleotide translocase) as well as tko 25t display a characteristic seizure pattern (). We therefore decided to test whether expression of AOX in tko 25t mutant flies could compensate for the mitochondrial defect and thus alleviate bang sensitivity. We crossed tub-GS into the tko 25t background using a balancer chromosome strategy to analyze progeny from a single experimental cross that generated flies carrying tko 25t, tub-GS, and/or UAS-AOX in all eight possible combinations. 
Bang sensitivity was tested in 2-d-old males and females of each class, either with or without transfer 24 hr earlier to food containing 200 mM RU486 (Figure 2). Unambiguously, the results indicate that AOX is unable to modify the bang-sensitive phenotype of tko 25t adults, and it does not induce any detectable bang sensitivity in control flies. In fact, applying Student's t test with Bonferroni correction confirmed that there were no significant differences between any of the classes that were mutant for tko 25t, irrespective of sex, transgene, driver, or RU486 induction. Similarly, there were no significant differences between any of the classes that were wild-type for the tko gene, irrespective of these other parameters. As expected, the difference between tko 25t mutant flies of each class and the corresponding class without tko 25t was significant (P < 0.01) in every case. Continuous induction of AOX throughout development does not rescue tko 25t Considering an alternative hypothesis, that the bang-sensitive phenotype of tko 25t is established during development, we conducted similar crosses but used fly food containing RU486. In our previous study (), we established that 10 mM RU486 was sufficient to induce maximal transgene expression during the larval stages, so we used this concentration of the drug along with drug-free control vials. This procedure allowed us also to analyze effects on the second canonical phenotype of tko 25t, developmental delay, which was previously found to occur uniquely during the larval (growth) stages (). Once again, we observed no rescue of the mutant phenotype that was attributable to AOX expression (Figure 3). The developmental delay of tko 25t mutant flies ( Figure 3A) was slightly greater in males than in females, as observed previously, and an additional delay of approximately 1 d was produced in flies of all genotypes and both sexes by the presence of RU486 in the food. 
The UAS-AOX transgene, the tub-GS driver, and the two in combination did not produce any significant change in developmental timing of tko 25t mutant flies, although there was a slight delay produced by AOX expression in wild-type flies, as reported previously using the da-GAL4 driver. The bang sensitivity of the progeny flies showed no significant change according to any of the parameters tested, except for the presence of the tko 25t mutation itself ( Figure 3B). Prolonged adult induction of AOX does not rescue bang sensitivity of tko 25t To test whether correction of the tko 25t phenotype in adult flies requires long-term expression of AOX, we cultured tko 25t flies generated in the previous crosses continuously for a period of 30 d on food either with or without RU486 at the inducing concentration of 200 mM, noting the previous result that sustained expression requires continuous exposure to the drug. This also enabled us to check the stability of the phenotype during adult life, which, to our knowledge, has not previously been studied systematically. Bang sensitivity was unaffected by any of the parameters tested in this experiment (Figure 4). There was no rescue (or worsening) of the phenotype either by basal or by induced AOX expression, no effect of age, no difference between the sexes, and no effect of tub-GS. Ndi1 expression during development is lethal to tko 25t Because AOX expression at any stage of the fly life-cycle had no effect on the major phenotypic features of tko 25t mutants, we considered the hypothesis that the steps in mitochondrial electron flow that AOX bypasses may not be crucial determinants of the phenotype. The tko 25t mutation impacts all four of the enzymatic complexes of the oxidative phosphorylation (OXPHOS) system that contain mitochondrial translation components (), but it is unclear which is limiting for respiration or ATP synthesis. 
Because complex I (cI) activity is severely affected by the mutation, we considered the alternative hypothesis that a decreased capacity for electron flow through cI alone underlies the tko 25t mutant phenotype, and that decreased capacity of complexes III and/or IV is immaterial, thus accounting for a failure of AOX expression to modify the phenotype. To test this idea, we set-up a genetic cross ( Figure 5A) to investigate whether an analogous bypass of cI using the nonproton-pumping NADH dehydrogenase from yeast (Ndi1) could rescue the phenotype. Ndi1 expression was shown previously to be benign in Drosophila and to rescue the lethality of severe knockdown of cI subunits (b). We introduced the ubiquitously acting da-GAL4 driver and a UAS-Ndi1 transgene separately into the tko 25t mutant strain and then crossed females heterozygous both for tko 25t and UAS-Ndi1 with tko 25t males carrying da-GAL4 ( Figure 5A). The cross repeatedly gave (Table 1). However, almost all of them carried the balancer marker in place of UAS-Ndi1, indicating that the combination of da-GAL4, tko 25t and UAS-Ndi1 is semi-lethal. Expression of Ndi1 in tko 25t heterozygotes had a far less dramatic effect. We conclude that, far from rescuing tko 25t, expression of Ndi1 is selectively deleterious to tko 25t mutant flies. This result raises the possibility that although neither Ndi1 nor AOX can individually rescue tko 25t, the co-expression of both transgenes might do so. This would be the case, for example, if the tko 25t phenotype were due to a combined limitation on electron flow at both cI and at cIII+cIV of similar magnitude. Although co-expression of Ndi1 and AOX at 25°was previously shown to be synthetically lethal even in wild-type flies (b), in trial experiments we were able to obtain co-expressing flies when cultured at 18°. We therefore implemented the experimental cross illustrated in Figure 5B to determine whether Ndi1 and AOX co-expression can rescue tko 25t. 
As shown in Table 2, although control flies were now obtained, and again there were only a few Ndi1-expressing flies in the tko 25t mutant background, no doubly expressing tko 25t flies eclosed. We conclude that, far from rescuing tko 25t, combined expression of the two transgenes is more deleterious than of either alone. Effects on ATP or ROS do not correlate with modulation of tko 25t phenotype In previous studies we found decreased steady-state ATP levels in extracts from tko 25t mutant flies, as well as elevated production of ROS in isolated tko 25t mitochondria (). However, the relevance of these observations to the organismal phenotype remains to be conclusively demonstrated. The effects of AOX and Ndi1 expression on the tko 25t phenotype provided an opportunity to test this relationship further. To obtain a sufficient number of tko 25t flies expressing Ndi1 to conduct this experiment, flies were reared at 18°i nstead of 25°(see previous section). We confirmed the previous observation of decreased ATP levels in tko 25t homozygotes compared with heterozygous controls ( Figure 6A) but found no significant alteration thereof when either AOX or Ndi1 was expressed. Mitochondrial ROS production in tko 25t homozygotes was also elevated in every case compared with heterozygous controls ( Figure 6B). This was unaffected by expression of AOX but modestly alleviated by Ndi1 expression, despite the fact that the effect of Ndi1 on the overall organismal phenotype was deleterious. This, plus the wide variation in ROS production according to genetic background (reflecting different balancer chromosomes), implies that the tko 25t organismal phenotype is also not directly determined by ROS. DISCUSSION In this work we set out to determine whether AOX from Ciona intestinalis can ameliorate the mutant phenotype of tko 25t, which carries a mutation in mitoribosomal protein S12, resulting in globally decreased OXPHOS capacity. 
We found that induced AOX expression, whether during development, in freshly eclosed adults, or maintained in adults over a period of 30 d, has no effect on tko 25t, nor does it produce a phenocopy of tko 25t in wild-type flies. In contrast, ubiquitous expression of Ndi1, the alternative NADH dehydrogenase from yeast, was highly deleterious to tko 25t during development and was lethal when combined with both tko 25t and AOX. Failure of AOX rescue suggests that a complex metabolic defect underlies the tko 25t phenotype tko 25t exhibits a functional deficiency of all four OXPHOS complexes containing mitochondrial translation products (), but it is unclear which of these is limiting for electron transfer. Because AOX provides a functional bypass of complexes III and IV, its failure to rescue the organismal phenotype can be interpreted in one of several ways. The first would be that the residual activity of cIII/cIV is not limiting for mitochondrial electron transport in tko 25t, and that the phenotype is entirely due to cI dysfunction. The second postulates that AOX is unable to rescue tko 25t because, as a nonproton-motive enzyme, it does not support the synthesis of ATP, and ATP deficiency is what underlies the mutant phenotype. A third possibility is that the phenotype is a consequence of one or more processes on which AOX does not impinge, such as elevated ROS production, or proteotoxicity due to the protein synthesis defect. Although none of these can be entirely eliminated, the fact that Ndi1 expression worsens the phenotype, either alone or in combination with AOX, and that changes in ATP level or mitochondrial ROS production do not correlate with it, suggest that the mutant phenotype is determined either by a complex interplay of factors or by other metabolic effects that are as yet unknown. Disrupted redox homeostasis resulting from a cI defect should be rescuable by Ndi1. 
A combined limitation on electron flow at cI and cIII and/or cIV should be alleviated by combined expression of Ndi1 and AOX. Manifestly, these predictions are inconsistent with our findings. Ndi1 is constitutively active (b), consistent with the fact that in its natural setting (in budding yeast) cI is absent. By diverting electrons away from cI, it may act to decrease net ATP production still further, but this seems unlikely to be the explanation for its effect on tko 25t because the apparent additional decrease in ATP level ( Figure 6A) was modest and not statistically significant. However, the low number of successfully eclosing flies may represent the tail of a distribution, with those individuals suffering further ATP depletion simply unable to complete development. Effects on mitochondrial ROS production also did not correlate with the organismal phenotype. Although we confirmed elevated ROS production in tko 25t flies ( Figure 6B), it was more affected by genetic background than by the expression of the alternative respiratory chain enzymes, and the effect of Ndi1 was again paradoxical. Note, however, that all metabolic assays were conducted on materials from flies reared at 18°, whereas for most of the phenotypic experiments reported here flies were cultured at 25°. This may have some bearing on the findings. Proteotoxicity due to imbalance between cytosolic and mitochondrial protein synthesis has been implicated as a longevity mechanism, acting hormetically via the induction of the mitochondrial unfolded Table 1. Note that FM7 / Y males do not contain an unmanipulated X-chromosome, so they are not strictly a wild-type control. protein response Arnsburg and Kirstein-Miles 2014). However, decreased levels of NAD+ are associated with a failure of this mechanism. The deleterious effect produced by Ndi1 expression is again not consistent with this being the primary mechanism underlying the tko 25t phenotype. 
The failure of AOX to rescue bang sensitivity and developmental delay in tko 25t reflects a similar finding for a second mutant affecting mitochondrial ATP production, sesB 1 (). sesB 1 carries a mutation in the gene encoding the major adult isoform of the adenine nucleotide translocase () and, like tko 25t, sesB 1 mutant flies show decreased steady-state ATP levels as well as bang sensitivity and developmental delay (). For these reasons, as well as the arguments stated above, we feel the "ATP hypothesis" cannot be entirely discounted, although other metabolic effects need to be further investigated as well. Bang sensitivity of tko 25t is a developmental rather than a degenerative phenotype Bang sensitivity is a commonly observed mutant phenotype in Drosophila and is due to lesions affecting a variety of cellular or physiological pathways, including, in addition to mitochondrial protein synthesis, adenine nucleotide transport and the TCA cycle (), phospholipid metabolism (), ion pumps and channels (;;;), and proteolysis (). Although they manifest some similarities in their electrophysiological defects (Engel and Wu 1994), they fall into two classes depending on whether motor neurons are directly affected (). Some of them show a clear degenerative phenotype with drastically shortened lifespan, whereas others, including tko 25t, show only a modestly decreased lifespan and associated neuropathology (). In the current study, we found no significant change in the bang sensitivity of tko 25t over 30 d of adult life, in contrast to the synergistic and progressive effects on bang sensitivity seen when tko 25t is combined with other bang-sensitive mutants (). We conclude that the bang sensitivity of tko 25t is a developmentally determined phenotype, at least in an otherwise wild-type genetic background Therapeutic implications for AOX in mitochondrial disease AOX has been proposed as a therapeutic tool relevant to a wide variety of mitochondrial disorders (). 
The present work indicates important limitations of this concept, whatever the precise link between mitochondrial translational dysfunction and the organismal phenotype in tko 25t. Despite profound effects on flies exposed to toxins or mutations directly or indirectly affecting cytochrome oxidase (;), or even the pleiotropic phenotypes caused by partial knockdown of DNA polymerase γ (), AOX expression produced no detectable modification to the tko 25t phenotype. tko 25t has been considered as a model for mitochondrial diseases, exhibiting not only seizures and developmental delay but also hearing impairment (). It is of particular relevance to those disorders where the primary defect is in the mitochondrial translation system, which applies to many of the commonest pathological mtDNA mutations such as the 3243G>A MELAS mutation, as well as an increasingly recognized subset of nuclear gene mitochondrial disorders exhibiting multiple OXPHOS deficiencies (). The implementation of respiratory chain bypasses such as AOX or Ndi1 should, in theory, alleviate pathological phenotypes associated with restrictions on electron transport, depending on which segments of the respiratory chain are affected. In cases where multiple OXPHOS complexes are affected, both bypasses in combination might be needed to restore electron flow. tko 25t constitutes a model for such diseases, yet neither AOX nor Ndi1 ameliorated the phenotype, and Ndi1 was even deleterious. As already indicated, Ndi1 and AOX do not restore proton pumping at the respiratory chain segments that they bypass, nor can they alleviate, a priori, all other aspects of mitochondrial dysfunction. Their use in eventual therapy for disorders of mitochondrial translation therefore may be limited and clearly requires a fuller understanding of the pathophysiological mechanism case by case. Figure 6 Altered ATP and ROS levels do not account for phenotypic effects of AOX or Ndi1. 
Effects of Ndi1 and AOX expression on (A) ATP levels and (B) mitochondrial ROS production of female tko 25t flies of the indicated genotypes, reared at 18°. Flies were homozygous for tko 25t, except those carrying the FM7 balancer, which are phenotypically wild-type. Means ± SD for three or more biological replicates of each genotype. Significant differences between tko 25t homozygotes and heterozygotes of otherwise identical genotypes, P < 0.01, Student's t test, two-tailed. # Significant differences between Ndi1 or AOX expressors and nonexpressors of otherwise identical genotypes, P < 0.05, Student's t test, two-tailed.
Escambia County Commission District 2 candidates exchanged policy ideas and verbal jabs at a Republican Party candidate forum Monday night. Commissioner Doug Underhill and his challenger Alan McMillan fielded questions submitted by an audience of more than 200 people at the Pensacola Library. Underhill and McMillan agreed on some issues like both being opposed to making Perdido Key Drive four lanes, but were divided on many others like what the county should do once it takes ownership of Navy Outlying Field 8 in Beulah. "I think this is an issue where you will see a big difference between Doug Underhill and Alan McMillan and how we view this," McMillan said. Underhill said he favors selling the property and recouping the $17 million the county invested to build a new Navy training field in Santa Rosa County to obtain the OLF 8 land in Beulah rather than spending what he said would be another $20 million in RESTORE Act money to build an industrial park. "It's easy to say we're going to create this facility and people are going to flock to it," Underhill said. "We cannot drink our own Kool-Aid on this economic development stuff, folks. It just isn't working that way. We have lots of empty industrial space already." Underhill also criticized McMillan for changing the score he gave the OLF 8 project while he served on the RESTORE Act committee. "At that time, it was, in my opinion, one of the finest proposals to create jobs," McMillan said. "Yes, $17 million is a lot of money, but if $17 million can create $200-$300 million, then there's a massive payoff in growth for us. Now, I'm not saying OLF 8 is perfect. Nothing is perfect. Whether or not it should be built just like it has been talked about being built or whether it should be modified." McMillan said the commission should listen to the people of Beulah to decide whether it goes forward as an industrial park or change the plans to a town center type of development. 
Responding to a question on what can be done to improve the Warrington area, Underhill said it was unfortunate McMillan did not know about the county's plan to revitalize Warrington that predated his term in office and he would send it to him. McMillan hit back at Underhill's remark about the plan. "Shame on you," McMillan said. "If there is a plan that has value, and you've had it for four years, and you've not moved it forward, shame on you." Underhill responded by bringing up McMillan's previous statements to conduct a civil campaign. "Well, I guess that civility slogan kind of went out the door," Underhill said. A question came about veterans and Underhill — who is a Navy veteran and commander of a Navy Reserve unit — talked about his personal experience coming back from deployments and not being ready to enter civilian life. McMillan said the forum was a good place to discuss the issue "because I see this as a real Republican issue." McMillan said it was the job of commissioners and citizens to provide the resources veterans need after serving in the military. Underhill took issue with McMillan tying veterans to the Republican Party. "I've famously said a couple of things in the last four years that have certainly enraged the other side of the aisle, so I'm as Republican as anybody ever will be," Underhill said. "… But Mr. McMillan, I can assure you this is not a Republican issue. That flag right there has red on it, and it has blue on it. And that flag is not that flag without both of them. And I never thought once about the man I was standing next to or the women I served alongside and wondered whether they were a Republican or a Democrat." During his next opportunity to speak, McMillan said he wanted to correct the record. "I don't think anybody misunderstood me by saying Democrats didn't care about the flag," McMillan said. "They all do. You bet they all do. Everyone should. Let me just correct that." 
McMillan, who's raised over $85,000 and has a political committee with over $100,000 supporting him, said he believes implications have been made in the race that the money he's raised is being used to "buy the seat." "Nothing could be further from the truth," McMillan said. McMillan said he's raised money from developers and other business people, but so has Underhill. He said developers are members of the community, stakeholders and deserve a voice like anyone else. "I think that's a red herring issue and I think it divides people," McMillan said. "And I think it's time for us to stop doing that, and that is using things that divide our community. Talking about the 'downtown crowd,' 'the good ol' boys,' 'we on the west side vs. them somewhere else.' I think that hurts our community, and I think it really brings all of us down when we do that." The Republican primary is on Aug. 28 and the winner will face Democrat Scott Trotter in the Nov. 6 election. Trotter was in the audience of the forum and told the News Journal afterward that he wasn't impressed with either candidate but had harsher criticism for Underhill. "Doug Underhill is a dynamic speaker," Trotter said. "He talks a really good game, but he lies constantly. Alan McMillan is at least honest. He didn't come across real good today, I don't think, but I trust what he says a lot more than what I trust Doug Underhill."
Republican vice-presidential nominee Gov. Mike Pence said Saturday that he does not condone the lewd remarks made 11 years ago by Republican presidential nominee Donald Trump but is grateful his running mate apologized and expressed remorse, and looks forward to Trump showing “what is in his heart” during Sunday night’s presidential debate. Pence said in a statement released Saturday: As a husband and father, I was offended by the words and actions described by Donald Trump in the eleven-year-old video released yesterday. I do not condone his remarks and cannot defend them. I am grateful that he has expressed remorse and apologized to the American people. We pray for his family and look forward to the opportunity he has to show what is in his heart when he goes before the nation tomorrow night. Trump apologized for the lewd comments in a late Friday night video statement. He said in part, “I’ve said and done things I regret and the words released today on this more than a decade old video are one of them. Anyone who knows me knows these words don’t reflect who I am. I sat, I was wrong and I apologize.” “I’ve traveled the country talking about change for America, but my travels have also changed me,” continued Trump. “I’ve spent time with grieving mothers who have lost their children, laid-off workers whose jobs have gone to other countries and people from all walks of life who just want a better future. I have gotten to know the great people of our country and I’ve been humbled by the faith they’ve placed in me. I pledge to be a better man tomorrow and will never, ever let you down.” Trump will face off against Democratic presidential nominee Hillary Clinton on Sunday night in a town hall-style debate. It is the second of three scheduled debates between Trump and Clinton before the November 8 general election. Pence squared off against Clinton’s running mate Sen. Tim Kaine on Tuesday in the only scheduled debate between the VP candidates. 
Even a Democrat-heavy CNN poll of debate watchers declared Pence the winner and found that almost 30 percent of those watching were more likely to vote for the Trump-Pence ticket after watching Pence’s performance. Follow Michelle Moons on Twitter @MichelleDiana
Key Factors Needed for Developing a Higher Education Cross-Campus Learning Environment in a Nordic Context The use of digital technologies and online tools to support both students and educators has become synonymous with transforming learning within Higher Education, particularly within post graduate courses. It can be argued that the recent push for transforming Higher Education aligns itself with the notion that postgraduate students need more flexible learning opportunities while still retaining access to high quality, engaging and collaborative pedagogical approaches. This paper reports on an exploratory case study that focuses on cross campus/university collaboration and flexible learning opportunities for students studying a masters level degree in the area of Music, Communication and Technology (MCT) within a Nordic context. The research question guiding the study is What factors do educators in a hybrid cross-campus learning environment identify as essential for providing a supportive learning experience for students? A pedagogy, space and technology (PST) framework underpins the development of this program and forms the basis for its development. The findings from our research identify three themes that need to be considered when attempting to design and implement high quality learning opportunities for students studying a largely synchronous hybrid music, communications and technology program. These themes were flexibility, trust and the human element, and ownership. The findings also highlight the need for a renewed focus on pedagogical approaches that can be adapted and continually revised to meet the changing needs of students in a synchronous hybrid learning space.
def interview():
    """Render the page listing pitches in the 'Interview' category (id 3)."""
    title = 'Interview'
    interview_pitches = Pitches.get_pitches_by_category(3)
    return render_template('interview.html', title=title, pitches=interview_pitches)
//public final MetricRegistry METRICS = new MetricRegistry(); public void init() { if (dbFile == null || localIdx < 0 || allSrvs.isEmpty()) { System.err.println("dbnum, idx or allsrvs are not well-configured. " + "Please check the parameter."); System.exit(1); } String localAddrPort = this.getAllSrvs().get(this.getLocalIdx()); this.localAddr = localAddrPort.split(":")[0]; this.port = Integer.parseInt(localAddrPort.split(":")[1]); this.serverNum = this.getAllSrvs().size(); this.localStore = new OrderedRocksDBAPI(this.getDbFile()); /** * Sync Clients */ this.clientPools = new HashMap<>(this.serverNum); for (int i = 0; i < this.serverNum; i++) this.clientPools.put(i, new ConcurrentLinkedQueue<TGraphFSServer.Client>()); }
/* Call the callable object 'callable' with the "fast call" calling
   convention: args is a C array for positional arguments followed
   by values of keyword arguments. Keys of keyword arguments are stored
   as a tuple of strings in kwnames. nargs is the number of positional
   parameters at the beginning of stack. The size of kwnames gives the number
   of keyword values in the stack after positional arguments.

   kwnames must only contains str strings, no subclass, and all keys must
   be unique.

   If nargs is equal to zero and there is no keyword argument (kwnames is
   NULL or its size is zero), args can be NULL.

   Return the result on success. Raise an exception and return NULL
   on error. */
// NOTE(review): JavaCPP binding to a *private* CPython API
// (_PyObject_FastCallKeywords existed in CPython 3.6/3.7 and was folded into
// the vectorcall protocol in 3.8) — confirm against the CPython version this
// preset targets before relying on it.
@NoException public static native PyObject _PyObject_FastCallKeywords( PyObject callable, @Cast("PyObject*const*") PointerPointer args, @Cast("Py_ssize_t") long nargs, PyObject kwnames);
Self-renewal and differentiation of stem cells in a biopotential murine leukemia: an in vitro model for differentiation therapy. PGM-2 is a variant of the transplantable PGM-1 leukemia of strain C3H/HeJ. Freshly explanted cells had lymphoid morphology with a CD5+ CD45R (B220)- IgM- phenotype. They were not viable in unstimulated cultures, but formed IgM+ lymphoid colonies in response to interleukin-2 (IL-2), IL-4, IL-5, IL-6, IL-7, and Steel factor, and macrophage colonies in response to IL-3. IL-3-stimulated colonies had no recloning potential, but colonies from IL-7 cultures gave rise to large numbers of secondary macrophage colonies in IL-3-stimulated cultures and secondary lymphoid colonies in IL-7-stimulated cultures. The latter ones could be serially transferred in vitro for several months, and formed typical PGM-2 tumors in vivo. IL-7-stimulated colonies could therefore be used to measure leukemic stem cells in vitro. Supramaximal IL-3 stimulation (2,500 U/mL) of suspension cultures was followed by an increase in overall cell numbers and a disappearance of leukemic stem cells, compatible with differentiation induction. This could not be counteracted by simultaneous stimulation with IL-7. However, lower IL-3 concentrations (500 U/mL) induced an expansion of the stem cell pool, possibly by facilitating density-dependent autostimulatory mechanisms involving endogenous production of IL-7. The system described is a simple in vitro model for differentiation therapy. It shows that leukemic stem cells can be induced by hematopoietic growth factors to undergo terminal differentiation, but the concentrations required for differentiation induction in stem cells are much higher than those required for other biologic effects. Submaximal stimulation may favor expansion rather than repression of the leukemic cell population.
Predicting the Stress-Strain Behavior of Woven Fabrics Using the Finite Element Method This work is a theoretical study of the mechanical behavior of two different weave types: plain and twill. Traditional methods permit the study of plain weaves but prove quite difficult for twill weaves. Indeed, the difficulties related to modeling the mechanical behavior of the twill weave are due to its very complex geometry and its nonsymmetry, which require the application of the finite element method. This method requires first a mathematical formulation of the problem and then a mesh of the basic cells of the plain and twill fabrics. The next step is to simulate shearing and tensile tests. Analyzing the results has proved to be very hard and thus demands a study of the stress field of the basic cell.
Epitaph 5 years ago by Adam White The Lawrence Arms have signed to Epitaph Records for the release of their highly anticipated new full length record. Bassist / vocalist Brendan Kelly made the announcement on his Bad Sandwich Chronicles blog just a few minutes ago: We went and met [Epitaph] in LA, we had some laughs and some grilled cheeses. We talked to Brett [Gurewitz] underneath the original painting of the Against the Grain cover (awesome!) and we thought what better way to come back from a long break than on the label that put out most of the records that got us into punk rock in the first place? The post, which you can find here, goes on to detail the band's relationship with longtime label Fat Wreck Chords (it's cool), how the move will affect the band's sound (it won't). We're still awaiting a release date, title and cover art for the new record. The band's most recent full length was 2006's Fat Wreck released Oh! Calcutta!. The most recent new music from the band is to be found on 2009's Buttsweat and Tears 7". An Evening of Extraordinary Circumstance, a concert DVD celebrating the band's 10th anniversary show in Chicago, was released last year.
#![no_std] #![no_main] extern crate panic_halt; use murax_pac::gpioa::GPIOA; use riscv_rt::entry; #[entry] fn main() -> ! { let gpio = GPIOA::take().unwrap(); gpio.DIRECTION.write(0xff); let mut cnt = 0; loop { cnt += 1; gpio.OUTPUT.write(cnt >> 16); } }
/// Make no updates in the datapath, but send a report on every ack. fn install_ack_update(&mut self) -> Scope { self.control_channel .set_program("AckUpdateProg", None) .unwrap() }
/**
 * Insert a branch to an ending label after the else-part of an if-statement.
 *
 * Appends an unconditional jump to a freshly generated end label, then emits
 * the target for the else-label so the else-part begins at the right place.
 *
 * @param instList a list of instructions
 * @param symtab a symbol table
 * @param elseLabelIndex the symbol table index of the label for the beginning of the else-part of an if-statement
 * @return a symbol table index for the end label of an if-statement
 */
int emitThenBranch(DList instList, SymTable symtab, int elseLabelIndex) {
	char endLabel[20];
	makeLabel(endLabel);

	char* jumpInst = nssave(2, "\tjmp ", endLabel);
	dlinkAppend(instList, dlinkNodeAlloc(jumpInst));

	emitEndBranchTarget(instList, symtab, elseLabelIndex);

	return SymIndex(symtab, endLabel);
}
Radio Regulation Compliance of NGSO Constellations' Interference towards GSO Ground Stations The commercial low earth orbiting (LEO) satellite constellations have shown unprecedented growth. Accordingly, the risk of generating harmful interference to the geostationary orbit (GSO) satellite services increases with the number of satellites in such mega-constellations. As the GSO arc encompasses the primary and existing satellite assets providing essential fixed and broadcasting satellite services, the interference avoidance for this area is of the utmost importance. In particular, non-geostationary orbit (NGSO) operators should comply with the regulations set up both by their national regulators and by the International Telecommunications Union (ITU) to minimize the impact of emissions on existing GSO and non-GSO systems. In this paper, we first provide an overview of the most recent radio regulations that dictate the NGSO-GSO spectral co-existence. Next, we analyze the NGSO-GSO radio frequency interference for the downlink scenario, following the so-called time-simulation methodology introduced by ITU. The probability distribution of aggregated power flux-density for NGSO co-channel interference is evaluated and assessed, adopting different degrees of exclusion angle strategy for interference avoidance. We conclude the paper by discussing the resulting implications for the continuity of operation and service provision and we provide remarks for future work.
Changes in the nasal mucosa of habitual nose-bleeders. 121 habitual nose-bleeders were investigated. In all but one, local changes were found in the nasal mucosa. Abnormal vessels (varicose vessels, telangioma, or a network of small vessels) were found in 102 (84%), and 18 (15%) had mucosal atrophy (rhinitis anterior sicca or septal perforation) without abnormal vessels. The occurrence of abnormal vessels was significantly increased when the bleeders were compared with subjects in a control group. Mucosal atrophy was also significantly more common in the habitual nose-bleeders but only in association with abnormal vessels. Varicose vessels were the most frequent abnormality in both the habitual nose-bleeders and the control group. Telangiomas were only present in the habitual nose-bleeders. Heredity seems to be important in explaining the occurrence of varicose vessels as well as telangiomas in habitual nose-bleeders.
Campus Admin: an Integrated Architecture for Automating Data-Related Processes in Education Managing courses and students at a university or other educational institutions includes manifold administrative tasks to be performed by different persons, e.g. secretaries, lecturers and system administrators. Most tasks are performed manually sometimes assisted by stand-alone software solutions that are not designed for collaboration and reuse of existing data. On this account, todays processing of administrative tasks in education is time-consuming, labor intensive and error-prone. In this paper we present an approach and an implementation integrating several data-related processes of students at our university with hundreds of students starting each year. Our implementation is composed of several modules distributed over the campus. Extensive tasks like creating personal login accounts and email-addresses are performed automatically without any human interaction leading to a higher quality of data and completion in time. Frequently occurring processes like managing the results of exams and exercises, admitting students and creating individual certificates are integrated seamless reusing the existing data of the students.
Differential regulation of mitogen-activated protein kinase kinase 4 (MKK4) and 7 (MKK7) by signaling from G protein beta gamma subunit in human embryonal kidney 293 cells. Heterotrimeric G protein beta gamma subunit (Gbeta gamma) mediates signals to two types of stress-activated protein kinases, c-Jun NH2-terminal kinase (JNK) and p38 mitogen-activated protein kinase, in mammalian cells. To investigate the signaling mechanism whereby Gbeta gamma regulates the activity of JNK, we transfected kinase-deficient mutants of two JNK kinases, mitogen-activated protein kinase kinase 4 (MKK4) and 7 (MKK7), into human embryonal kidney 293 cells. Gbeta gamma-induced JNK activation was blocked by kinase-deficient MKK4 and to a lesser extent by kinase-deficient MKK7. Moreover, Gbeta gamma increased MKK4 activity by 6-fold and MKK7 activity by 2-fold. MKK4 activation by Gbeta gamma was blocked by dominant-negative Rho and Cdc42, whereas MKK7 activation was blocked by dominant-negative Rac. In addition, Gbeta gamma-mediated MKK4 activation, but not MKK7 activation, was inhibited completely by specific tyrosine kinase inhibitors PP2 and PP1. These results indicate that Gbeta gamma induces JNK activation mainly through MKK4 activation dependent on Rho, Cdc42, and tyrosine kinase, and to a lesser extent through MKK7 activation dependent on Rac.
Seitan Ribs has become one of my favorite Vegan Barbeque Recipes! It is a very easy recipe to make and it is very delicious. I love this recipe is my favorite because it can be grilled, baked or finished up on a griddle. Not only are these super easy to make but they are kid and family friendly. These will not fool a meat eater, but I have shared them with my meat eating friends and family on a few different occasions and it always gets a great review. I made these for a family BBQ and asked my Father to grill them with the veggies, to avoid cross contamination with the animal products. It is not very often that I get to sit down with my family and enjoy a meal that is similar to the one they are eating. This recipe allows me to do just that! Happy Cooking! ~Stella~ Vegan Rib Recipe 1 cup gluten flour 2 tablespoon smoked paprika 2 tablespoon nut butter (I used peanut butter) 2 tablespoon onion powder 1 tablespoon garlic powder ¾ cup water 2 tablespoon nut butter 1 cup Vegan BBQ Sauce - Mix all of the ingredients in a bowl. It will get really stretchy. - Take the mixture and spread it out on a lightly greased baking sheet. - Score the ribs. Once through the middle and 3 times from t - Bake in a 350 degrees Fahrenheit oven for 25 minutes - Take it out of the oven and split it up into individual pieces or sets of two. - Use a brush to brush on BBQ sauce on both sides of the ribs - You can either grill them, cook them on a griddle, or bake them for 20 more minutes If you grill them: Photo By: Mick Parsons -Put them on a hot grill -Put them on a hot grill - Flip them (you want to have nice grill marks on them) - Add BBQ sauce as needed If you cook them on a griddle: - Put them on a griddle at the temperature of 400 degrees Fahrenheit - Let them cook 6-10 minutes per side - Add BBQ sauce as needed If you bake them: - Put them back in the oven at 350 degrees Fahrenheit for 20 minutes - Take them out and flip them after 10 minutes - Add BBQ sauce as needed
Analysis of DNA Denaturation This unit provides a detailed method for evaluating in situ DNA denaturation by flow cytometry. The principal technique is based upon the metachromatic properties of acridine orange. This technique has a number of advantages over traditional biochemical methods, but requires very precise methodology; the result is excellent clarity and differentiation between single-stranded and double-stranded DNA. This unit provides full details on the correct use of acridine orange together with an excellent discussion of the pitfalls and problems.
def count_silver_coins(n, moves):
    """Count silver coins collected while walking a monotone lattice path.

    The walker starts at (0, 0); each character of *moves* is "R" (step
    right, x += 1) or "U" (step up, y += 1).  A coin is collected each
    time the path lands exactly on the diagonal (x == y) and the *next*
    step repeats the current direction — i.e. the path touches the
    diagonal and continues to the same side rather than crossing back.

    Args:
        n: Number of moves to process (length of the path).
        moves: String of "R"/"U" characters, at least n long.

    Returns:
        The number of silver coins collected.
    """
    x = 0
    y = 0
    coins = 0
    for i in range(n):
        if moves[i] == "R":
            x += 1
            # Landed on the diagonal and the next step is also "R":
            # the path hugs the diagonal from below -> collect a coin.
            if i < n - 1 and x == y and moves[i + 1] == "R":
                coins += 1
        else:
            y += 1
            # Symmetric case: on the diagonal and continuing upward.
            if i < n - 1 and y == x and moves[i + 1] == "U":
                coins += 1
    return coins


def main():
    """Read n and the move string from stdin and print the coin count."""
    n = int(input())
    moves = input()
    print(count_silver_coins(n, moves))


if __name__ == "__main__":
    main()
Surgery for Metastases of Renal Cell Carcinoma The role of surgery for RCC in the era of emerging effective systemic therapy (usually immunotherapy) is not yet defined except for solitary metastasis. The retrospective analysis of patients subjected to aggressive surgical management after systemic therapy reinforces the need to find better therapeutic modalities in order to achieve complete eradication of metastatic disease. In the meantime, however, we propose these guidelines. First, we would encourage aggressive surgical resection of the clinically solitary metastasis, whether synchronous or metachronous. Continue to follow those patients indefinitely, because relapse is quite likely, but do not give adjuvant systemic therapy unless on protocol. Second, limited metastases in only one organ may behave similarly to a solitary metastasis, and if the metastases are in a site amenable to surgical resection, e.g., lung, initial surgery might be reasonable. Systemic therapy for these patients is highly recommended and need not necessarily wait for recurrence. Third, for patients with multiple metastases, initial systemic therapy followed then by resection of any residual disease in selected patients seems to be supported by the experience at several medical centers. Apparently prolonged survival times have been observed after systemic therapy followed by surgery in highly selected patients, despite finding viable cancer in the overwhelming majority of specimens. One must be mindful of the morbidity of an attempt to remove all known disease, however, and try to weigh this against potential benefit. Only a prospective, randomized trial could ever confirm the value of an aggressive surgical approach to metastatic RCC. In the meantime, however, metastasectomy offers, at the very least, the opportunity to confirm the histologic response to systemic therapy, render some patients disease-free, and possibly promote long-term survival in selected patients.
Good! Good! Good!! See it NOW!! If you have been looking for an older home that has been remodeled with a contemporary look and feel then you may have just found it. It is in the heart of the city, sits high on the hill with beautiful views, is close to downtown shopping and close to the MTU campus. It is a 3 bedroom, 2 bath home with other, special rooms that could be used in a variety of ways. It also has a wrap around deck, a very nice, wooded, natural garden yard, a paved patio and BBQ pit for outdoor entertaining. The updates are many and all rooms have an updated, contemporary look and feel. The roof was rebuilt in 2014, insulated, vented and at the same time, the attic was finished for even more living space. There is hardwood flooring in the living room, dining room, and upstairs. The kitchen was completely renovated with Italian tile counter tops, new sink, faucet, the cabinets stripped, refinished and doors replaced with beveled glass. There is also a dining area adjacent to the kitchen with patio door access to the new, paved patio and BBQ pit. Both bathrooms have been updated and the second floor bath has a new double sink. The main floor bath also incorporates the washer and dryer, saving you from carrying laundry to a basement laundry. The garage roof was replaced in 2012. The bedrooms are on the 2nd floor but other arrangements could be made in one of the main floor rooms. The main bedroom is very spacious and offers wonderful views of the Hancock hillside including Mont Ripley. Off the living room is a dining room with a wall of bookshelves so if a formal dining room is not needed this could be very nice, spacious home office. On the main floor there is a large, windowed family room that overlooks the deck and private part of the yard. It could also be used for other purposes. The living room and kitchen floors a a bit scratched up so, the seller will give a $1,000 flooring concession with an acceptable offer. 
The heating system has been well maintained and all piping has been replaced with copper or PEX tubing. Some windows have been replaced with double pane glass. Please contact your agent to make an appointment to see this good, good, good house soon. In fact, do it now!! Please allow 24 hours' notice for showings as there is a tenant in the home now.
Journal of Proteomics & Bioinformatics Structural Role of Hydrophobic Core in Proteins-Selected Examples This paper discusses the sequence/structure relation. The core question concerns the degree to which similar sequences produce similar structures and vice versa. A mechanism by which similar sequences may result in dissimilar structures is proposed, based on the Fuzzy Oil Drop (FOD) model in which structural similarity is estimated by analyzing the proteins hydrophobic core. We show that local changes in amino acid sequences, in addition to producing local structural alterations at the substitution site, may also change the shape of the hydrophobic core, significantly affecting the overall tertiary conformation of the protein. Our analysis focuses on four sets of proteins: 1) Pair of designer proteins with specially prepared sequences; 2) Pair of natural proteins modified (mutated) to converge to a point of high-level sequence identity while retaining their respective wild-type tertiary folds; 3) Pair of natural proteins with common ancestry but with differing structures and biological profiles shaped by divergent evolution; and 4) Pair of natural proteins of high structural similarity with no sequence similarity and different biological function. Introduction The presented analysis concerns the well-known problem of correlating the protein's amino acid sequence with its 3D structure. The search for algorithms which can be used to translate the former into the latter is a fundamental problem in proteomics and often yields useful insight into the specific properties of individual proteins. A classic example of this phenomenon is the group of structures referred to as immunoglobulin-like domains. Such domains are present in all immunoglobulins (where they determine their function) but are also encountered in enzymes and transport proteins. 
Immunoglobulins exhibit high structural similarity, adopting characteristic "sandwich" conformations with rather low sequence similarity. Even among immunoglobulins domains the and sequences are identified. Of course, the diversity of proteins which are not immunoglobulins but which do contain immunoglobulin-like domains is even greater. In addition to the above, studies have revealed cases where very similar sequences produce significantly different structural forms. For example, the KGVVPQLVK sequence generates a classic -twist in 1PKY but adopts a helical conformation in 1IAL. The three 7-residue sequences which also share this property of different secondary structure for identical sequences are given in Jacoboni et al.. Conservative hydropathic identity at geometrically equivalent positions is the object of analysis in Krissinel. Our work focuses on structural differences in four pairs of proteins: 1) Pair of designer proteins with specially prepared sequences; 2) Pair of natural proteins modified (mutated) to converge to a point of high-level sequence identity while retaining their respective wild-type tertiary folds; 3) Pair of natural proteins with common ancestry but with differing structures and biological profiles shaped by divergent evolution; and 4) Pair of natural proteins of high structural similarity with no sequence similarity and different biological function. In attempting to show the role of hydrophobic core structure in structural stabilization we refer to the Fuzzy Oil Drop (FOD) model, which predicts the 3D conformation of the target protein by simulating the emergence of a hydrophobic core. While our research has identified some interesting correlations, generalizing them remains an open issue: in order to determine whether the presented results may, in fact, be generalized we need to process a much larger database of proteins. Data The presented analysis concerns four sets of two proteins each. 
The first set comprises two de novo designed proteins with a sequential similarity of 88% but with differing 3D structures. The second describes two natural proteins which are modified (mutated) in a stepwise fashion in order to align their sequences while preserving structural differentiation (helix-to-Beta). The third set includes two natural homologues with common ancestry. The fourth one discusses pair of natural proteins of high structural similarity with no sequence similarity and different biological function. with even higher levels of sequence identity (95%) and differing folds. Thus, conformational switching to an alternative monomeric fold of comparable stability can be effected with just a handful of mutations in a small protein. This result has implications for understanding not only the folding code but also the evolution of new folds. The CATH classification for these two proteins is as follows: 1. 10 Wild type proteins with aim-oriented mutations Two proteins: G311 (1ZXG) and A219 (1ZXH) are modified versions of wild-type proteins designated G and A (IgG binding domains, source: Staphylococcus aureus for 1ZXG and Streptococcus sp. for 1ZXH) respectively. The series of mutations aimed at achieving a high level of sequence identity while preserving wild-type 3D structures. Both proteins (G311 vs. G and A219 vs. A) represent backbone RMS-D 1.4, maintaining wild-type secondary structures: / for G311 and helical for A219. The final sequence identity of both modified proteins is on the level of 59%. All relevant data was taken from a paper describing experimental results of protein modifications. The differences between homologous proteins are due to evolutionary pressure. In the presented case the sequence identity is 40%, yet the -helix is replaced by a -sheet in the C-terminal region spanning approximately 25 residues. 
According to Roessler et al., sedimentation analysis suggests a correlation between helix-to-sheet conversions, along with strengthened dimerization. Introduction to the Fuzzy Oil Drop (FOD) model The Fuzzy Oil Drop (FOD) model is a modification of the previously described oil drop model which asserts that hydrophobic residues tend to migrate to the center of the protein body while hydrophilic residues are exposed on its surface. The FOD replaces the binary discrete model with a continuous function peaking at the center of the molecule, which causes hydrophobicity density values to decrease along with distance from the center, reaching zero on the molecular surface. The idealized, theoretical hydrophobicity distribution is expressed by 3D Gauss function. The size of molecule shall be expressed by sigma parameters for Gauss function. The characteristics of this function allows represent the hydrophobicity distribution with maximum in the center of ellipsoid with decrease together with the increase of distance versus the center reaching zero level in the distance equal to 3sigma in any direction. This idealized distribution ensures high solubility since the entire ellipsoid is covered by the hydrophilic shell. On the other hand the actual distribution of hydrophobicity density observed in a protein molecule depends on inter-chain interactions, which, in turn, depend on the intrinsic hydrophobicity of each amino acid. Intrinsic hydrophobicity can be determined by experimental studies or theoretical reasoning-our work bases on the scale published in Kalinowska et al. while the force of hydrophobic interactions has been calculated using other scales as it was shown in Kalinowska et al.. For each amino acid j (or, more accurately, for each effective atom) the sum of interactions with its neighbors is computed and subsequently normalized by dividing it by the number of elementary interactions (following the function proposed in Levitt. 
The two hydrophobicity density distribution profiles: the expected (T) and observed (O) distribution an be compared quantitatively. Quantitative expressing of the differences between the expected (T) and observed (O) distribution is possible using the Kullback-Leibler divergence entropy formula : The value of D KL expresses the distance between the observed (p) and target (p 0 ) distributions, the latter of which is given by the 3D Gaussian (T). The observed distribution (p) is referred to as O. For the sake of simplicity, we introduce the following notation: Since D KL is a measure of entropy it must be compared to a reference value. In order to facilitate meaningful comparisons, we have introduced another opposite boundary distribution (referred to as "uniform" or R) which corresponds to a situation where each effective atom possesses the same hydrophobicity density (1/N, where N is the number of residues in the chain). This distribution is deprived of any form of hydrophobicity concentration at any point in the protein body: Comparing O|T and O|R tells us whether the given protein (O) more closely approximates the theoretical (T) or uniform (R) distribution. Proteins for which O|T>O|R are regarded as lacking a prominent hydrophobic core. To further simplify matters we introduced the following Relative Distance (RD) criterion: RD<0.5 is understood to indicate the presence of a hydrophobic core. Figure 1 presents a graphical representation of RD values, restricted (for simplicity) to a single dimension. D KL (as well as O|T, O|R and RD) may be calculated for specific structural units (protein complex, single molecule, single chain, selected domain etc.) In such cases the bounding ellipsoid is restricted to the selected fragment of the protein. It is also possible to determine the status of polypeptide chain fragments within the context of a given ellipsoid. This procedure requires prior normalization of O|T and O|R values describing the analyzed fragment. 
RD can be calculated for entire units (protein, chain, domain) and for selected fragment (following normalization of T i and O i values of the fragment under consideration). The above procedure will be applied in the analysis of proteins described in this paper. By restricting our analysis to individual fragments, we can determine whether a given fragment participates in the formation of a hydrophobic core. In particular, fragments of chain representing well defined secondary folds which satisfy RD<0.5 are thought to contribute to structural stabilization, while fragments for which RD≥ 0.5 are less stable. Such fragments, if present on the surface of the protein, may potentially form complexation sites. The fragments of chains are defined by their secondary structure. Identification of secondary structural folds and the composition of protein domains follow the CATH and PDBsum classifications. Likewise, interdomain/inter-chain contacts have been identified on the basis of the PDBsum distance criteria. The graphic presentation of RD interpretation is shown in Figure 1. The OORF system of RD calculation uses the method from ORF calculation in DNA analysis. OORF stands from Overlapped Open Reading Frame. The window of declared size (10 aa in our analysis) is taken as the fragment, the RD value is calculated. For example fragment 1-10 gets described by its RD value. Then the next window (2-11 aa) is taken for RD calculation. The RD value for each window requires prior normalization (the sum of T i and O i belonging to the window shall be equal to 1.0). This form of calculation makes possible characteristics of entire chain regardless the secondary structure. The detailed description of the FOD model is available in the paper recently published. 
De novo designed proteins According to results given in Table 1 the structure of 2JWS (G A 88) is consistent with the model both as a whole and in its packed section (without the N-terminal fragment 1-7 which was eliminated from calculation since the FOD model works well with globular proteins). This operation does not affect the status of helical folds. In 2JWU (G B 88) four -folds can be distinguished, in addition to a single helix. This molecule also contains a loop. The -fragment at 42-46 and the loop both diverge from the model even though in G A 88 the same residues form parts of an accordant helix. The fragment at 38-46 is characterized by higher-than-expected hydrophobicity density ( Figure 2A). Since this fragment is exposed on the protein surface (expected hydrophobicity is low), it may be responsible for possible forming complexes with other proteins which also expose hydrophobic areas on their surface. The consistently high accordance of 2JWS-both as a whole and when subdivided into folds-suggest a relative lack of local deformations. It may be speculated that as predicted by the FOD model, this molecule is highly water-soluble with low tendency to interact with any ligand molecule. Figures 2A and 2D present the hydrophobicity density profiles for both proteins, showing the values ascribed to each residue in the polypeptide chain. The distinguished fragments satisfy the condition of high expected and high observed hydrophobicity in both molecules. From the point of view of the model both molecules contain well-defined hydrophobic cores. One shall notice that the FOD model identifies the central part of molecule as the hydrophobic core together with the shell of intermediate coat including the exposed surface of expected hydrophobicity close to zero (hydrophilic surface). The co-existence of these two parts makes the hydrophobic core complete as protected and isolated against immediate contact with water environment. 
The identification of residues recognized as hydrophobic core members is based on the high expected and high observed hydrophobicity. Residues following this condition are recognized as responsible for hydrophobic core construction. The profiles shown in Figures 2B and 2C (OORF distributions) reveal significant differences pointing different fragments of low RD values suggesting high accordance between expected and observed distributions in both proteins. The observations listed above seem to support the conclusion that both proteins are structurally different in terms of their hydrophobic Figure 1: Graphical representation of fuzzy oil drop model hydrophobicity distributions obtained for a hypothetical protein reduced to a single dimension for simplicity. A) Theorized Gaussian distribution (blue) while the chart C corresponds to the uniform distribution (green). Actually observed (red) hydrophobicity density distribution in the target protein B, while its corresponding value of RD (relative distance), and in D is marked on the horizontal axis with a red diamond. According to the fuzzy oil drop model this protein does not contain a well-defined hydrophobic core, because its RD value, equal to 0.619, is above the 0.5 threshold (or-generally-closer to R than T). cores. The construction of hydrophobic core in 2JWS is generated by central part of polypeptide chain, comprising two fragments ( Figure 2A) while in 2JWU, it requires three separate fragments to participate in core generation ( Figure 2D). At this point it might be interesting to speculate about the progress of the folding process in each of these two cases. In 2JWS the hydrophobic core nucleates near the center of the chain, with the remaining sections aligning themselves to the emerging core. While in 2JWU the nucleation is mainly constructed by N-and C-terminal fragments with the participation also of central fragment of the chain. 
In summary, the introduction of seven mutations (G24A, I25T, I30F, I33Y, L45Y, I49T, L50K-with 2JWS serving as the reference strain) results in a far higher concentration of hydrophobic residues in 2JWS. This enlarges the hydrophobic core which is formed by the central fragment of the polypeptide chain. Unlike 2JWS, in 2JWU the core is made up of three separate fragments, including one that forms part of the shell (with lower hydrophobicity density). Substitutions at G42A, I25T, I30F, I33Y, L45Y result in the appearance of a long fragment which forms part of the hydrophobic core, while the presence of Y, T and K in the C-terminal fragment of 2JWU causes a hydrophobicity density gradient to emerge in the surface zone where hydrophilic residues appear, in accordance with the theoretical model. Figure 3 shows clearly the influence of mutations since the residues changed concern the positions of the central part in 2JWU. The location of these residues in 2JWU is rather distributed. In consequence different fragments of the chain participate in hydrophobic core formation and one fragment (the -structural fragment) appears to represent the hydrophobicity density distribution discordant versus the idealized one. Wild type proteins with aim-oriented mutations The results listed in Table 2 indicate very high agreement with the FOD model in two compared proteins: 1ZXH and 1ZXG-two IgG binding domains. Additionally, each secondary structure (including loops) remains consistent with structural predictions provided by the model. Considering the large set of molecules analyzed using the FOD model we can conclude that the presented proteins are among the most accordant in the entire set, as indicated by their RD values (so far RD=0.38 for the immunoglobulin-like domain in titin (1TIT) was found to be the lowest). 
The structure of the hydrophobic core, which is understood as the entire tertiary conformation of the protein (including the core itself and its hydrophilic sheath) remains highly consistent with theoretical predictions, as shown in Figures 4A and 4B. Figures 4C and 4D illustrate the agreement between theoretical and observed hydrophobicity density distributions, with correlation coefficients of 0.847 and 0.784 for G311 and A219 respectively. The figures also reveal highly hydrophobic (core) and hydrophilic (surface) residues, whose placement can be seen in Figure 5A. Finally, Figure 5B marks the loci of point mutations-though the affected residues do not clearly belong either to the core or to the hydrophilic sheath. Figure 5 visualizes the positions of mutations and their influence on the hydrophobic core rearrangement. Analysis of results for G311 and A219 indicates that tertiary structural stabilization (by hydrophobic core) appears to be dependent on a proper distribution of hydrophobicity density, ensuring the presence of a highly hydrophobic core as well as the encapsulating hydrophilic sheath, with near-zero hydrophobicity density values on its surface. Unfortunately the authors of the cited experimental work do not report on the relation between the introduced mutations and the proteins' capability to bind immunoglobulins. From the point of view of the FOD model, however, the mutated molecules should be less prone to complexation than their wild-type counterparts. This supposition follows from the observed good agreement between the theoretical and observed hydrophobicity density distributions-note that complexation sites are typically characterized by marked differences between both profiles (theoretical and observed). According to the model, a protein which only exposes hydrophilic residues on its surface should be highly soluble and incapable of interacting with any ligands other than dissolved ions. 
This phenomenon is evidenced by antifreeze and down- hill proteins, which exhibit near-perfect accordance with the theoretical hydrophobicity density distribution. The role of these proteins is to be well soluble without any specific interaction with any molecules from environment except water to not allow the ice-structuralization of water. Homologous proteins Two proteins of common ancestry: 2PIJ and 3BD1 are characterized in Table 3. Both of them represent well defined hydrophobic core (distribution of observed hydrophobicity density is similar to expected one-RD below 0.5). Two secondary structural fragments were recognized as locally discordant in 2PIJ and one in 3BD1. Analysis of RD values calculated using the OORF system for two homologous proteins reveals differences in the structure of their hydrophobic cores. In 3BD1 nearly the entire chain remains consistent with theoretical predictions (with the exception of several frames in the C-terminal section of the chain). The OORF profile visualize opposite role of certain fragments of the chain. In 2PIJ the central fragment (20-30 windows) represent local maximum, while analogical fragment in 3BD1 reaches its lowest level of RD values. In 2PIJ the RD parameter reaches higher values, especially in the central and the C-terminal fragment of the chain. Both distributions are characterized by low values for the N-terminal fragment (positions 1-10) where the RD parameter does not generally exceed 0.5. The presence of a complexation partner (marked "P" in Table 3) or a ligand ("L") does not seem to affect hydrophobicity density distribution in the relevant areas. In general, whenever ligand interaction requires a large discordant cavity, the corresponding deviation can usually be noted by deficiency of hydrophobicity density which can be identifying on the distribution profile (which is not the case here). 
Similarly, protein complexation often occurs in areas of excess hydrophobicity exposed on the protein surface -which, again, is not the case with the presented protein. By comparing the results presented in Table 3 and Figure 6, we can conclude that 3BD1 possesses a more stable structure, resembling the idealized "fuzzy oil drop" (i.e., with limited differences between the Colors indicate the status of each fragment: red areas diverge from the model while cyan ones remain consistent, as shown in Table 3. Unrelated proteins of common fold Two proteins: 3CHY wild-type CheY from Escherichia coli, where residue Asp-57 (supported by Lys-109) undergoes phosphorylation and 1RCF -oxidized recombinant flavodoxin from the cyanobacterium Anabaena 7120 responsible for electron transfer from photosystem I to ferredoxin-NADP(+) reductase. The distributions of expected and observed hydrophobicity density distribution in both proteins reveal the high similarity between these two profiles. It is also expressed by low values of RD: 0.300 (O|T=0.089, O|R=0.207) for 1RCF and RD=0.443 (O|T=1.147, O|R=0.185) for 3CHY. However the secondary fragments representing the status of RD>0.5 were found. The helical fragment in 3CHY appeared to represent the status discordant versus the model as well as the loop. Two -structural fragments in 1RCF represent the status discordant versus the model as well as the loop 90-98 ( Figure 8). The location of fragments representing the distribution of hydrophobicity density in 3D structure of both proteins appeared to be different. The fragments placed rather on the surface of protein in 3CHY represent the discordant status while in 1RCF the dissimilarity versus the model is occurring in the central part of the molecule (Figures 8A-8D). This observation may suggest different instability of these two molecules, assuming that other than regular ordered hydrophobicity density distribution may influence the local stability. 
The biological activity of 3CHY requires complexation with other protein molecule. The discordance identified on the protein surface suggests potential area ready for complexation as may be concluded from the FOD model. Local instability (local discordance observation versus -expectation) in this case implies substantially different potential tendency to structural differentiation. The lower stability may be supposed in 1RCF as the disagreement is localized in the core of the molecule makes the structural less stable in comparison to analogical -structural part in 3CHY (Figure 9). It may suggest the easier destabilization of entire molecule (1RCF) while the stable core in 3CHY may protect the molecule against decomposition of the central part of the molecule. One shall note that the divergence entropy used to measure the differences between profiles recognizes as different positions of opposite tendency. Even large surface between profiles may be ignored by divergence entropy calculation as long as two profiles represent similar tendency. Green space-filling-residues engaged in biological activity (according to Volz and Matsumura ) Yellow fragments-RD above 0.5 recognized according to OORF calculation. Red fragments-discordant fragments according to RD calculated for secondary fragments. These two proteins not related one to the other (sequence similarity of only 19% (Clustal2.1 calculation with standard parameters) shows that the structural similarity does not necessarily ensures similar stability of the protein taken the interpretation of FOD model as the criteria for stability estimation. Discussion and Conclusion The study of sequence-to-structure correlations in proteins has a long history. This work hints at the importance of the hydrophobic core in determining the protein's tertiary conformation. 
Our observations support the suggestions contained in Bakker, where the authors conclude that protein structure remains tolerant to residue substitutions as long as the hydropathic profile of the sequence is preserved. Since water is an important factor in this process, much effort has been directed towards analyzing the influence of the proteins' aqueous environment. They way in which residue sequences encode 3D structures remain a fundamental question in biology. One approach to understanding the folding process is to design a pair of proteins with maximum sequence identity but with differing folds. Therefore, the nonidentity's must be responsible for determining which fold topology prevails and constitute a fold-specific folding code. The intentionally designed proteins G A 88 and G B 88, with 88% sequence identity but different folds and functions are described here in the context of the FOD model. Despite a large number of mutations which together bring sequence identity from 16% to 88%, G A 88 and G B 88 maintain their distinct wild-type 3-alpha and alpha/beta folds, respectively. As the Alexander et al. claim, the 3D-structure determination of two monomeric proteins with such high sequence identity but different fold topology is unprecedented. The geometries of seven nonidentical residues (of 56 total) provide insight into the structural basis for switching between 3- and / conformations. The FOD model applied to these two de novo designed proteins, to two wild-type proteins with intentionally modified sequences as well as to two homologous proteins, reveals the importance of hydrophobic core structure. Our analysis proves that the hypothesis expressing the dominant role of hydrophobic interactions in tertiary structural stabilization can be confirmed quantitatively. 
Additionally the role of hydrophobic core in stabilization of structures of natural proteins modified (mutated) to converge to a point of highlevel sequence identity while retaining their respective wild-type tertiary folds, of natural proteins with common ancestry but with differing structures and biological profiles shaped by divergent evolution as well as of natural proteins of high structural similarity with no sequence similarity and different biological function was recognized as main mechanism for structure stabilization as expressed by FOD model. The FOD model posits a structure which consists of a hydrophobic core (central part of the protein body) together with a sheath acting as a buffer zone between the hydrophobic center and the hydrophilic surface. The role of water in the protein folding process and its influence on the final structure of the protein remains a persistent subject in molecular biology ; however, the question of generalizing the presented observations remains an open issue. The application of FOD model for amyloidosis mechanism is presented Roterman et al.. The applicability of FOD was tested on few selected sets of proteins of small size, structural similarity, protein complexes and intrinsically disordered proteins.
# Read integers from the user until 0 is entered, then print their running
# sum and the product of all non-zero entries.
#
# FIX: removed the stray "<gh_stars>0" metadata token that preceded the code
# and made the file a syntax error; normalized spacing to PEP 8.

total = 0      # running sum of every number entered (including the final 0)
product = 1    # product of the non-zero numbers entered

while True:
    number = int(input("Enter number: "))
    total += number
    if number == 0:
        # 0 is the sentinel: report results and stop.
        # Output labels kept exactly as in the original script.
        print("summ: ", total)
        print("composition: ", product)
        break
    product *= number
Pierre-Henri Gourgeon, CEO of Air France KLM, promoting new domestic routes in France last week. IT offers the prospect of Sydney to London becoming a short-haul flight and travellers arriving in Tokyo before the credits for the on-board movie roll. A Formula One tycoon is linking up with KLM, the Dutch airline, to develop spacecraft that could bring every city on Earth within two hours' travel time. This week they will reveal the first British passenger to buy a $A97,000 ticket for a ride on an early version of the craft, providing sub-orbital flights for space tourists. Read Next Their ambition, however, is to pioneer commercial space travel with, they hope, the first scheduled flights within 15 to 20 years. Michiel Mol, 42, a Dutchman who co-owns the Force India F1 team and made his fortune in computer software, said this weekend: "Being able to travel from London to Sydney in an hour and 45 minutes, that is the future. It is also the reason why KLM joined our firm [Space Expedition Curacao] as a partner. "They themselves started a hundred years ago with sightseeing tours above Amsterdam, and now we have an immense aviation industry." Peter Hartman, chief executive of the airline, said: "KLM supports this innovative project. The SXC programme's aim is to make space flights - the future of travel - accessible in a responsible and sustainable way by developing and promoting new technologies." So far seven people have paid for commercial flights into space, paying sums of up to $A32m each to ride on Russian rockets. Sir Richard Branson intends to become the first private space tourism operator, booking seats at $A209,000 each. Mol intends to follow suit in early 2014 and says he has already sold 35 tickets at pounds $A97,000 for flights from the Caribbean island of Curacao. Regulatory approval is still under negotiation. 
His first spaceship, the Lynx, from the Californian firm XCOR Aerospace, will be unveiled next spring and will, he claims, feature breakthrough technology with a reusable engine. "It's the first time a spaceship will be capable of doing four flights a day and of doing 5,000 flights with one engine," he said. Passengers, who will be entitled to call themselves astronauts if they reach an altitude of 100 kilometres, will be required to pass physical tests which he says are no more stringent than would be expected of an air steward. The first generation spaceship will travel at 2,200mph, but the second generation will need to reach a velocity of 13,750mph to achieve the desired orbit. Although Mol concedes this is "a long way off", he adds that once the craft is in space "where you are going doesn't make much difference. You need 10 minutes to get into space and maybe half an hour to decelerate and land again, and the rest of the time you are flying at 12,000-13,750mph". "Flying from London to Barcelona would still take an hour or so while London to Tokyo would be about 1hr 30min and London to Sydney 1hr 45min. " The cheapest ticket prices on Concorde were pounds 6,200 before a crash in 2000 led to the suspension of services. Mol says the viability of long-haul space travel will depend on similar price levels. "When we get to the point that travelling through space is two or three times [the cost of] a business-class long-distance flight then I think there will be millions of people who would prefer to be in Sydney in a little more than 1 and a half hours instead of 24 hours." Mol has committed less than $A161m for the space tourism venture and concedes he will need massive outside investment to make space travel a reality. His father, Jan Mol, who is worth $A653m according to The Sunday Times Rich List, will co-host the British launch of the venture with Marie Claire von Alvensleben in London this week. 
Anton Kreil, the first Briton to pay Mol for a sub-orbital flight, admitted he was partly motivated by ego. "I will be an astronaut. I will no longer have to listen to people who brag about their new Aston Martin." Kreil, 32, who starred in the BBC TV programme, Million Dollar Traders, said he believed commercial space travel was close to becoming a reality. "I was brought up in a four-bedroom terrace house in Liverpool, and there are going to be a lot of normal people going," he said. "I believe we are only 20 years away from this becoming a viable and affordable means of transportation." Setting the agenda for Australia's $150BN agribusiness sector The program for Australia's premier agribusiness conference - The Global Food Forum - is set. Hear from more than 30 industry leaders including PepsiCo's CEO, Danny Celoni, Jayne Hrdlicka, CEO of A2 Milk Company, Barry Irvin, Executive Chairman, Bega Cheese and Costco's Managing Director, Patrick Noone. Sheraton Grand Sydney Hyde Park Book Now
/**
 * Test Wicket application class for mocking out the Spring web context.
 *
 * Overrides the Spring injector setup so component injection is driven by
 * the test's application context ({@code ctx}) instead of a real web context.
 */
private final class TestCheesrApplication extends CheesrApplication {

    @Override
    protected void initSpringInjector() {
        // Wire components from the supplied test context rather than
        // looking up a live Spring WebApplicationContext.
        final SpringComponentInjector injector =
                new SpringComponentInjector(this, ctx);
        addComponentInstantiationListener(injector);
    }
}
/**
 * Static collection helpers shared by tests.
 *
 * Created by rantunes on 29/11/15.
 */
public class TestUtil {

    /**
     * Returns a set containing the given elements.
     *
     * Insertion order is preserved (the set is backed by a
     * {@link LinkedHashSet}); duplicate elements are collapsed.
     *
     * @param elements the elements to wrap in a set
     * @return an ordered set of the distinct elements
     */
    @SafeVarargs // the array is only read and wrapped, never written
    public static <T> Set<T> asSet(T... elements) {
        return new LinkedHashSet<>(Arrays.asList(elements));
    }

    /**
     * Drains the given stream into a list.
     *
     * Note: collecting is a terminal operation, so the stream cannot be
     * reused after this call.
     *
     * @param stream the stream to drain
     * @return a list with the stream's elements, in encounter order
     */
    public static <T> Collection<T> convertToCollection(Stream<T> stream) {
        return stream.collect(Collectors.toList());
    }
}
/* * Created from 'scheme.tl' by 'mtprotoc' * * Copyright (c) 2021-present, Teamgram Studio (https://teamgram.io). * All rights reserved. * * Author: teamgramio (<EMAIL>) */ package core import ( "fmt" "github.com/teamgram/proto/mtproto" "github.com/teamgram/teamgram-server/app/service/authsession/authsession" ) // AuthsessionGetUserId // authsession.getUserId auth_key_id:long = Int64; func (c *AuthsessionCore) AuthsessionGetUserId(in *authsession.TLAuthsessionGetUserId) (*mtproto.Int64, error) { keyData, err := c.svcCtx.Dao.GetAuthKey(c.ctx, in.GetAuthKeyId()) if err != nil { c.Logger.Errorf("session.getUserId - error: %v", err) return nil, err } else if keyData == nil || keyData.PermAuthKeyId == 0 { return nil, fmt.Errorf("not found keyId") } userId := c.svcCtx.Dao.GetAuthKeyUserId(c.ctx, keyData.PermAuthKeyId) return mtproto.MakeTLInt64(&mtproto.Int64{ V: userId, }).To_Int64(), nil }
LOWNDES CO., GA (WALB) - After being held up by Hurricane Michael, officials say the weather is finally cooperating for the completion of the Naylor Boat Ramp. “We actually completed the parking lot, one week before Hurricane Michael," said Chad Mcleod, Project Manager. Afterwards, there were still weeks worth of labor left to be done and a river sitting well above where they needed it to be to continue with the project. “I would say they were at least 15 feet above where we needed it to be," said Mcleod. This past Monday, they were able to start work on the $75,000 boating ramp. “We’re just really looking forward to adding a quality of life amenity here in Lowndes County so that citizens have a public access enjoy our beautiful waterways," said County Spokesperson, Paige Dukes. Not just for Lowndes County residents, but tourists looking to come visit the area or possibly make it their home. “We hope that other people will come into Lowndes County and enjoy this location as well as spend some of their money here while they’re in Lowndes County," said Dukes. It will be ready just in time for the summer. “It gets very hot here in South Georgia during the summer months. We all are looking for a little cool water and some respite from that heat, so this will be a great location for people to come out and bring their families," said Dukes. Officials said the project is expected to be finished in the next six weeks.
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.contacts.address_and_contact import load_address_and_contact, delete_contact_and_address

class Shareholder(Document):
    """Shareholder doctype controller.

    Wires the standard address/contact helpers into the document life
    cycle and keeps the share-balance amounts in sync before saving.
    """

    def onload(self):
        """Load address and contacts in `__onload`"""
        load_address_and_contact(self)

    def on_trash(self):
        """Drop the Contact/Address records linked to this Shareholder."""
        delete_contact_and_address('Shareholder', self.name)

    def before_save(self):
        """Recompute each share-balance row's amount from shares x rate."""
        for row in self.share_balance:
            row.amount = row.no_of_shares * row.rate
import React, { FC, ReactElement, useCallback, useEffect, useState } from "react"; import SortableTree, { NodeRendererProps, TreeItem } from "react-sortable-tree"; import { Props as TooltipProps } from "react-tooltip"; import { AutoSizer, Index, ScrollParams } from "react-virtualized"; import { theme } from "../../theme"; import { Size } from "../../types"; import { Tooltip } from "../Tooltip"; import { Node, NodeProps } from "./Node"; import { StyledTree, StyledTreeAlt } from "./style"; import _ from "lodash"; type NodeContentRendererType = FC<Readonly<NodeContentRendererProps>>; interface NodeContentRendererProps extends NodeRendererProps {} export interface TreeProps { readonly nodes: Array<Readonly<NodeProps>>; readonly onChange?: (nodes: Array<Readonly<NodeProps>>) => void; readonly isAlternative?: Readonly<boolean>; readonly size?: Readonly<Size>; readonly rowHeight?: ((info: Index) => number) | number; readonly withTooltips?: boolean; readonly TooltipProps?: TooltipProps; readonly defaultScrollTop?: number; readonly onScroll?: (params: ScrollParams) => void; } export const Tree: FC<TreeProps> & { Node: FC<Readonly<NodeProps>>; } = (props): Readonly<ReactElement> => { const { isAlternative, size, nodes, withTooltips, TooltipProps, defaultScrollTop = 0 } = props; const TreeComponent = isAlternative ? 
StyledTreeAlt : StyledTree; const [treeData, setTreeData] = useState<Array<Readonly<TreeItem>>>(nodes); const [scrollTop, setScrollTop] = useState<number>(defaultScrollTop); useEffect(() => { setScrollTop(defaultScrollTop); }, [defaultScrollTop]); useEffect(() => { setTreeData(nodes); Tooltip.rebuild(); }, [nodes]); const onChange = treeData => { setTreeData(treeData); props.onChange?.(treeData); }; const onScroll = (params: ScrollParams) => { Tooltip.rebuild(); setScrollTop(params.scrollTop); props.onScroll?.(params); }; const NodeContentRenderer: NodeContentRendererType = useCallback( props => { const { node, treeIndex, toggleChildrenVisibility, path } = props; return ( <Tree.Node key={treeIndex} children={node.children} isAlternative={isAlternative} onClick={() => toggleChildrenVisibility?.({ node, path, treeIndex })} title={node.title} expanded={node.expanded} size={size} level={path.length} iconOpen={node.iconOpen} iconClose={node.iconClose} tooltip={node.tooltip} /> ); }, [isAlternative, size] ); let rowHeight: string = theme.height.md; if (size === "small") { rowHeight = theme.height.sm; } else if (size === "large") { rowHeight = theme.height.lg; } return ( <TreeComponent size={size}> {withTooltips && ( <Tooltip id="tree-tooltip" place="top" {...TooltipProps} /> )} <AutoSizer disableWidth> {({ width, height }) => ( <SortableTree style={{ height, width }} treeData={treeData} onChange={onChange} canDrag={false} rowHeight={props.rowHeight || parseInt(rowHeight, 0)} nodeContentRenderer={NodeContentRenderer} reactVirtualizedListProps={{ scrollTop, onScroll, onRowsRendered: () => Tooltip.rebuild() }} /> )} </AutoSizer> </TreeComponent> ); }; Tree.defaultProps = { isAlternative: false, size: "medium" }; Tree.Node = Node; Tree.displayName = `Tree`;
package com.centurylink.mdw.adapter; import org.json.JSONException; import org.json.JSONObject; import com.centurylink.mdw.model.Jsonable; import com.centurylink.mdw.model.request.Response; public class AdapterStubResponse extends Response implements Jsonable { private static final String JSON_NAME = "AdapterStubResponse"; private int delay; // seconds public int getDelay() { return delay; } public void setDelay(int delay) { this.delay = delay; } private boolean passthrough; public boolean isPassthrough() { return passthrough; } public void setPassthrough(boolean passthrough) { this.passthrough = passthrough; } public AdapterStubResponse(String content) { super(content); } public AdapterStubResponse(JSONObject json) throws JSONException { super(json.getJSONObject(JSON_NAME)); if (json.getJSONObject(JSON_NAME).has("delay")) this.delay = json.getJSONObject(JSON_NAME).getInt("delay"); if (json.getJSONObject(JSON_NAME).has("passthrough")) this.passthrough = json.getJSONObject(JSON_NAME).getBoolean("passthrough"); } public JSONObject getJson() throws JSONException { JSONObject json = create(); JSONObject responseJson = super.getJson(); if (delay > 0) responseJson.put("delay", delay); if (passthrough) responseJson.put("passthrough", passthrough); json.put(JSON_NAME, responseJson); return json; } public String getJsonName() { return JSON_NAME; } }
Direct Detection and Differentiation of Pathogenic Leptospira Species Using a Multi-Gene Targeted Real Time PCR Approach Leptospirosis is a growing public and veterinary health concern caused by pathogenic species of Leptospira. Rapid and reliable laboratory tests for the direct detection of leptospiral infections in animals are in high demand not only to improve diagnosis but also for understanding the epidemiology of the disease. In this work we describe a novel and simple TaqMan-based multi-gene targeted real-time PCR approach able to detect and differentiate Leptospira interrogans, L. kirschneri, L. borgpeteresenii and L. noguchii, which constitute the veterinary most relevant pathogenic species of Leptospira. The method uses sets of species-specific probes, and respective flanking primers, designed from ompL1 and secY gene sequences. To monitor the presence of inhibitors, a duplex amplification assay targeting both the mammal -actin and the leptospiral lipL32 genes was implemented. The analytical sensitivity of all primer and probe sets was estimated to be <10 genome equivalents (GE) in the reaction mixture. Application of the amplification reactions on genomic DNA from a variety of pathogenic and non-pathogenic Leptospira strains and other non-related bacteria revealed a 100% analytical specificity. Additionally, pathogenic leptospires were successfully detected in five out of 29 tissue samples from animals (Mus spp., Rattus spp., Dolichotis patagonum and Sus domesticus). Two samples were infected with L. borgpetersenii, two with L. interrogans and one with L. kirschneri. The possibility to detect and identify these pathogenic agents to the species level in domestic and wildlife animals reinforces the diagnostic information and will enhance our understanding of the epidemiology of leptopirosis. 
Introduction Leptospirosis is a growing and underestimated public health and veterinary concern, caused by pathogenic spirochetes belonging to the family Leptospiracea, genus Leptospira. The disease is an important cause of abortion, stillbirths, infertility, poor milk production and death amongst livestock, harboring a significant economic impact. Its transmission requires circulation of the agents among domestic and wild animal reservoirs, with rodents recognized as the most important sources that establish persistent renal carriage and urinary shedding of Leptospira. Humans are incidental hosts acquiring a systemic infection upon direct or indirect exposure to the urine, blood or tissue of an infected animal. Farmers, veterinarians, sewer workers, pet keepers, rodent catchers and those persons participating in aquatic leisure activities are more prone to acquire the disease. Conventional classification of Leptospira is based on serological criteria, using the serovar as the basic taxon. To date over 250 pathogenic serovars separated into 25 serogroups are known. The serological classification system is complemented by a genotypic one, in which 21 genetic species are currently recognized, including pathogenic, intermediate and non-pathogenic (or saprophytic) species. Genetic species boundaries hardly correlate with the serological classification. Serological approaches are used commonly for diagnosis of leptospirosis in animals. The reference method is the Microscopic Agglutination Test (MAT), which has the advantage of being specific for serogroups but has several drawbacks of being laborious and requiring a panel of viable Leptospira cultures. Isolation of leptospires, from suspect clinical specimens, constitutes the definitive diagnosis but is also technically demanding, time consuming and subject to contamination and high rates of failure. 
Isolates are traditionally classified to the serovar level by the Cross Agglutinin Absorption Test (CAAT) which is cumbersome for routine use and is only performed in a few reference laboratories worldwide. Rapid and reliable laboratory tests for the direct detection of leptospiral infections in animals are in high demand, particularly to support suitable control measures. Serology does not corroborate well with the presence of pathogenic viable leptospires in the kidneys or urine and detection of the agents is necessary to identify healthy animal carriers. Molecular-based assays have been previously described for detecting leptospires in clinical samples. Most approaches are PCR-based and target specific genes or polymorphisms in the genome of pathogenic leptospires. Several real time PCR assays have been described predominantly for use with human samples such as whole-blood, serum or urine but only few have been plentifully validated. A few assays were evaluated or used for detecting Leptospira in kidney tissue, blood, urine and other clinical specimens from animals such as sheep, dogs, pigs, deer, flying foxes and rodents. Most assays rely on SYBR green detection chemistry and only differentiate between pathogenic and nonpathogenic leptospires, lacking the ability to distinguish between different species. Nevertheless, speciation of infecting Leptospira from clinical material may be important for determining the clinical significance, the probable source of infection, to distinguish sporadic cases from possible outbreaks and to better access the epidemiology of the disease. In the present work we have developed a novel and simple TaqMan-based multi-gene targeted real-time PCR approach yielding high sensitivity and specificity for the direct detection and differentiation of the most relevant pathogenic Leptospira species in animal samples, suitable for introduction into the routine diagnostics of veterinary laboratories. 
Bacterial strains Eighty five reference strains and clinical and environmental isolates of Leptospira spp. belonging to pathogenic, intermediate and non-pathogenic phylogenetic clades were used in this study (Table 1) Spiked tissue samples A sample of kidney tissue from a bovine was used for testing as spiked sample. The kidney was acquired from a local official slaughterhouse (Raporal, Portugal), obtained from a bovine intended for normal human consumption, with no signs of leptospirosis. The bovine was not killed specifically for the purpose of this study. Approximately 200 mg portions of kidney tissue were excised with a sterile scalpel and homogenized with 5 ml of PBS buffer in a sterile plastic bag (Whirl-Pak bags) using a stomacher lab-blender. Kidney samples were individually spiked with the following strains, in order to determine the analytical detection sensitivity: Leptospira interrogans (serovar Autumnalis, strain Akiyami), L. kirschneri (serovar Mozdok, strain Portugal 1990), L. noguchii (serovar Panama, strain CZ 214K) and L. borgpetersenii (serovar Tarassovi, strain Mitis Johnson). All the strains were grown at 29uC and the concentrations of leptospires were determined using a Petroff-Hausser counting chamber and adjusted to 10 8 cells/ml with PBS buffer. For each strain, tenfold serial dilutions from 10 7 to 10 0 cells/ml were prepared in PBS buffer and 0.1 ml aliquots were used to spike 0.9 ml of tissue homogenates. Tissue homogenate spiked with 0.1 ml PBS buffer was used as negative control. DNA extraction was performed as described in the paragraph ''Genomic DNA extraction'' below. Tissue samples INIAV IP is the Portuguese Reference Laboratory for animal diseases and provides diagnostic services to national veterinary authorities and private clients. Twenty seven dead wild rodents (25 Mus spp. and 2 Rattus spp.) were sent to the INIAV laboratory during the year 2011 for analysis and further used in this study ( Table 2). 
The rodents were captured in the Lisbon Zoo under routine operations for rodent population control, by the local veterinary authorities. No animals were sacrificed for the only purposes of research. Additionally, a Patagonian mara (Dolichotis patagonum), also from the zoo, and a swine (Sus domesticus) stillbirth fetus, from a private client, both suspect of dying with leptospirosis, were submitted for analysis to our reference laboratory and later included in this study ( Table 2). On arrival to the laboratory, animals were given a reference number and sent to the pathology where kidney, liver and/or lung tissue samples were collected. Specimens were then analysed using culture-based methods according to the OIE standard procedures for leptospirosis. Briefly, specimens were aseptically collected at necropsy, immediately emulsified in sterile buffered saline solution in a 10% tissue suspension, two to three drops were inoculated in a first tube of medium and two more tubes were similarly inoculated with increasing 10-fold dilutions of the tissue suspension. For the tissue culture, a semisolid Leptospira EMJH medium was used by adding 0.1% agar to commercial EMJH (Difco), to which rabbit serum (0.4%) and 5-Fluorouracil (100 mg/ml) were further added. DNA was extracted directly from tissues homogenates as described below. Genomic DNA extraction Genomic DNA was extracted from both bacterial liquid cultures and tissue homogenates using the QIAamp DNA extraction kit according to the manufacturer's instructions (Qiagen, Hilden, Germany), with a final elution volume of 200 ml. The DNA concentration from the pure cultures was estimated spectrophotometrically using a Nanodrop 1000 spectrophotometer (Nanodrop Technologies, Wilmington, DE) and standardized to a concentration of 10 4 genome equivalents (GE)/ml for use in the reactions. The number of GE was estimated using an average Table 1. 
Leptospira strains used in the present study and results of the real time PCR assays using the species-specific probes and flanking primers. genome size of 4.6 Mb. Genomic DNA suspensions were stored at 220uC until further use. Design of TaqMan probes and flanking primers DNA sequences of representative strains and species of Leptospira were retrieved from NCBI-GenBank and aligned using the ClustalW algorithm implemented in the program MegAlign (vers. 5.03) (DNAStar, USA). Primers and dual labeled hydrolysis probes (TaqMan probes) were designed to target selected speciesspecific genetic polymorphisms of the following pathogenic Leptospira spp.: L. interrogans, L. borgpetersenii, L. kirschneri and L. noguchii (Table 3). Probes and primers specificities were assessed in silico using the BLAST tools from NCBI-GenBank. All probes and primers were synthesized by MWG Biotech (Germany). Real-time PCR assays We have implemented the following assay format for testing DNA templates extracted from biological samples: (i) a first duplex amplification step aiming the detection of pathogenic Leptospira spp. (by targeting the leptospiral lipL32 gene; Table 3) and including an internal control to monitor the presence of potential amplification inhibitors (by targeting the mammal b-actin gene; Table 3); (ii) if pathogenic leptospires are detected in the first reaction, these may be further discriminated by testing each of the L. interrogans, L. borgpetersenii, L. kirschneri and L. noguchii targeted probes/primers ( Table 3). The CFX96 real-time PCR detection system (Bio-Rad, USA) was used for all assays. The amplification reactions were optimized individually for all the probes and associated primers using the SsoFast Probes Supermix (Bio-Rad, USA), according to the manufacturer's instructions. 
Each reaction was conducted in a total volume of 20 ml consisting of 16 SsoFast Probes Supermix, 400 nM of each primer, 150 nM of TaqMan probe, DNase free water (GIBCO) and 5 ml of DNA template solution (extracted from pure cultures or tissues samples). Non-template negative controls (with PCR grade water) were included in each run to rule out the possibility of crosscontamination. The assay thermal conditions were as follows: 95uC for 2 min, followed by 45 cycles of 5 s at 95uC and 15 s at the optimized annealing temperature for each probe ( Table 3). The thermal cycling conditions for the duplex amplification targeting b-actin and lipL32 were 95uC for 2 min, followed by 45 cycles of 5 s at 95uC and 35 s at 60uC. Reproducibility of the assays was assessed by repeating the assays at least twice. Data analyses were performed by the detection system of the real-time PCR equipment, according to the manufacturer's instructions. Analytical specificity and sensitivity In order to determine if each set of probe and associated primers was specific for the respective Leptospira target species, the amplification assays were tested on DNA templates extracted from different strains belonging to pathogenic, intermediate and nonpathogenic Leptospira species (Table 1), and from other nonrelated bacteria previously mentioned in ''bacterial strains'' section. The analytical sensitivity of the amplification assays (limits of detection -LODs) were determined using 10-fold serial dilutions of genomic DNA extracted from pure cultures of L. interrogans (serovar Autumnalis, strain Akiyami), L. kirschneri (serovar Mozdok, strain Portugal 1990), L. noguchii (serovar Panama, strain CZ 214K) and L. borgpetersenii (serovar Tarassovi, strain Mitis Johnson). LODs on tissue samples were assessed using DNA extracted from the serially diluted spiked macerates. Each template was tested in triplicate. 
Sequencing Leptospira isolates obtained from tissue samples were identified by comparative sequence analysis of a 245 bp region of the secY gene, as described by Victoria et al.. Briefly, the region of interest was amplified using primers SecYII (59-GAA TTT CTC TTT TGA TCT TCG-39) and SecYIV (59-GAG TTA GAG CTC AAA TCT AAG-39), which amplify secY sequences from all pathogenic strains of Leptospira. PCR amplifications were performed on a C1000 thermocycler (Bio-Rad) using the following program: an initial step of denaturation for 5 min at 95uC, followed by 34 cycles consisting of annealing, 45 sec at 54uC, extension, 2 min at 72uC, and denaturation, 30 sec at 94uC. Nucleotide sequences were determined, using the same primers, by commercially available sequencing services. Nucleotide sequence analysis and comparison with other relevant reference sequences were performed using the BLAST suite at NCBI-GenBank and aligned using Clustal X or MEGA software (version 5.0). Design of probes and primers Species-specific sets of primers and probes targeting L. interrogans, L. borgpetersenii, L. kirschneri and L. noguchii are listed in Table 3. As shown in Figures S1, S2, S3 and S4 in File S1, these sets of probes and primers contained sufficient polymorphisms to warrant 'in silico' species specific amplification. Analytical specificity and sensitivity Execution of the PCRs on DNA extracted from various bacteria, revealed a highly specific amplification from any of the pathogenic strains belonging to the respective target Leptospira spp., i.e. L. interrogans, L. kirschneri, L borgpetersenii and L. noguchii. None of the other strains yielded a positive amplification reaction (Table 1; Fig. 1A). The analytical sensitivity (LOD) of the amplification assays were found to be between 1 and 10 genome copies in the PCR mixture for each probe and primer set. 
Spiked tissue samples The LOD of the PCRs on spiked tissue samples was similar for all probe/primers sets targeting the respective target species, and estimated to be 10 3 leptospires/ml of tissue homogenate (< per 20 mg of tissue) (Fig. 1B). Furthermore, the same LOD was estimated for the lipL32-targeted probe/primers when used in duplex amplification reactions with the mammal b-actin probe (not shown). Clinical tissue samples DNA extracted from 27 kidney samples of wild rodents were analysed with the lipL32 and mammal b-actin targeted duplex assay ( Table 2; Fig. 2A). Leptospiral DNA was detected in three samples, as demonstrated by a positive amplification of the lipL32 gene region (Table 2; Fig. 2A). Furthermore, the partial b-actin gene was amplified from all samples, showing that the PCR reactions were not significantly inhibited by potential contaminants. When tested with each of the L. interrogans, L. borgpetersenii, L. kirschneri and L. noguchii targeted probes/ primers, only these three samples showed amplification (Table 2; Fig. 2B). Two of these DNA samples were identified as L. borgpetersenii and one sample as L. interrogans. Testing a pooled sample of kidney and liver tissues from a Patagonian mara, and a lung sample from an aborted swine fetus with the duplex PCR revealed a positive amplification for both samples (Table 2). Subsequent testing with the species-specific sets of probes and Table 2. Results of the bacteriological culture and of the real time amplification assays for the tissue samples analyzed in the present study. primers showed that the Patagonian mara was infected with L. interrogans and the swine fetus with L. kirschneri. Leptospira isolates were only cultured from the samples that also yielded PCR-positive results, thus confirming the presence of viable leptospires (Table 2). Molecular speciation through analysis of the partial sequences of the secY gene was in concordance with the results obtained by the species-specific PCRs. 
Two isolates were identified as L. borgpetersenii (from wild rodents; GenBank accession numbers KM066006 and KM066007), one as L. kirschneri (from the swine fetus; accession number KM066009) and two as L. interrogans (from a wild rodent and the Patagonian mara; accession numbers KM066008 and KM066010, respectively). Discussion In this work we present a two step real-time PCR strategy to infer the presence of pathogenic leptospires in clinical and veterinary samples. In the first step, we assess if an animal tissue sample is infected with a pathogenic leptospire by targeting its lipL32 gene. The lipL32 gene encodes an outer membrane lipoprotein that is confined to pathogenic Leptospira species. The second step identifies the four most common and veterinary relevant pathogenic Leptospira species, L. interrogans, L. borgpetersenii, L. kirschneri and L. noguchii using dedicated sets of probes and primers. Probes and flanking primers were developed by in silico analysis and further tested for their practical utility on DNA extracted from cultured bacteria, spiked tissues and clinical specimens. The amplification assays have proved to be specific to the respective targeted species, with no cross-reactions when non-pathogenic leptospires or other pathogens were tested. The amplification of the b-actin gene was included in the initial lipL32-based PCR to assess the presence of amplification inhibitors in tissue samples. However, the abundant presence of b-actin gene copies in DNA samples extracted from tissues may ensure some amplification even when low levels of potential inhibitors are present (but amplification curves are usually weaker and anomalous). The analytical sensitivity deduced for the amplification assays, i.e. 1 to 10 GE on DNA extracted from cultured leptospires and 10 3 leptospires/ml tissue homogenate, were similar to the ones of other previous studies concerning the molecular detection of leptospires 19,22]. 
The panel of species-specific probes and flanking primers may be extended with the design of novel oligonucleotides, e.g. for use in regions where the occurrence of additional species of pathogenic leptospires is common. As far as we know, this is the first report describing a strategy capable of clearly identifying the four most frequently found pathogenic Leptospira species based on the use of TaqMan probes. From 27 kidney samples of wild rodents, and samples from a Patagonian mara and a porcine fetus suspected of leptospirosis, three rodent samples and the samples from the Patagonian mara and fetus all yielded a positive PCR test for the presence of pathogenic leptospires. In concordance, these samples were also positive by culture. Culture provides proof of infection and thus is an ideal reference standard. Consequently, these results are consistent with a 100% clinical sensitivity and specificity of the PCR. Subsequent prospective analysis of a larger sample set would allow substantiating this conclusion. Phylogenetic identification of the cultures also allowed supporting the findings obtained with the species-specific PCRs. Indeed, speciation by phylogeny was in all cases in concordance with the results obtained via the PCR method. Initially, we anticipated that more samples would be positive by the real-time PCR assay than by culture [5]. Recently, Fornazari et al. reported that quantitative PCR presented the highest sensitivity among several techniques to detect leptospires in tissue samples, the bacteriological culture being the least sensitive. Apparently, our procedure of culturing, using macerated fresh tissue, has been highly effective. Alternatively, it cannot be excluded that the bacterial load of the tissues might have been very high.
Nevertheless, the low rate of positive animals (11%) is not too discrepant from the prevalence values found in other studies where leptospiral DNA was detected in rodent tissues by PCR-based assays, which ranged from 13% to 20%. Furthermore, as far as we know, the region of Lisbon, where the rodents were captured, is not usually regarded as having major leptospirosis problems, which may also reflect a lower prevalence of the agent in reservoirs such as wild rodents. We anticipate that our assays may be useful in studies inferring the prevalence of pathogenic leptospires in wild rodents and other animals, with the advantage of differentiating the infecting Leptospira species. The amplification assays described were able to detect pathogenic leptospires in samples of animal tissues, such as kidney or lung. Although the analysis of this kind of samples is not essential for an early diagnosis of leptospirosis, it has great value in situations such as epidemiological and post-mortem investigations. The last situation is very well illustrated in this work by the detection of pathogenic leptospires in tissues of a Patagonian mara and a swine fetus. Both animals were suspected of having leptospirosis, which was confirmed by this study. The porcine fetus was infected with a strain belonging to L. kirschneri. Pigs may be infected by several Leptospira species (and serovars) that may cause infertility, fetal death and abortion. Leptospira kirschneri has been reported but seems to be less frequently found in pigs in Portugal than other species. The Patagonian mara, a relatively large rodent that lived in the local zoo, was found to be infected with L. interrogans. To our knowledge, this is the first report describing the molecular detection or the isolation of a pathogenic leptospire from that rodent, which proved to have died of leptospirosis. Zoos are often infested with rats, which are notorious reservoirs of L. interrogans.
We hypothesise that this Patagonian mara was infected by rats as the primary infection reservoir, which would support the potential hazard of rodents in zoos for both (exotic) animals and the public. The amplification assay described in this work is able to identify the four most relevant pathogenic species of Leptospira infecting farm and wild animals. While the approach can be extended to other Leptospira species, it is important to continually evaluate the specificity of previously designed probes and primers and, if necessary, modify and improve the sequences, in order to ensure an effective and specific detection and identification of the circulating Leptospira species. Conclusions The molecular assays presented in this work allow the detection and identification of four relevant pathogenic species of Leptospira, directly from animal tissues. The assays proved to be specific and sensitive, and much faster than the bacteriological culture, reducing the time for confirmatory leptospirosis diagnosis. The assays are amenable to future automation possibilities and will reinforce the diagnostic information and enhance our knowledge about the epidemiology of leptospirosis. Supporting Information File S1 Sequence alignments showing the complementary targets of the species-specific Leptospira interrogans, L. kirschneri, L. noguchii and L. borgpetersenii probes and respective flanking primers.
/*!
 *@file Camel_FileTransfer.h
 *@brief File-transfer component definitions
 *@version 1.0
 *@section LICENSE Copyright (C) 2003-2103 CamelSoft Corporation
 *@author 郑天佐
 *@date 2013-4-3
 */
#pragma once

#include <string.h>

//!@brief Communication message codes
const int Cls_intSendCode_Heart = 0x00001000;         //!< heartbeat message
const int Cls_intSendCode_HeartOutTime = 0x00001001;  //!< heartbeat-timeout message
const int Cls_intDataContentSize = 464;               //!< payload capacity of one packet, in bytes

//!@struct Wire packet structure.
//! Fixed-size datagram exchanged between peers: a 7-int header followed by a
//! fixed payload buffer, so that sizeof(Cls_stuData) is the on-wire packet
//! size (see Cls_intDataTotalSize below). Do not reorder or resize members:
//! the layout is part of the wire format.
typedef struct tag_Cls_stuData {
    //! Zero-initialises every header field and the payload buffer.
    tag_Cls_stuData() {
        Cls_intIPAdrs = 0;
        Cls_intPort = 0;
        Cls_intFunction = 0;
        Cls_intSign = 0;
        Cls_intIndex = 0;
        Cls_intSendTime = 0;
        Cls_intSendSize = 0;
        memset(Cls_chrData, 0, Cls_intDataContentSize);
    }

    //! Accessors for the header fields (plain get/set pairs, no validation).
    int Cls_funGetIPAdrs() const { return (Cls_intIPAdrs); }
    void Cls_subSetIPAdrs(int intIPAdrs) { Cls_intIPAdrs = intIPAdrs; }
    int Cls_funGetPort() const { return (Cls_intPort); }
    void Cls_subSetPort(int intPort) { Cls_intPort = intPort; }
    int Cls_funGetFunction() const { return (Cls_intFunction); }
    void Cls_subSetFunction(int intFunction) { Cls_intFunction = intFunction; }
    int Cls_funGetSign() const { return (Cls_intSign); }
    void Cls_subSetSign(int intSign) { Cls_intSign = intSign; }
    int Cls_funGetIndex() const { return (Cls_intIndex); }
    void Cls_subSetIndex(int intIndex) { Cls_intIndex = intIndex; }
    int Cls_funGetSendTime() const { return (Cls_intSendTime); }
    void Cls_subSetSendTime(int intSendTime) { Cls_intSendTime = intSendTime; }
    int Cls_funGetSendSize() const { return (Cls_intSendSize); }
    void Cls_subSetSendSize(int intSendSize) { Cls_intSendSize = intSendSize; }

private:
    int Cls_intIPAdrs;    //!< peer address
    int Cls_intPort;      //!< peer port
    int Cls_intFunction;  //!< function code (see Cls_intSendCode_* constants)
    int Cls_intSign;      //!< send flag (see Cls_intSign_* constants)
    int Cls_intIndex;     //!< fragment sequence number within a message
    int Cls_intSendTime;  //!< send timestamp
    int Cls_intSendSize;  //!< number of payload bytes actually used

public:
    char Cls_chrData[Cls_intDataContentSize];  //!< payload bytes

} Cls_stuData, *Cls_lpstuData;

const int Cls_intDataTotalSize = sizeof(Cls_stuData);  //!< total packet size = 488 bytes

//!@struct In-memory packet wrapper.
//! Wraps a Cls_stuData packet together with bookkeeping pointers used while
//! a transfer is in flight. The three pointers are opaque to this header:
//! presumably Cls_lpfunTransfer holds a Cls_lpfunTransferCallBack and
//! Cls_lpUserData is the caller's context — confirm against the users of
//! this struct.
typedef struct tag_Cls_stuDataPkg {
    tag_Cls_stuDataPkg() {
        Cls_subInit();
    }

    //! Resets all pointers to NULL and zeroes the embedded packet.
    void Cls_subInit() {
        Cls_chrPSendData = NULL;
        Cls_lpfunTransfer = NULL;
        Cls_lpUserData = NULL;
        memset(&Cls_Data, 0, Cls_intDataTotalSize);
    }

    //! Accessors; the getters return references so callers can rebind the
    //! pointers in place.
    char *&Cls_funGetPSendData() { return (Cls_chrPSendData); }
    void Cls_subSetPSendData(char* chrPSendData) { Cls_chrPSendData = chrPSendData; }
    void *&Cls_funGetfunTransfer() { return (Cls_lpfunTransfer); }
    void Cls_subSetfunTransfer(void *lpfunTransfer) { Cls_lpfunTransfer = lpfunTransfer; }
    void *&Cls_funGetUserData() { return (Cls_lpUserData); }
    void Cls_subSetUserData(void *lpUserData) { Cls_lpUserData = lpUserData; }

    Cls_stuData Cls_Data;  //!< embedded wire packet

private:
    char *Cls_chrPSendData;   //!< pointer to the outgoing data buffer
    void *Cls_lpfunTransfer;  //!< transfer (callback) pointer
    void *Cls_lpUserData;     //!< user context pointer

} Cls_stuDataPkg, *Cls_lpstuDataPkg;

const int Cls_intDataPkgTotalSize = sizeof(Cls_stuDataPkg);  //!< size of the packet wrapper

//!@struct Heartbeat send structure (payload of an outgoing heartbeat).
typedef struct tag_Cls_stuPackageSendHeart {
    int Cls_intUserID;    //!< user ID
    int Cls_intUserType;  //!< user type
} Cls_stuPackageSendHeart, *Cls_lpstuPackageSendHeart;

//!@struct Heartbeat receive structure (a received heartbeat plus the
//! sender information recorded on arrival).
typedef struct tag_Cls_stuPackageRcvHeart {
    tag_Cls_stuPackageRcvHeart() {
        Cls_subInit();
    }

    //! Zeroes all fields, including the embedded send-heartbeat payload.
    void Cls_subInit() {
        Cls_intUserID = 0;
        Cls_intIPAdrs = 0;
        Cls_intPort = 0;
        Cls_intUserType = 0;
        Cls_intSendTime = 0;
        memset(&stuSendHeart, 0, sizeof(Cls_stuPackageSendHeart));
    }

    //! Accessors (plain get/set pairs, no validation).
    int Cls_funGetUserID() const { return (Cls_intUserID); }
    void Cls_subSetUserID(int intUserID) { Cls_intUserID = intUserID; }
    int Cls_funGetIPAdrs() const { return (Cls_intIPAdrs); }
    void Cls_subSetIPAdrs(int intIPAdrs) { Cls_intIPAdrs = intIPAdrs; }
    int Cls_funGetPort() const { return (Cls_intPort); }
    void Cls_subSetPort(int intPort) { Cls_intPort = intPort; }
    int Cls_funGetUserType() const { return (Cls_intUserType); }
    void Cls_subSetUserType(int intUserType) { Cls_intUserType = intUserType; }
    int Cls_funGetSendTime() const { return (Cls_intSendTime); }
    void Cls_subSetSendTime(int intSendTime) { Cls_intSendTime = intSendTime; }

    Cls_stuPackageSendHeart stuSendHeart;  //!< embedded heartbeat payload

private:
    int Cls_intUserID;    //!< user ID
    int Cls_intIPAdrs;    //!< sender address
    int Cls_intPort;      //!< sender port
    int Cls_intUserType;  //!< user type
    int Cls_intSendTime;  //!< send timestamp

} Cls_stuPackageRcvHeart, *Cls_lpstuPackageRcvHeart;

//!@brief Packet sign (communication) states
const int Cls_intSign_NoSign = 0x00000000;    //!< no message
const int Cls_intSign_WaitDo = 0x00000001;    //!< pending
const int Cls_intSign_Doing = 0x00000002;     //!< processing
const int Cls_intSign_Response = 0x00000003;  //!< response message
const int Cls_intSign_Heart = 0x00000004;     //!< heartbeat

//!@brief Transfer status codes
const int Cls_intSendStatus_NoSign = 0x00000000;    //!< idle
const int Cls_intSendStatus_Normal = 0x00000001;    //!< transferring normally
const int Cls_intSendStatus_Retrying = 0x00000002;  //!< retransmitting
const int Cls_intSendStatus_Faild = 0x00000003;     //!< send failed

//!@brief File-transfer error codes
const int clsFileTransfer_intErrorCode_Success = 0x00000001;      //!< success
const int clsFileTransfer_intErrorCode_NoRights = 0x00000002;     //!< validation failed
const int clsFileTransfer_intErrorCode_Socket = 0x0000000D;       //!< socket error
const int clsFileTransfer_intErrorCode_ThreadW = 0x00000021;      //!< thread creation failed
const int clsFileTransfer_intErrorCode_Handle = 0x00000022;       //!< handle creation failed
const int clsFileTransfer_intErrorCode_OutOfCache = 0x00000023;   //!< buffer limit exceeded
const int clsFileTransfer_intErrorCode_OutOfMemory = 0x00000024;  //!< out of memory
const int clsFileTransfer_intErrorCode_OutTime = 0x00000025;      //!< operation timed out
const int clsFileTransfer_intErrorCode_UserR = 0x00000098;        //!< peer refused to receive the file
const int clsFileTransfer_intErrorCode_UserExit = 0x00000099;     //!< local side exited voluntarily

//!@brief Data-queue error codes
const int clsDataQueue_intErrorCode_Success = 0x00000001;       //!< success
const int clsDataQueue_intErrorCode_InvalidValue = 0x00000002;  //!< invalid value
const int clsDataQueue_intErrorCode_OutOfCache = 0x00000003;    //!< buffer exceeded
const int clsDataQueue_intErrorCode_OutOfMemory = 0x00000004;   //!< out of memory
const int clsDataQueue_intErrorCode_NoData = 0x00000005;        //!< no data available

//!@brief Transfer callback: invoked with the caller context and the packet
//! wrapper being processed; the meaning of the returned unsigned is defined
//! by the implementation (not visible in this header).
typedef unsigned(*Cls_lpfunTransferCallBack)(const void *pContext, const Cls_lpstuDataPkg pData);
10.1 Can essential fatty acids prevent early preterm delivery? A meta-analysis of evidence Background Amongst preterm babies, those delivered before 34 weeks' gestation contribute disproportionately highly to prematurity-related complications. There is a need for effective, acceptable, accessible and safe interventions to prevent early preterm delivery. Objective To evaluate the effects of essential fatty acids on early (<34 weeks) and any (<37 weeks) preterm delivery and relevant fetal outcomes by a systematic review and meta-analysis. Methods We searched MEDLINE, EMBASE and the Cochrane Library (1970–2013). Randomised controlled trials that evaluated effects of essential fatty acids in preventing preterm delivery were selected. Results were summarised as relative risks and 95% confidence intervals for dichotomous outcomes and mean differences for continuous outcomes. We undertook subgroup analysis planned a priori according to the dose, timing of the intervention and risk status. Results Of the nine randomised trials evaluating the effects of essential fatty acids on preterm delivery, six RCTs (4193 women) reported the effect on early preterm delivery. The risk of early preterm delivery was significantly reduced by 58% (RR 0.42, 95% CI 0.27, 0.66; p = 0.0002; I² = 0%) with essential fatty acids compared to the control group. There was a 17% reduction in the overall risk of any preterm delivery (RR 0.83, 95% CI 0.70, 0.98; p = 0.03; I² = 0%). Essential fatty acids significantly increased the mean gestational age by 2.0 weeks (95% CI 0.42, 3.5 weeks) and the mean birth weight by 122.1 g (95% CI 47.4, 196.8). Conclusion Essential fatty acids prevent early preterm delivery.
/*
 * Copyright [yyyy] [name of copyright owner]
 *
 * ====================================================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * ====================================================================
 */
package com.lafaspot.common.concurrent.internal;

import java.lang.ref.Reference;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;
import java.lang.reflect.Array;
import java.util.LinkedList;
import java.util.ListIterator;
import java.util.concurrent.Callable;

import javax.annotation.Nonnull;
import javax.annotation.concurrent.NotThreadSafe;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.lafaspot.common.concurrent.WorkerBlockManager;
import com.lafaspot.common.concurrent.WorkerConfig;
import com.lafaspot.common.concurrent.WorkerExecutorService;

/**
 * Internal class used by WorkerExecutorService to manage workers in a safe, reliable way.
 *
 * <p>One instance of this callable drives one thread: it repeatedly pulls a fresh worker from the
 * shared queue, executes it once, and keeps not-yet-finished workers in an internal list that it
 * round-robins over. The thread exits after {@link #THREAD_IDLE_WAIT_TIME_MILLIS} of having nothing
 * to do, returning the still-unfinished workers in a {@link WorkerManagerState}.</p>
 */
@SuppressWarnings("rawtypes")
@NotThreadSafe
public class WorkerManagerOneThread implements Callable<WorkerManagerState> {

    /** Executor service instance. If shut down, this thread will exit. */
    private final WorkerExecutorService executorService;

    /** Internal list of workers to process in this thread. */
    private final LinkedList<WorkerWrapper> workers;

    /** Iterator over the internal list of workers; reset each time a full pass completes. */
    private ListIterator<WorkerWrapper> workerIter;

    /** Reference to the global worker queue from which additional workers are retrieved. */
    private final WorkerQueue workerSourceQueue;

    /** Logger instance. */
    private final Logger logger = LoggerFactory.getLogger(WorkerManagerOneThread.class);

    /** Time in milliseconds this thread keeps running while there are no workers to process. */
    private static final int THREAD_IDLE_WAIT_TIME_MILLIS = 30000;

    /** Minimum time in nanoseconds between two execute() calls on the same worker. */
    private static final int TIME_INTERVAL_PER_EXECUTE_CALL_NANOS = 1000000;

    /** Time in milliseconds after which an error is logged for a worker whose execute() took too long. */
    private static final long WORKER_EXECUTE_WARN_TIME_MILLIS = 1000;

    /** Time in milliseconds the thread sleeps when the accumulated sleep debt reaches SLEEP_COUNT_NANO. */
    private static final int SLEEP_TIME_MILLIS = 1;

    /** Nanosecond threshold at which the accumulated sleep debt is paid with a real sleep. */
    private static final int SLEEP_COUNT_NANO = 999999;

    /** Sleep buffer factor (nanoseconds) added between each execute. */
    private static final int SLEEP_BUFFER_FACTOR = 50000;

    /** Cleanup frequency for thread locals - 1 hour (milliseconds). */
    private static final int THREADLOCAL_CLEANUP_FREQUENCY = 60 * 60 * 1000;

    /** Factor for converting milliseconds to nanoseconds. */
    private static final int MULTIPLIER_NANOS_MILLIS = 1000000;

    /** Worker config. */
    private WorkerConfig workerConfig;

    /** Wall-clock time in milliseconds when the thread locals were last cleaned. */
    private long lastThreadLocalCleanupTime;

    /**
     * Creates an instance of this callable for the given executor and worker queue.
     *
     * @param executor the executor instance to which this object will be submitted
     * @param queue the WorkerQueue from which this thread should retrieve workers
     * @param config worker config
     */
    public WorkerManagerOneThread(@Nonnull final WorkerExecutorService executor, @Nonnull final WorkerQueue queue,
            @Nonnull final WorkerConfig config) {
        executorService = executor;
        workers = new LinkedList<WorkerWrapper>();
        workerIter = workers.listIterator();
        workerSourceQueue = queue;
        workerConfig = config;
        lastThreadLocalCleanupTime = System.currentTimeMillis();
    }

    /**
     * Loops until there have been no workers to process for {@link #THREAD_IDLE_WAIT_TIME_MILLIS},
     * at which point the thread exits. Each iteration pulls a worker from the shared pool and one
     * from this manager's internal queue and executes them. If the shared-pool worker is not done
     * after its first call it is added to the internal queue. This gives new workers high priority
     * so their (expected-asynchronous) work is started quickly.
     *
     * @return WorkerManagerState holding the workers still owned by this manager
     * @throws Exception if an error occurs processing the workers
     */
    @Override
    public WorkerManagerState call() throws Exception {
        long doneSince = 0;
        boolean done = false;
        int sleep = 0;
        long loopStartTime = System.nanoTime();
        long loopTotalSleepTime = 0;
        while (!done && !executorService.isShutdown()) {
            final WorkerWrapper existingWorker;
            final WorkerWrapper newWorker = workerSourceQueue.getWorker();
            if (workerIter.hasNext()) {
                existingWorker = workerIter.next();
            } else {
                // We reached the end of our queue: report stats for the completed pass and reset the iterator.
                analyzeStats(workers, loopStartTime, loopTotalSleepTime);
                loopStartTime = System.nanoTime();
                loopTotalSleepTime = 0;
                workerIter = workers.listIterator();
                if (workerIter.hasNext()) {
                    existingWorker = workerIter.next();
                } else {
                    existingWorker = null;
                }
                // BUGFIX: the original compared a System.nanoTime()-derived value (which has an
                // arbitrary origin, and was multiplied instead of divided by the nanos/millis
                // factor) against a System.currentTimeMillis() timestamp, so the periodic cleanup
                // never fired on schedule. Use wall-clock milliseconds consistently.
                if (workerConfig.getEnableThreadLocalCleanupPeriodically()) {
                    final long nowMillis = System.currentTimeMillis();
                    if (nowMillis - lastThreadLocalCleanupTime > THREADLOCAL_CLEANUP_FREQUENCY) {
                        cleanThreadLocals();
                        lastThreadLocalCleanupTime = nowMillis;
                    }
                }
            }
            boolean addNewWorkerToList = false;
            boolean newWorkerExitedWithTrue = false;
            if (newWorker != null) {
                // Execute the new worker first, and if it's not complete add it to our queue.
                WorkerBlockManager blockManager = newWorker.getWorkerImpl().getBlockManager();
                try {
                    blockManager.enterExecuteCall();
                    if (!newWorker.execute()) {
                        addNewWorkerToList = true;
                    } else {
                        newWorkerExitedWithTrue = true;
                    }
                } finally {
                    blockManager.exitExecuteCall(newWorkerExitedWithTrue);
                }
            }
            boolean existingWorkerExitedWithTrue = false;
            if (existingWorker != null) {
                WorkerBlockManager blockManager = existingWorker.getWorkerImpl().getBlockManager();
                try {
                    blockManager.enterExecuteCall();
                    if (existingWorker.execute()) {
                        // This worker is done, so remove it from our internal queue.
                        workerIter.remove();
                        existingWorkerExitedWithTrue = true;
                    }
                } finally {
                    blockManager.exitExecuteCall(existingWorkerExitedWithTrue);
                }
            }
            if (addNewWorkerToList) {
                workerIter.add(newWorker);
            }
            if (existingWorker == null && newWorker == null) {
                // Nothing to do: release the thread once it has been idle for THREAD_IDLE_WAIT_TIME_MILLIS.
                if (doneSince == 0) {
                    doneSince = System.currentTimeMillis();
                }
                if (System.currentTimeMillis() - doneSince > THREAD_IDLE_WAIT_TIME_MILLIS) {
                    done = true;
                }
            } else {
                doneSince = 0;
            }
            // If a worker just exited with true, don't sleep.
            if (newWorkerExitedWithTrue || existingWorkerExitedWithTrue) {
                continue;
            } else {
                final int workersSize = workers.size();
                if (workersSize <= 1) {
                    sleep = 0;
                    loopTotalSleepTime += SLEEP_TIME_MILLIS;
                    Thread.sleep(SLEEP_TIME_MILLIS);
                } else {
                    // Add SLEEP_BUFFER_FACTOR nanoseconds of margin, then spread
                    // TIME_INTERVAL_PER_EXECUTE_CALL_NANOS across the workers so at least that much
                    // time elapses before the same worker is called again.
                    sleep += SLEEP_BUFFER_FACTOR + TIME_INTERVAL_PER_EXECUTE_CALL_NANOS / workersSize;
                }
            }
            if (sleep >= SLEEP_COUNT_NANO) {
                sleep = 0;
                loopTotalSleepTime += SLEEP_TIME_MILLIS;
                Thread.sleep(SLEEP_TIME_MILLIS);
            }
        }
        workerSourceQueue.removeFuturesDone();
        if (workerConfig.getEnableThreadLocalCleanupOnExit()) {
            cleanThreadLocals();
        }
        return new WorkerManagerState(workers);
    }

    /**
     * Cleans thread locals of the current thread via reflection into ThreadLocalMap.
     * Any reflective failure (e.g. on JVMs that restrict access to java.lang internals) is
     * tolerated and logged rather than propagated, since cleanup is best-effort.
     */
    private void cleanThreadLocals() {
        try {
            // Get a reference to the thread locals table of the current thread.
            Thread thread = Thread.currentThread();
            java.lang.reflect.Field threadLocalsField = Thread.class.getDeclaredField("threadLocals");
            threadLocalsField.setAccessible(true);
            Object threadLocalTable = threadLocalsField.get(thread);
            if (threadLocalTable == null) {
                return;
            }
            // Get a reference to the array holding the thread local variables inside the
            // ThreadLocalMap of the current thread.
            Class threadLocalMapClass = Class.forName("java.lang.ThreadLocal$ThreadLocalMap");
            java.lang.reflect.Field tableField = threadLocalMapClass.getDeclaredField("table");
            tableField.setAccessible(true);
            Object table = tableField.get(threadLocalTable);
            if (table == null) {
                return;
            }
            // The key to the ThreadLocalMap is a WeakReference object; its referent field is a
            // reference to the actual ThreadLocal variable.
            java.lang.reflect.Field referentField = Reference.class.getDeclaredField("referent");
            referentField.setAccessible(true);
            for (int i = 0; i < Array.getLength(table); i++) {
                // Each entry in the table array of ThreadLocalMap is an Entry object
                // representing the thread local reference and its value.
                Object entry = Array.get(table, i);
                if (entry != null) {
                    // Log a debug message in here, to stop using threadlocals -- lafa
                    if (logger.isDebugEnabled()) {
                        Class threadLocalMapEntryClass = Class.forName("java.lang.ThreadLocal$ThreadLocalMap$Entry");
                        java.lang.reflect.Field entryValueField = threadLocalMapEntryClass.getDeclaredField("value");
                        entryValueField.setAccessible(true);
                        Object entryValue = entryValueField.get(entry);
                        StringBuilder sb = new StringBuilder(512);
                        if (entryValue != null) {
                            sb.append(entryValue.getClass().getName());
                            // BUGFIX: the referent of a Soft/WeakReference value may already be
                            // cleared; guard against NullPointerException before dereferencing.
                            Object valueReferent = null;
                            if (entryValue instanceof SoftReference) {
                                valueReferent = ((SoftReference) entryValue).get();
                            } else if (entryValue instanceof WeakReference) {
                                valueReferent = ((WeakReference) entryValue).get();
                            }
                            if (valueReferent != null) {
                                sb.append(":").append(valueReferent.getClass().getName());
                            }
                        }
                        logger.debug(
                                "threadlocal found please remove this from code, as it create memory/full gc problems, thread name={},class={}",
                                thread.getName(), sb.toString());
                    }
                    // Get a reference to the thread local object and remove it from the table.
                    // BUGFIX: a stale entry's ThreadLocal key may already be garbage-collected
                    // (referent == null); calling remove() on it would NPE.
                    ThreadLocal threadLocal = (ThreadLocal) referentField.get(entry);
                    if (threadLocal != null) {
                        threadLocal.remove();
                    }
                }
            }
        } catch (Exception e) {
            // BUGFIX: the original comment promised to tolerate and log, but the code threw
            // IllegalStateException, crashing the worker thread on restricted JVMs. Cleanup is
            // best-effort, so log and continue.
            logger.debug("Failed to clean thread locals via reflection; continuing", e);
        }
    }

    /**
     * Analyzes stats for all the workers in this WorkerManagerOneThread's queue. Only logs (at
     * debug level) passes whose active time clearly exceeded their sleep time.
     *
     * @param workers the worker's queue
     * @param loopStartTime the start time (nanoTime) for looping through the worker's queue
     * @param loopTotalSleepTime the total time in milliseconds this manager slept during the pass
     */
    private void analyzeStats(@Nonnull final LinkedList<WorkerWrapper> workers, final long loopStartTime,
            final long loopTotalSleepTime) {
        if (!logger.isDebugEnabled()) {
            return;
        }
        long loopDuration = System.nanoTime() - loopStartTime;
        // Convert the sleep time (millis) to nanos with the named constant instead of a bare literal.
        if (loopDuration - loopTotalSleepTime * MULTIPLIER_NANOS_MILLIS <= (2 * SLEEP_COUNT_NANO)) {
            return;
        }
        // StringBuilder: this buffer is thread-confined, no need for StringBuffer's synchronization.
        final StringBuilder sb = new StringBuilder();
        sb.append("WorkerLoopTotalSleepTime=").append(loopTotalSleepTime).append("ms, WorkerLoopTotalDuration=").append(loopDuration)
                .append("nanos, WorkersCount=").append(workers.size()).append(", Workers=[");
        for (final WorkerWrapper wrapper : workers) {
            sb.append(wrapper.getStat().getStatsAsString()).append(" ");
        }
        sb.append("]");
        logger.debug(sb.toString());
    }
}
/** * An n-dimension array of double values used for vectors, matrices, and tensors. * * @author David B. Bracewell */ @JsonTypeInfo(use = JsonTypeInfo.Id.NAME) @JsonSubTypes({ @JsonSubTypes.Type(value = Tensor.class, name = "tensor"), @JsonSubTypes.Type(value = DenseMatrix.class, name = "dm"), @JsonSubTypes.Type(value = SparseMatrix.class, name = "sm") }) @JsonAutoDetect( fieldVisibility = JsonAutoDetect.Visibility.NONE, setterVisibility = JsonAutoDetect.Visibility.NONE, getterVisibility = JsonAutoDetect.Visibility.NONE, isGetterVisibility = JsonAutoDetect.Visibility.NONE, creatorVisibility = JsonAutoDetect.Visibility.NONE ) public abstract class NDArray implements Serializable, Observation { private static final long serialVersionUID = 1L; protected static final NumberFormat decimalFormatter = new DecimalFormat(" 0.000000;-0"); @JsonProperty("shape") protected final Shape shape; @JsonProperty("label") @JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.WRAPPER_OBJECT) private Object label = null; @JsonProperty("predicted") @JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.WRAPPER_OBJECT) private Object predicted = null; @JsonProperty("weight") private double weight = 1d; /** * Instantiates a new NDArray. * * @param shape The shape of the new NDArray */ protected NDArray(@NonNull Shape shape) { this.shape = shape.copy(); } /** * Flips the matrix on its diagonal switching the rows and columns. (This is done per slice) * * @return the transposed array */ public abstract NDArray T(); /** * Adds a scalar value to each element in the NDArray * * @param value the value to add * @return the new NDArray with the scalar value added */ public NDArray add(double value) { if(value == 0) { return copy(); } return map(value, Operator::add); } /** * Adds the values in the other NDArray to this one. 
* * @param rhs the other NDArray whose values will be added * @return the new NDArray with the result of this + other */ public NDArray add(@NonNull NDArray rhs) { return map(rhs, Operator::add); } /** * Adds the values in the other NDArray to each column in this one. * * @param rhs the other NDArray whose values will be added * @return the new NDArray */ public NDArray addColumnVector(@NonNull NDArray rhs) { return mapColumn(rhs, Operator::add); } /** * Adds the values in the other NDArray to each row in this one. * * @param rhs the other NDArray whose values will be added * @return the new NDArray */ public NDArray addRowVector(@NonNull NDArray rhs) { return mapRow(rhs, Operator::add); } /** * Adds a scalar value to each element in the NDArray in-place * * @param value the value to add * @return this NDArray with the scalar value added */ public NDArray addi(double value) { if(value != 0) { return mapi(value, Operator::add); } return this; } /** * Adds the values in the other NDArray to this one in-place. * * @param rhs the other NDArray whose values will be added * @return this NDArray with the result of this + other */ public NDArray addi(@NonNull NDArray rhs) { return mapi(rhs, Operator::add); } /** * Performs a column vector addition adding the values in the other NDArray to each column in this NDArray. * * @param rhs the other NDArray whose values will be added * @return this NDArray with the result of this + other */ public NDArray addiColumnVector(@NonNull NDArray rhs) { return mapiColumn(rhs, Operator::add); } /** * Performs a row vector addition adding the values in the other NDArray to each row in this NDArray. * * @param rhs the other NDArray whose values will be added * @return this NDArray with the result of this + other */ public NDArray addiRowVector(@NonNull NDArray rhs) { return mapiRow(rhs, Operator::add); } /** * Calculates the index in the NDArray with maximum value. 
* * @return the index with maximum value */ public abstract long argmax(); /** * Calculates the index in the NDArray with minimum value. * * @return the index with minimum value */ public abstract long argmin(); private double asDouble(Object object) { if(object == null) { return Double.NaN; } else if(object instanceof NDArray) { NDArray array = Cast.as(object); if(array.shape.isScalar()) { return array.scalar(); } return array.argmax(); } return Cast.<Number>as(object).doubleValue(); } @Override public NDArray asNDArray() { return this; } private NDArray asNDArray(Object o, int dimension) { if(o == null) { return com.gengoai.apollo.math.linalg.NDArrayFactory.ND.empty(); } else if(o instanceof Number) { Number numLabel = Cast.as(o); if(dimension == 1) { return com.gengoai.apollo.math.linalg.NDArrayFactory.ND.scalar(numLabel.floatValue()); } return com.gengoai.apollo.math.linalg.NDArrayFactory.ND.array(dimension).set(numLabel.intValue(), 1f); } NDArray nd = Cast.as(o, NDArray.class); Validation.notNull(nd, "Cannot create NDArray from object."); return nd; } /** * Number of channels in the NDArray * * @return the number of channels in the NDArray */ public int channels() { return shape.channels(); } /** * Calculates the index of maximum values per column in the NDArray. * * @return the NDArray of column indexes with maximum value. */ public abstract NDArray columnArgmaxs(); /** * Calculates the index of minimum values per column in the NDArray. * * @return the NDArray of column indexes with minimum value. */ public abstract NDArray columnArgmins(); /** * Calculates the maximum values per column in the NDArray. * * @return the NDArray of maximum values per column. */ public abstract NDArray columnMaxs(); /** * Calculates the mean values per column in the NDArray. * * @return the NDArray of mean values per column. */ public NDArray columnMeans() { return columnSums().divi(shape().rows()); } /** * Calculates the minimum values per column in the NDArray. 
* * @return the NDArray of minimum values per column. */ public abstract NDArray columnMins(); /** * Calculates sums per column in the NDArray. * * @return the NDArray of sums per column. */ public abstract NDArray columnSums(); /** * Number of columns in the NDArray * * @return the number of columns in the NDArray */ public int columns() { return shape.columns(); } /** * Compacts the memory usages of sparse NDArrays. * * @return this NDArray */ public abstract NDArray compact(); public boolean isEmpty(){ return shape.sliceLength == 0 && shape.matrixLength == 0; } @Override public NDArray copy() { return Copyable.deepCopy(this); } /** * Generates a diagonal matrix per slice. * * @return The NDArray with diagonal slices. */ public abstract NDArray diag(); /** * Divides the values in the other NDArray to this one element by element. * * @param rhs the other NDArray whose values will be divided * @return the new NDArray with the result of this / other */ public NDArray div(@NonNull NDArray rhs) { return map(rhs, Operator::divide); } /** * Divides a scalar value to each element in the NDArray * * @param value the value to divide * @return the new NDArray with the scalar value divided */ public NDArray div(double value) { return map(value, Operator::divide); } /** * Divides a column vector element division dividing the values in the other NDArray to each column in this NDArray. * * @param rhs the other NDArray whose values will be divided * @return the new NDArray with the result of this / other */ public NDArray divColumnVector(@NonNull NDArray rhs) { return mapColumn(rhs, Operator::divide); } /** * Divides a row vector element division dividing the values in the other NDArray to each row in this NDArray. 
* * @param rhs the other NDArray whose values will be divided * @return the new NDArray with the result of this / other */ public NDArray divRowVector(@NonNull NDArray rhs) { return mapRow(rhs, Operator::divide); } /** * Divides a scalar value to each element in the NDArray in-place. * * @param rhs the value to divide * @return this NDArray with the scalar value divided */ public NDArray divi(@NonNull NDArray rhs) { return mapi(rhs, Operator::divide); } /** * Divides a scalar value to each element in the NDArray in-place. * * @param value the value to divide * @return this NDArray with the scalar value divided */ public NDArray divi(double value) { return mapi(value, Operator::divide); } /** * Divides a column vector element division dividing the values in the other NDArray to each column in this NDArray. * * @param rhs the other NDArray whose values will be divided * @return this NDArray with the result of this / other */ public NDArray diviColumnVector(@NonNull NDArray rhs) { return mapiColumn(rhs, Operator::divide); } /** * Divides a row vector element division dividing the values in the other NDArray to each row in this NDArray. * * @param rhs the other NDArray whose values will be divided * @return this NDArray with the result of this / other */ public NDArray diviRowVector(@NonNull NDArray rhs) { return mapiRow(rhs, Operator::divide); } /** * Calculates the dot product between this and the given other NDArray per slice. * * @param rhs the NDArray to calculate the dot product with * @return NDArray of dot products */ public abstract double dot(@NonNull NDArray rhs); /** * Creates a new NDArray with elements equal to <code>1.0</code> if its value is equal to the given value. * * @param value the value test equality for * @return the NDArray */ public NDArray eq(double value) { return test(v -> v == value); } /** * Creates a new NDArray with elements equal to <code>1.0</code> if its value is equal to the value in the other * NDArray. 
* @param rhs the NDArray whose values to test equality for
 * @return the NDArray
 */
public NDArray eq(@NonNull NDArray rhs) { return test(rhs, (v, value) -> v == value); }

/** Updates this NDArray's elements to <code>1.0</code> where the value equals the given value, in-place. @param value the value to test equality for @return this NDArray */
public NDArray eqi(double value) { return testi(v -> v == value); }

/** Updates this NDArray's elements to equal <code>1.0</code> where the value equals the value in the other NDArray, in-place. (NOTE(review): parameter lacks the @NonNull used by sibling overloads.) @param rhs the NDArray whose values to test equality for @return this NDArray */
public NDArray eqi(NDArray rhs) { return testi(rhs, (v, value) -> v == value); }

/** Fills the NDArray with the given value. @param value the value to set all cells in the NDArray @return This NDArray */
public abstract NDArray fill(double value);

/** Processes the sparse entries in this NDArray. @param consumer the consumer invoked per sparse entry */
public abstract void forEachSparse(@NonNull EntryConsumer consumer);

/** Creates a new NDArray with elements equal to <code>1.0</code> where the value is greater than or equal to the given value. @param value the value to test @return the NDArray */
public NDArray ge(double value) { return test(v -> v >= value); }

/** Creates a new NDArray with elements equal to <code>1.0</code> where the value is greater than or equal to the value in the other NDArray. @param rhs the NDArray whose values to test @return the NDArray */
public NDArray ge(@NonNull NDArray rhs) { return test(rhs, (v, value) -> v >= value); }

/** Updates this NDArray's elements to <code>1.0</code> where the value is greater than or equal to the given value, in-place. @param value the value to test @return this NDArray */
public NDArray gei(double value) { return testi(v -> v >= value); }

/**
 * Updates this NDArray element's to equal <code>1.0</code> if its value is greater than or equal to the value in
 * the other NDArray.
* @param rhs the NDArray whose values to test
 * @return this NDArray
 */
public NDArray gei(@NonNull NDArray rhs) { return testi(rhs, (v, value) -> v >= value); }

/** Gets the value at the given index (row/column if vector, entry otherwise). @param i the index @return the double value */
public abstract double get(long i);

/** Gets the value at the given row and column (assumes channel and kernel are 0). @param row the row index @param col the column index @return the double value */
public abstract double get(int row, int col);

/** Gets the value at the given channel, row, and column (assumes kernel is 0). @param channel the channel index @param row the row index @param col the column index @return the double value */
public abstract double get(int channel, int row, int col);

/** Gets the value at the given kernel, channel, row, and column. @param kernel the kernel index @param channel the channel index @param row the row index @param col the column index @return the double value */
public abstract double get(int kernel, int channel, int row, int col);

/** Creates an NDArray of the column at the given index for each slice (modifications to the result do not affect this NDArray). @param column the column index @return the column NDArray */
public abstract NDArray getColumn(int column);

/** Creates an NDArray made up of only the given columns. @param columns the columns to extract @return the new NDArray */
public abstract NDArray getColumns(int[] columns);

/**
 * Creates an NDArray made up of the columns starting at the given from index and ending at (not-including) the to
 * index.
* @param from the starting index
 * @param to the ending index (exclusive)
 * @return the new NDArray
 */
public abstract NDArray getColumns(int from, int to);

/** Gets the label associated with the NDArray. @param <T> the type of the label @return the label */
public <T> T getLabel() { return Cast.as(label); }

/** Gets the label associated with the NDArray as a double value. @return the label as double */
public double getLabelAsDouble() { return asDouble(label); }

/** Gets the label associated with the NDArray as an NDArray (dimension 1). @return the label as NDArray */
public NDArray getLabelAsNDArray() { return getLabelAsNDArray(1); }

/** Gets the label as an NDArray (vector) with the desired dimension. @param dimension the dimension @return the label as NDArray */
public NDArray getLabelAsNDArray(int dimension) { return asNDArray(label, dimension); }

/** Gets the predicted label associated with this NDArray. @param <T> the type parameter @return the predicted label */
public <T> T getPredicted() { return Cast.as(predicted); }

/** Gets the predicted label as a double value. @return the predicted label as double */
public double getPredictedAsDouble() { return asDouble(predicted); }

/** Gets the predicted label as an NDArray (dimension 1). @return the predicted label as NDArray */
public NDArray getPredictedAsNDArray() { return asNDArray(predicted, 1); }

/** Gets the predicted label as an NDArray (vector) with the desired dimension. @param dimension the dimension @return the predicted label as NDArray */
public NDArray getPredictedAsNDArray(int dimension) { return asNDArray(predicted, dimension); }

/**
 * Creates an NDArray made up of the row at the given index for each slice. (Note modifications to the new NDArray do
 * not effect this one).
* @param row the row index
 * @return the row NDArray
 */
public abstract NDArray getRow(int row);

/** Creates an NDArray made up of only the given rows. @param rows the rows to extract @return the new NDArray */
public abstract NDArray getRows(int[] rows);

/** Creates an NDArray made up of the rows starting at the given from index and ending at (not-including) the to index. @param from the starting index @param to the ending index @return the new NDArray */
public abstract NDArray getRows(int from, int to);

/** Creates a new NDArray made up of sub-portions of the slices. @param fromRow the index of the row to start slicing from @param toRow the index of the row to end the slicing at @param fromCol the index of the column to start slicing from @param toCol the index of the column to end slicing at @return the NDArray */
public abstract NDArray getSubMatrix(int fromRow, int toRow, int fromCol, int toCol);

/** NDArrays expose no variables; always returns an empty stream. */
@Override
public Stream<Variable> getVariableSpace() { return Stream.empty(); }

/** Gets the weight associated with the NDArray. @return the weight */
public double getWeight() { return weight; }

/** Creates a new NDArray with elements equal to <code>1.0</code> where the value is greater than the given value. @param value the value to test @return the NDArray */
public NDArray gt(double value) { return test(v -> v > value); }

/** Creates a new NDArray with elements equal to <code>1.0</code> where the value is greater than the value in the other NDArray. @param rhs the NDArray whose values to test @return the NDArray */
public NDArray gt(@NonNull NDArray rhs) { return test(rhs, (v, value) -> v > value); }

/**
 * Updates this NDArray element's to equal <code>1.0</code> if its value is greater than to the given value.
* @param value the value to test
 * @return this NDArray
 */
public NDArray gti(double value) { return testi(v -> v > value); }

/** Updates this NDArray's elements to <code>1.0</code> where the value is greater than the value in the other NDArray, in-place. @param rhs the NDArray whose values to test @return this NDArray */
public NDArray gti(@NonNull NDArray rhs) { return testi(rhs, (v, value) -> v > value); }

/** Increments column <code>c</code> by the given vector, in-place. (NOTE(review): undocumented in the original — semantics inferred from the name; verify against implementations.) @param c the column index @param vector the values to add @return this NDArray */
public abstract NDArray incrementiColumn(int c, NDArray vector);

/** Checks if the NDArray is made up of dense slices. @return True if the NDArray is made up of dense slices, False otherwise */
public abstract boolean isDense();

/** Identifies this object as an NDArray. @return true */
@Override
public boolean isNDArray() { return true; }

/** Number of kernels in the NDArray. @return the number of kernels in the NDArray */
public int kernels() { return shape.kernels(); }

/** Creates a new NDArray with elements equal to <code>1.0</code> where the value is less than or equal to the given value. @param value the value to test @return the NDArray */
public NDArray le(double value) { return test(v -> v <= value); }

/** Creates a new NDArray with elements equal to <code>1.0</code> where the value is less than or equal to the value in the other NDArray. @param rhs the NDArray whose values to test @return the NDArray */
public NDArray le(@NonNull NDArray rhs) { return test(rhs, (v, value) -> v <= value); }

/** Updates this NDArray's elements to <code>1.0</code> where the value is less than or equal to the given value, in-place. @param value the value to test @return this NDArray */
public NDArray lei(double value) { return testi(v -> v <= value); }

/** Updates this NDArray's elements to <code>1.0</code> where the value is less than or equal to the value in the other NDArray, in-place. @param rhs the NDArray whose values to test @return this NDArray */
public NDArray lei(@NonNull NDArray rhs) { return testi(rhs, (v, value) -> v <= value); }

/**
 * The total number of elements.
(<code>kernels * channels * rows * columns</code>)
 *
 * @return the length (total number) of the elements in the NDArray
 */
public long length() { return shape.sliceLength * shape.matrixLength; }

/** Creates a new NDArray with elements equal to <code>1.0</code> where the value is less than the given value. @param value the value to test @return the NDArray */
public NDArray lt(double value) { return test(v -> v < value); }

/** Creates a new NDArray with elements equal to <code>1.0</code> where the value is less than the value in the other NDArray. @param rhs the NDArray whose values to test @return the NDArray */
public NDArray lt(@NonNull NDArray rhs) { return test(rhs, (v, value) -> v < value); }

/** Updates this NDArray's elements to <code>1.0</code> where the value is less than the given value, in-place. @param value the value to test @return this NDArray */
public NDArray lti(double value) { return testi(v -> v < value); }

/** Updates this NDArray's elements to <code>1.0</code> where the value is less than the value in the other NDArray, in-place. @param rhs the NDArray whose values to test @return this NDArray */
public NDArray lti(@NonNull NDArray rhs) { return testi(rhs, (v, value) -> v < value); }

/** Creates a new NDArray with values from this NDArray evaluated using the given unary operator. @param operator the operation to perform on the values of this NDArray @return the transformed NDArray */
public abstract NDArray map(@NonNull DoubleUnaryOperator operator);

/**
 * Creates a new NDArray with values from this NDArray evaluated by the given binary operation with the given value.
* @param value the value
 * @param operator the operation to perform on the values of this NDArray and the given value
 * @return the transformed NDArray
 */
public abstract NDArray map(double value, @NonNull DoubleBinaryOperator operator);

/** Creates a new NDArray combining this NDArray and the given NDArray using the given binary operation. @param rhs the rhs @param operator the operation to perform on the values of this NDArray and the given NDArray @return the transformed NDArray */
public abstract NDArray map(@NonNull NDArray rhs, @NonNull DoubleBinaryOperator operator);

/** Creates a new NDArray combining the given NDArray with each column of this NDArray using the given binary operation. @param rhs the rhs @param operator the operation to perform on the values of this NDArray and the given NDArray @return the transformed NDArray */
public abstract NDArray mapColumn(@NonNull NDArray rhs, @NonNull DoubleBinaryOperator operator);

/** Updates the values in the given column of this NDArray by performing the given binary operation with the values in the given NDArray. @param column the column whose values we want to manipulate @param rhs the rhs @param operator the operation @return the transformed NDArray */
public abstract NDArray mapColumn(int column, @NonNull NDArray rhs, @NonNull DoubleBinaryOperator operator);

/** Creates a new NDArray combining the given NDArray with each row of this NDArray using the given binary operation. @param rhs the rhs @param operator the operation to perform on the values of this NDArray and the given NDArray @return the transformed NDArray */
public abstract NDArray mapRow(@NonNull NDArray rhs, @NonNull DoubleBinaryOperator operator);

/**
 * Updates the values in the given row of this NDArray by performing the given binary operation with the values in
 * the given NDArray.
* @param row the row whose values we want to manipulate
 * @param rhs the rhs
 * @param operator the operation to perform on the values of this NDArray and the given NDArray
 * @return the transformed NDArray
 */
public abstract NDArray mapRow(int row, @NonNull NDArray rhs, @NonNull DoubleBinaryOperator operator);

/** NDArrays do not support variable mapping. @throws UnsupportedOperationException always */
@Override
public void mapVariables(@NonNull Function<Variable, Variable> mapper) { throw new UnsupportedOperationException("NDArray does not support mapping."); }

/** Updates the values in this NDArray using the given unary operator, in-place. @param operator the operation to perform on the values of this NDArray @return the transformed NDArray */
public abstract NDArray mapi(@NonNull DoubleUnaryOperator operator);

/** Updates the values in this NDArray by performing the given binary operation with the given value, in-place. @param value the value @param operator the operation @return the transformed NDArray */
public abstract NDArray mapi(double value, @NonNull DoubleBinaryOperator operator);

/** Updates the values in this NDArray by performing the given binary operation with the values in the given NDArray, in-place. @param rhs the rhs @param operator the operation @return the transformed NDArray */
public abstract NDArray mapi(@NonNull NDArray rhs, @NonNull DoubleBinaryOperator operator);

/** Updates each column of this NDArray by performing the given binary operation with the values in the given NDArray, in-place. @param rhs the rhs @param operator the operation @return the transformed NDArray */
public abstract NDArray mapiColumn(@NonNull NDArray rhs, @NonNull DoubleBinaryOperator operator);

/**
 * Updates the values in the given column of this NDArray by performing the given binary operation with the values
 * in the given NDArray.
* @param column the column whose values we want to manipulate
 * @param rhs the rhs
 * @param operator the operation to perform on the values of this NDArray and the given NDArray
 * @return the transformed NDArray
 */
public abstract NDArray mapiColumn(int column, @NonNull NDArray rhs, @NonNull DoubleBinaryOperator operator);

/** Updates the values in the given row of this NDArray by performing the given binary operation with the values in the given NDArray, in-place. @param row the row whose values we want to manipulate @param rhs the rhs @param operator the operation @return the transformed NDArray */
public abstract NDArray mapiRow(int row, @NonNull NDArray rhs, @NonNull DoubleBinaryOperator operator);

/** Updates each row of this NDArray by performing the given binary operation with the values in the given NDArray, in-place. @param rhs the rhs @param operator the operation @return the transformed NDArray */
public abstract NDArray mapiRow(@NonNull NDArray rhs, @NonNull DoubleBinaryOperator operator);

/** Calculates the maximum value in the NDArray. @return the maximum value */
public abstract double max();

/** Calculates the mean value in the NDArray (sum divided by the total element count). @return the mean value */
public double mean() { return sum() / (shape().matrixLength * shape().sliceLength); }

/** Calculates the minimum value in the NDArray. @return the minimum value */
public abstract double min();

/** Creates a new NDArray by multiplying the (matrix) slices of this NDArray with those in the given NDArray. @param rhs the NDArray to multiply @return the resulting NDArray */
public abstract NDArray mmul(@NonNull NDArray rhs);

/**
 * Multiplies the values in the other NDArray to this one element by element.
* @param rhs the other NDArray whose values will be multiplied
 * @return the new NDArray with the result of this * rhs
 */
public NDArray mul(@NonNull NDArray rhs) { return map(rhs, Operator::multiply); }

/** Multiplies each element in the NDArray by the given scalar. @param value the multiplier @return the new NDArray with the scalar value multiplied */
public NDArray mul(double value) { return map(value, Operator::multiply); }

/** Column vector multiplication: multiplies each column of this NDArray by the given vector. @param rhs the multiplier vector @return the new NDArray with the result of this * rhs */
public NDArray mulColumnVector(@NonNull NDArray rhs) { return mapColumn(rhs, Operator::multiply); }

/** Row vector multiplication: multiplies each row of this NDArray by the given vector. @param rhs the multiplier vector @return the new NDArray with the result of this * rhs */
public NDArray mulRowVector(@NonNull NDArray rhs) { return mapRow(rhs, Operator::multiply); }

/** Multiplies the values in the other NDArray with this one element by element, in-place. @param rhs the multiplier NDArray @return this NDArray with the result of this * rhs */
public NDArray muli(@NonNull NDArray rhs) { return mapi(rhs, Operator::multiply); }

/** Multiplies each element in the NDArray by the given scalar, in-place. @param value the multiplier @return this NDArray with the scalar value multiplied */
public NDArray muli(double value) { return mapi(value, Operator::multiply); }

/**
 * Performs a column vector element multiplication multiplying the values in the other NDArray to each in this
 * NDArray in-place.
* * @param rhs the other NDArray whose values will be multiplied * @return the new NDArray with the result of this * other */ public NDArray muliColumnVector(@NonNull NDArray rhs) { return mapiColumn(rhs, Operator::multiply); } /** * Performs a row vector element multiplication multiplying the values in the other NDArray to each row in this * NDArray in-place. * * @param rhs the other NDArray whose values will be multiplied * @return the new NDArray with the result of this * other */ public NDArray muliRowVector(@NonNull NDArray rhs) { return mapiRow(rhs, Operator::multiply); } /** * Creates a new NDArray with elements equal to <code>1.0</code> if its value is not equal to the given value. * * @param value the value test equality for * @return the NDArray */ public NDArray neq(double value) { return testi(v -> v != value); } /** * Creates a new NDArray with elements equal to <code>1.0</code> if its value is not equal to the value in the other * NDArray. * * @param rhs the NDArray whose values to test equality for * @return the NDArray */ public NDArray neq(@NonNull NDArray rhs) { return testi(rhs, (v, value) -> v != value); } /** * Updates this NDArray element's to equal <code>1.0</code> if its value is not equal to the given value. * * @param value the value test equality for * @return this NDArray */ public NDArray neqi(double value) { return testi(v -> v != value); } /** * Updates this NDArray element's to equal <code>1.0</code> if its value is not equal to the value in the other * NDArray. 
* @param rhs the NDArray whose values to test equality for
 * @return this NDArray
 */
public NDArray neqi(@NonNull NDArray rhs) { return testi(rhs, (v, value) -> v != value); }

/** Calculates the L1 norm of the NDArray. @return the L1 norm of the NDArray */
public abstract double norm1();

/** Calculates the L2 norm of the NDArray. @return the L2 norm of the NDArray */
public abstract double norm2();

/** Pads columns (post) out to the given maximum length. (NOTE(review): undocumented in the original — verify exact padding semantics against implementations.) */
public abstract NDArray padColumnPost(int maxLength);

/** Pads rows and columns (post) out to the given maximum lengths. (NOTE(review): undocumented in the original — verify semantics.) */
public abstract NDArray padPost(int maxRowLength, int maxColumnLength);

/** Pads rows (post) out to the given maximum length. (NOTE(review): undocumented in the original — verify semantics.) */
public abstract NDArray padRowPost(int maxLength);

/** Calculates the pivot elements for this square matrix, per slice. @return A NDArray of 1's and 0's representing pivot elements. */
public abstract NDArray pivot();

/** Renders a single slice into the builder, eliding middle rows (around maxR / 2) with "..." for large slices. */
private void printSlice(NDArray slice, int maxR, int maxC, StringBuilder builder) {
   builder.append("[");
   builder.append(rowToString(slice, 0, maxC));
   int breakPoint = maxR / 2;
   for(int i = 1; i < slice.rows(); i++) {
      builder.append(",");
      if(i == breakPoint) {
         // jump past the elided middle rows (nj is the resume index)
         int nj = Math.max(slice.rows() - breakPoint, i + 1);
         if(nj > i + 1) {
            builder.append(System.lineSeparator()).append(" ...").append(System.lineSeparator());
         }
         i = nj;
      }
      builder.append(System.lineSeparator()).append(" ").append(rowToString(slice, i, maxC));
   }
   builder.append("]");
}

/** Reverse division: divides the other NDArray's values by this NDArray's values. @param lhs the NDArray whose values are the numerators @return the new NDArray with the result of lhs / this */
public NDArray rdiv(@NonNull NDArray lhs) { return map(lhs, (v1, v2) -> v2 / v1); }

/** Divides the given scalar by each element's value (e.g. scalar / element). @param value the numerator @return the new NDArray */
public NDArray rdiv(double value) { return map(value, (v1, v2) -> v2 / v1); }

/**
 * Performs a column vector division dividing the values in this NDArray from the other NDArray to each column in
 * this NDArray.
* @param lhs the other NDArray whose values will be divided
 * @return the new NDArray with the result of lhs / this
 */
public NDArray rdivColumnVector(@NonNull NDArray lhs) { return mapColumn(lhs, (v1, v2) -> v2 / v1); }

/** Reverse row vector division: divides the given vector's values by each row of this NDArray. @param lhs the numerator vector @return the new NDArray with the result of lhs / this */
public NDArray rdivRowVector(@NonNull NDArray lhs) { return mapRow(lhs, (v1, v2) -> v2 / v1); }

/** Reverse division in-place: divides the other NDArray's values by this NDArray's values. @param lhs the NDArray whose values are the numerators @return this NDArray with the result of lhs / this */
public NDArray rdivi(@NonNull NDArray lhs) { return mapi(lhs, (v1, v2) -> v2 / v1); }

/** Divides the given scalar by each element's value (e.g. scalar / element), in-place. @param value the numerator @return this NDArray */
public NDArray rdivi(double value) { return mapi(value, (v1, v2) -> v2 / v1); }

/** Reverse column vector division in-place: divides the given vector's values by each column of this NDArray. @param lhs the numerator vector @return this NDArray with the result of lhs / this */
public NDArray rdiviColumnVector(@NonNull NDArray lhs) { return mapiColumn(lhs, (v1, v2) -> v2 / v1); }

/**
 * Performs a row vector division dividing the values in this NDArray from the other NDArray to each row in this
 * NDArray in-place.
* @param lhs the other NDArray whose values will be divided
 * @return this NDArray with the result of lhs / this
 */
public NDArray rdiviRowVector(@NonNull NDArray lhs) { return mapiRow(lhs, (v1, v2) -> v2 / v1); }

/** NDArrays do not support variable removal. @throws UnsupportedOperationException always */
@Override
public void removeVariables(@NonNull Predicate<Variable> filter) { throw new UnsupportedOperationException("NDArray does not support filtering."); }

/** Reshapes the NDArray. @param dims the new dimensions of the NDArray @return this NDArray with new shape */
public abstract NDArray reshape(int... dims);

/** Calculates the index of maximum values per row in the NDArray. @return the NDArray of row indexes with maximum value. */
public abstract NDArray rowArgmaxs();

/** Calculates the index of minimum values per row in the NDArray. @return the NDArray of row indexes with minimum value. */
public abstract NDArray rowArgmins();

/** Calculates the maximum values per row in the NDArray. @return the NDArray of maximum values per row. */
public abstract NDArray rowMaxs();

/** Calculates the mean values per row (row sums divided by the number of columns). @return the NDArray of mean values per row. */
public NDArray rowMeans() { return rowSums().divi(shape().columns()); }

/** Calculates the minimum values per row in the NDArray. @return the NDArray of minimum values per row. */
public abstract NDArray rowMins();

/**
 * Calculates the sum per row in the NDArray.
 *
 * @return the NDArray of sum per row.
*/
public abstract NDArray rowSums();

/** Renders row i of the slice, eliding middle columns (around maxC / 2) with ", ..." for wide slices. */
private String rowToString(NDArray slice, int i, int maxC) {
   StringBuilder builder = new StringBuilder("[");
   builder.append(decimalFormatter.format(slice.get(i, 0)));
   int breakPoint = maxC / 2;
   for(int j = 1; j < slice.columns(); j++) {
      if(j == breakPoint) {
         // jump past the elided middle columns (nj is the resume index)
         int nj = Math.max(slice.columns() - breakPoint, j + 1);
         if(nj > j + 1 && nj < slice.columns()) {
            builder.append(", ...");
         }
         if(nj < slice.columns()) {
            j = nj;
         } else {
            continue;
         }
      }
      builder.append(", ").append(decimalFormatter.format(slice.get(i, j)));
   }
   return builder.append("]").toString();
}

/** Number of rows in the NDArray. @return the number of rows in the NDArray */
public int rows() { return shape.rows(); }

/** Reverse subtraction: subtracts this NDArray's values from the other NDArray's values. @param lhs the NDArray to subtract from @return the new NDArray with the result of lhs - this */
public NDArray rsub(@NonNull NDArray lhs) { return map(lhs, (v1, v2) -> v2 - v1); }

/** Subtracts each element's value from the given scalar (e.g. scalar - element). @param value the value to subtract from @return the new NDArray with the scalar value subtracted */
public NDArray rsub(double value) { return map(value, (v1, v2) -> v2 - v1); }

/** Reverse column vector subtraction: subtracts each column of this NDArray from the given vector. @param lhs the vector to subtract from @return the new NDArray with the result of lhs - this */
public NDArray rsubColumnVector(@NonNull NDArray lhs) { return mapColumn(lhs, (v1, v2) -> v2 - v1); }

/**
 * Performs a row vector subtraction subtracting the values in this NDArray from the other NDArray to each row in
 * this NDArray.
* * @param lhs the other NDArray whose values will be subtracted * @return the new NDArray with the result of this - other */ public NDArray rsubRowVector(@NonNull NDArray lhs) { return mapRow(lhs, (v1, v2) -> v2 - v1); } /** * Subtracts the values in the this NDArray from the other NDArray in-place. * * @param lhs the other NDArray whose values will be subtracted from * @return the new NDArray with the result of other - this */ public NDArray rsubi(@NonNull NDArray lhs) { return mapi(lhs, (v1, v2) -> v2 - v1); } /** * Subtracts each element's value from the given scalar (e.g. scalar - element) in-place * * @param value the value to subtract * @return the new NDArray with the scalar value subtracted */ public NDArray rsubi(double value) { return mapi(value, (v1, v2) -> v2 - v1); } /** * Performs a column vector subtraction subtracting the values in this NDArray from the other NDArray to each column * in this NDArray in-place. * * @param lhs the other NDArray whose values will be subtracted * @return the new NDArray with the result of this - other */ public NDArray rsubiColumnVector(@NonNull NDArray lhs) { return mapiColumn(lhs, (v1, v2) -> v2 - v1); } /** * Performs a row vector subtraction subtracting the values in this NDArray from the other NDArray to each row in * this NDArray in-place. * * @param lhs the other NDArray whose values will be subtracted * @return the new NDArray with the result of this - other */ public NDArray rsubiRowVector(@NonNull NDArray lhs) { return mapiRow(lhs, (v1, v2) -> v2 - v1); } /** * Returns the scalar value of this NDArray (value at <code>(0,0,0,0)</code>) * * @return the scalar value */ public double scalar() { return get(0); } /** * Selects all values matching the given predicate. * * @param predicate the predicate to test * @return new NDArray with values passing the given predicate and zeros elsewhere */ public NDArray select(@NonNull DoublePredicate predicate) { return map(v -> predicate.test(v) ? 
v : 0.0); } /** * Selects all values for which the corresponding element in the given NDArray has a value of <code>1.0</code>. * * @param rhs the NDArray used to determine which values are selected * @return the selected NDArray */ public NDArray select(@NonNull NDArray rhs) { return map(rhs, (v1, v2) -> v2 == 1.0 ? 1.0 : 0.0); } /** * Selects all values matching the given predicate in-place. * * @param predicate the predicate to test * @return this NDArray with values passing the given predicate and zeros elsewhere */ public NDArray selecti(@NonNull DoublePredicate predicate) { return mapi(v -> predicate.test(v) ? v : 0.0); } /** * Selects all values for which the corresponding element in the given NDArray has a value of <code>1.0</code> * in-place. * * @param rhs the NDArray used to determine which values are selected * @return the selected NDArray */ public NDArray selecti(@NonNull NDArray rhs) { return mapi(rhs, (v1, v2) -> v2 == 1.0 ? 1.0 : 0.0); } /** * Sets the value of the element at the given index. (row/column if vector, entry if other) * * @param i the index * @param value the value * @return this NDArray */ public abstract NDArray set(long i, double value); /** * Sets the value of the element at the given row and column (assumes kernel and channel are 0). * * @param row the row index * @param col the column index * @param value the value * @return this NDArray */ public abstract NDArray set(int row, int col, double value); /** * Sets the value of the element at the given channel, row, and column (assumes kernel is 0). 
* @param channel the channel index
 * @param row the row index
 * @param col the column index
 * @param value the value
 * @return this NDArray
 */
public abstract NDArray set(int channel, int row, int col, double value);

/** Sets the value of the element at the given kernel, channel, row, and column. @param kernel the kernel index @param channel the channel index @param row the row index @param col the column index @param value the value @return this NDArray */
public abstract NDArray set(int kernel, int channel, int row, int col, double value);

/** Sets the values of the <code>ith</code> column to those in the given NDArray. @param i the column index @param array the array of new column values @return this NDArray */
public abstract NDArray setColumn(int i, @NonNull NDArray array);

/** Sets the label associated with the NDArray. @param label the label @return This NDArray */
public NDArray setLabel(Object label) {
   this.label = label;
   return this;
}

/** Sets the matrix associated with the given kernel and channel (delegates to setSlice via the shape's slice index). @param kernel the kernel index @param channel the channel index @param array the matrix @return this NDArray */
public NDArray setMatrix(int kernel, int channel, @NonNull NDArray array) { return setSlice(shape.sliceIndex(kernel, channel), array); }

/** Sets the predicted label for this NDArray. @param predicted the predicted label @return this NDArray */
public NDArray setPredicted(Object predicted) {
   this.predicted = predicted;
   return this;
}

/** Sets the values of the <code>ith</code> row to those in the given NDArray. @param i the row index @param array the array of new row values @return this NDArray */
public abstract NDArray setRow(int i, @NonNull NDArray array);

/**
 * Sets the slice at the given index.
 *
 * @param slice the slice index
 * @param array the NDArray of values for the new slice
 * @return this NDArray
 */
public abstract NDArray setSlice(int slice, @NonNull NDArray array);

/**
 * Sets the weight associated with the NDArray.
 *
 * @param weight the weight
 * @return this NDArray
 */
public NDArray setWeight(double weight) {
    // Weight is stored as a float: the double argument is narrowed here.
    this.weight = (float) weight;
    return this;
}

/**
 * Gets the shape of the NDArray.
 *
 * @return the shape
 */
public final Shape shape() {
    return shape;
}

/**
 * The number of sparse entries (a dense NDArray will have <code>size()=length()</code>)
 *
 * @return the number of sparse entries.
 */
public long size() {
    // Default assumes dense storage; sparse implementations override this.
    return length();
}

/**
 * Returns a view of a single slice of this NDArray. Note that changes to the slice will effect
 * this NDArray.
 *
 * @param slice the slice index
 * @return the NDArray for the slice
 */
public abstract NDArray slice(int slice);

/**
 * Calculates the index of the per-slice maximum values.
 *
 * @return the per-slice argmax
 */
public abstract NDArray sliceArgmaxs();

/**
 * Calculates the index of the per-slice minimum values.
 *
 * @return the per-slice argmin
 */
public abstract NDArray sliceArgmins();

/**
 * Calculates the dot product between each slice of this and the given NDArray
 *
 * @param rhs the NDArray to calculate the dot product with
 * @return the per-slice dot product
 */
public abstract NDArray sliceDot(NDArray rhs);

/**
 * Calculates the per-slice maximum values.
 *
 * @return the per-slice maximum values
 */
public abstract NDArray sliceMaxs();

/**
 * Calculates the per-slice mean values.
 *
 * @return the per-slice mean values
 */
public abstract NDArray sliceMeans();

/**
 * Calculates the per-slice minimum values.
 *
 * @return the per-slice minimum values
 */
public abstract NDArray sliceMins();

/**
 * Calculates the per-slice L1 norm values.
 *
 * @return the per-slice L1 norm values
 */
public abstract NDArray sliceNorm1();

/**
 * Calculates the per-slice L2 norm values.
 *
 * @return the per-slice L2 norm values
 */
public abstract NDArray sliceNorm2();

/**
 * Calculates the per-slice sum of square values.
 *
 * @return the per-slice sum of square values
 */
public abstract NDArray sliceSumOfSquares();

/**
 * Calculates the per-slice sums.
 *
 * @return the per-slice sums
 */
public abstract NDArray sliceSums();

/**
 * Gets the indices of the sparse entries
 *
 * @return the index array
 */
public abstract int[] sparseIndices();

/**
 * Calculates the sparsity (percentage of elements with a zero value) of the NDArray.
 *
 * @return the sparsity (0 for a dense NDArray, where <code>size() == length()</code>)
 */
public double sparsity() {
    // 1 - (stored entries / total entries); sparse implementations override size().
    return 1.0 - ((double) size()) / ((double) length());
}

/**
 * Subtracts the values in the other NDArray from this one.
 *
 * @param rhs the other NDArray whose values will be subtracted
 * @return the new NDArray with the result of this - other
 */
public NDArray sub(@NonNull NDArray rhs) {
    return map(rhs, Operator::subtract);
}

/**
 * Subtracts a scalar value from each element in the NDArray
 *
 * @param value the value to subtract
 * @return the new NDArray with the scalar value subtracted
 */
public NDArray sub(double value) {
    return map(value, Operator::subtract);
}

/**
 * Performs a column vector subtraction subtracting the values in the other NDArray from each
 * column in this NDArray.
 *
 * @param rhs the other NDArray whose values will be subtracted
 * @return the new NDArray with the result of this - other
 */
public NDArray subColumnVector(@NonNull NDArray rhs) {
    return mapColumn(rhs, Operator::subtract);
}

/**
 * Performs a row vector subtraction subtracting the values in the other NDArray from each row
 * in this NDArray.
 *
 * @param rhs the other NDArray whose values will be subtracted
 * @return the new NDArray with the result of this - other
 */
public NDArray subRowVector(@NonNull NDArray rhs) {
    return mapRow(rhs, Operator::subtract);
}

/**
 * Subtracts the values in the other NDArray to this one in-place.
* * @param rhs the other NDArray whose values will be subtracted * @return the new NDArray with the result of this - other */ public NDArray subi(@NonNull NDArray rhs) { return mapi(rhs, Operator::subtract); } /** * Subtracts a scalar value to each element in the NDArray in-place. * * @param value the value to subtract * @return the new NDArray with the scalar value subtracted */ public NDArray subi(double value) { return mapi(value, Operator::subtract); } /** * Performs a column vector subtraction subtracting the values in the other NDArray to each column in this NDArray * in-place. * * @param rhs the other NDArray whose values will be subtracted * @return the new NDArray with the result of this - other */ public NDArray subiColumnVector(@NonNull NDArray rhs) { return mapiColumn(rhs, Operator::subtract); } /** * Performs a row vector subtraction subtracting the values in the other NDArray to each row in this NDArray * in-place. * * @param rhs the other NDArray whose values will be subtracted * @return the new NDArray with the result of this - other */ public NDArray subiRowVector(@NonNull NDArray rhs) { return mapiRow(rhs, Operator::subtract); } /** * Calculates the sum of all values in the NDArray * * @return the sum */ public abstract double sum(); /** * Calculates the sum of squares for all values in the NDArray * * @return the sum of squares */ public abstract double sumOfSquares(); /** * Tests the given predicate on the values in the NDArray returning 1 when TRUE and 0 when FALSE * * @param predicate the predicate to test * @return new NDArray with test results */ public NDArray test(@NonNull DoublePredicate predicate) { return map(v -> { if(predicate.test(v)) { return 1.0; } return 0d; }); } /** * Compares entries in this NDArray with the given NDArray using the given comparison, setting entries to * <code>1.0</code> if the comparison returns true and <code>0.0</code> otherwise. 
* * @param rhs the other NDArray * @param predicate the predicate * @return the NDArray with test results */ public NDArray test(@NonNull NDArray rhs, @NonNull DoubleBinaryPredicate predicate) { return map(rhs, (v1, v2) -> { if(predicate.test(v1, v2)) { return 1.0; } return 0d; }); } /** * Tests the given predicate on the values in the NDArray returning 1 when TRUE and 0 when FALSE. (in-place) * * @param predicate the predicate to test * @return new NDArray with test results */ public NDArray testi(@NonNull DoublePredicate predicate) { return mapi(v -> { if(predicate.test(v)) { return 1.0; } return 0d; }); } /** * Compares entries in this NDArray with the given NDArray using the given comparison, setting entries to * <code>1.0</code> if the comparison returns true and <code>0.0</code> otherwise. (in-place) * * @param rhs the other NDArray * @param predicate the predicate * @return the NDArray with test results */ public NDArray testi(@NonNull NDArray rhs, @NonNull DoubleBinaryPredicate predicate) { return mapi(rhs, (v1, v2) -> { if(predicate.test(v1, v2)) { return 1.0; } return 0d; }); } /** * Converts the NDArray to double array * * @return the double array */ public abstract double[] toDoubleArray(); /** * Converts the NDArray into an array of DoubleMatrix. 
 * (one per slice)
 *
 * @return the array of DoubleMatrix
 */
public abstract DoubleMatrix[] toDoubleMatrix();

/**
 * Converts the NDArray to a float array.
 *
 * @return the float array
 */
public abstract float[] toFloatArray();

// 2-D float representation of this NDArray.
public abstract float[][] toFloatArray2();

// 3-D float representation of this NDArray.
public abstract float[][][] toFloatArray3();

// Per-slice FloatMatrix representation of this NDArray.
public abstract FloatMatrix[] toFloatMatrix();

@Override
public String toString() {
    // Default rendering: at most 4 slices and a 10x10 window per slice.
    return toString(4, 10, 10);
}

/**
 * Generates a string form of the NDArray with a maximum number of slices, rows, and columns
 *
 * @param maxSlices  the max slices
 * @param maxRows    the max rows
 * @param maxColumns the max columns
 * @return the string
 */
public String toString(int maxSlices, int maxRows, int maxColumns) {
    StringBuilder builder = new StringBuilder("[");
    // Vectors are rendered flat as a single comma-separated list.
    if(shape.isVector()) {
        for(long i = 0; i < length(); i++) {
            if(i > 0) {
                builder.append(", ");
            }
            builder.append(get((int) i));
        }
        return builder.append("]").toString();
    }
    // Row of dots used as an ellipsis marker when slices are elided.
    String outDot = Strings.repeat(Strings.padStart(".", 8, ' '),
                                   Math.min(columns(), maxColumns + 2));
    printSlice(slice(0), maxRows, maxColumns, builder);
    int breakPoint = maxSlices / 2;
    for(int i = 1; i < shape.sliceLength; i++) {
        builder.append(",");
        if(i == breakPoint) {
            // Skip the middle slices, showing only the first and last maxSlices/2.
            // NOTE(review): if nj lands at shape.sliceLength the subsequent slice(i)
            // call would be out of range — confirm inputs keep nj < sliceLength.
            int nj = Math.max(shape.sliceLength - breakPoint, i + 1);
            if(nj > i + 1) {
                builder.append(System.lineSeparator())
                       .append(System.lineSeparator()).append(outDot)
                       .append(System.lineSeparator()).append(outDot)
                       .append(System.lineSeparator()).append(outDot)
                       .append(System.lineSeparator())
                       .append(System.lineSeparator());
            }
            i = nj;
        }
        builder.append(System.lineSeparator()).append(" ");
        printSlice(slice(i), maxRows, maxColumns, builder);
    }
    return builder.toString();
}

/**
 * Unitizes the NDArray by dividing the values by L2 Norm (per slice)
 *
 * @return Unitized version of this NDArray
 */
public abstract NDArray unitize();

@Override
public void updateVariables(@NonNull Consumer<Variable> updater) {
    // NDArray participates in the Observation hierarchy but is not variable-backed.
    throw new UnsupportedOperationException("NDArray does not support updating.");
}

/**
 * Zeros out
 * the entries of the NDArray
 *
 * @return this NDArray
 */
public NDArray zero() {
    return fill(0d);
}

/**
 * Creates a new NDArray with zero values with the same shape as this NDArray.
 *
 * @return the new zero valued NDArray
 */
public abstract NDArray zeroLike();

/**
 * Interface for testing two double values
 */
@FunctionalInterface
interface DoubleBinaryPredicate {

    /**
     * Tests a relation between two double values
     *
     * @param v1 the first value
     * @param v2 the second value
     * @return True if the predicate evaluates to True, False otherwise
     */
    boolean test(double v1, double v2);

}

/**
 * Interface for processing individual entries of an NDArray
 */
@FunctionalInterface
public interface EntryConsumer {

    /**
     * Consumes the value of the given index
     *
     * @param index the index
     * @param value the value
     */
    void apply(long index, double value);

}

/**
 * Greedily decodes a tag sequence from this NDArray, treating each row as a distribution over
 * labels and repairing invalid transitions via the given validator.
 *
 * @param encoder   decodes a label index into its string tag
 * @param validator accepts or rejects a (tag, previous-tag) transition
 * @return the decoded sequence of tagged variables, one per row
 */
public Sequence<?> decodeSequence(@NonNull Encoder encoder, @NonNull SequenceValidator validator) {
    VariableSequence sequence = new VariableSequence();
    String previous = "O";
    for (int word = 0; word < rows(); word++) {
        NDArray matrix = getRow(word);
        int l = (int) matrix.argmax();
        String tag = encoder.decode(l);
        // Knock out invalid candidates until the validator accepts the best remaining tag.
        // NOTE(review): this mutates the row via set(); if getRow returns a view, the
        // mutation writes through to this NDArray — confirm that is acceptable to callers.
        while (!validator.isValid(tag, previous, matrix)) {
            matrix.set(l, Double.NEGATIVE_INFINITY);
            l = (int) matrix.argmax();
            tag = encoder.decode(l);
        }
        previous = tag;
        sequence.add(Variable.real(tag, matrix.get(l)));
    }
    return sequence;
}

}
def create_local_chapter_list_registry():
    """Build the schema registry describing a Local Chapter entity.

    The registry exposes four fields: chapter code, chapter name, city, and
    a state drop-down restricted to a fixed list of Indian states and union
    territories.
    """
    registry = FieldRegistry(
        'Local Chapter',
        description='Local Chapters',
        extra_schema_dict_values={
            'className': 'inputEx-Group new-form-layout'})

    # Plain free-text string fields, registered in display order.
    for field_name, field_label in (
            ('code', 'Local Chapter Code'),
            ('name', 'Local Chapter Name'),
            ('city', 'City')):
        registry.add_property(SchemaField(field_name, field_label, 'string'))

    # Fixed choices for the state selector. NOTE(review): the first entry's
    # second element uses underscores ('Andaman_and_Nicobar_Islands') unlike
    # the rest — preserved as-is; confirm whether that is intentional.
    state_choices = [
        ('ANDAMAN AND NICOBAR ISLANDS', 'Andaman_and_Nicobar_Islands'),
        ('ANDHRA PRADESH', 'Andhra Pradesh'),
        ('ARUNACHAL PRADESH', 'Arunachal Pradesh'),
        ('ASSAM', 'Assam'),
        ('BIHAR', 'Bihar'),
        ('CHANDIGARH', 'Chandigarh'),
        ('CHHATTISGARH', 'Chhattisgarh'),
        ('DADRA AND NAGAR HAVELI', 'Dadra and Nagar Haveli'),
        ('DELHI', 'Delhi'),
        ('GOA', 'Goa'),
        ('GUJARAT', 'Gujarat'),
        ('HARYANA', 'Haryana'),
        ('HIMACHAL PRADESH', 'Himachal Pradesh'),
        ('JAMMU AND KASHMIR', 'Jammu and Kashmir'),
        ('JHARKHAND', 'Jharkhand'),
        ('KARNATAKA', 'Karnataka'),
        ('KERALA', 'Kerala'),
        ('LAKSHADWEEP', 'Lakshadweep'),
        ('MADHYA PRADESH', 'Madhya Pradesh'),
        ('MAHARASHTRA', 'Maharashtra'),
        ('MANIPUR', 'Manipur'),
        ('MEGHALAYA', 'Meghalaya'),
        ('MIZORAM', 'Mizoram'),
        ('NAGALAND', 'Nagaland'),
        ('ODISHA', 'Odisha'),
        ('PONDICHERRY', 'Pondicherry'),
        ('PUNJAB', 'Punjab'),
        ('RAJASTHAN', 'Rajasthan'),
        ('SIKKIM', 'Sikkim'),
        ('TAMIL NADU', 'Tamil Nadu'),
        ('TELANGANA', 'Telangana'),
        ('TRIPURA', 'Tripura'),
        ('UTTARAKHAND', 'Uttarakhand'),
        ('UTTAR PRADESH', 'Uttar Pradesh'),
        ('WEST BENGAL', 'West Bengal'),
    ]
    registry.add_property(SchemaField(
        'state', 'State', 'string', optional=False,
        select_data=state_choices,
        extra_schema_dict_values={
            'className': 'inputEx-Field assessment-dropdown'}))
    return registry
Modeling temporal and hormonal regulation of plant transcriptional response to wounding Abstract Plants respond to wounding stress by changing gene expression patterns and inducing the production of hormones including jasmonic acid. This wounding transcriptional response activates specialized metabolism pathways such as the glucosinolate pathways in Arabidopsis thaliana. While the regulatory factors and sequences controlling a subset of wound-response genes are known, it remains unclear how wound response is regulated globally. Here, we investigated how these responses are regulated by incorporating putative cis-regulatory elements, known transcription factor binding sites, in vitro DNA affinity purification sequencing, and DNase I hypersensitive sites to predict genes with different wound-response patterns using machine learning. We observed that regulatory sites and regions of open chromatin differed between genes upregulated at early and late wounding time-points as well as between genes induced by jasmonic acid and those not induced. Expanding on what we currently know, we identified cis-elements that improved model predictions of expression clusters over known binding sites. Using a combination of genome editing, in vitro DNA-binding assays, and transient expression assays using native and mutated cis-regulatory elements, we experimentally validated four of the predicted elements, three of which were not previously known to function in wound-response regulation. Our study provides a global model predictive of wound response and identifies new regulatory sequences important for wounding without requiring prior knowledge of the transcriptional regulators. Introduction Plants respond to environmental stresses by reprogramming their pattern of gene expression that triggers chemical and physiological responses (). These stress responses can be important to plant survival in their respective niches and are likely subjected to selection ().
Wound stress is a common stress experienced by plants when they are under certain biotic stresses such as attack by insects or abiotic stress such as wind damage and can induce a chemical response that produces compounds of human interest (). Response to stresses such as wounding requires gene expression reprogramming, a complex process that involves multiple levels of regulation. At the DNA sequence level, short stretches of DNA (cis-regulatory elements, CREs) are recognized and bound by transcription factors (TFs) that can activate or repress gene expression (Wittkopp and Kalay, 2012). Beyond the level of DNA sequence, chromatin structure can influence whether a regulatory element is accessible to a TF and can be modified based on stress-response signals (). Finally, reprogramming can also occur by modifying or turning over mRNA (;Hutvagner and Simard, 2008). Stress responses change over time, adding temporal complexity to transcriptional response. For example, after an initial response, genes that are turned on may act to turn on or off other genes, resulting in cascading effects. This type of gene expression reprogramming mechanism is beneficial for plants when different responses are needed at different times. For example, response to wounding stress in plants changes over time as the plant first needs to recognize damaging agents, then responds by sending various hormone signals, and ultimately repairs the wound (). This means that stress-responsive genes may be regulated differently depending on when they need to be expressed. The production of various hormone signals allows plants to coordinate their response to different stresses because the interactions of certain hormones can regulate a specific response from the plant by changing the expression of certain genes. For example, response to wounding stress involves several hormones, with the most ubiquitous signal being jasmonic acid (JA; Howe and Jander, 2008). 
After wounding, JA levels increase and bind to Jasmonate ZIM (JAZ) domain repressor proteins, which allows MYC2 TFs and other basic helix-loop-helix (bHLH) TFs to become active (). MYC2 TFs then activate wounding responses, such as JA biosynthesis, to amplify the JA signal and activate other defensive processes (). Additional hormones interact with JA to moderate wounding response. For instance, while JA induces the expression of certain wound-response genes, ethylene simultaneously represses the expression of these genes at the damaged site in order to make sure the correct spatial response pattern is produced (). Ethylene also works synergistically with JA to fine-tune wounding response by inducing the expression of proteinase inhibitor genes (O') and by activating ETHYLENE RESPONSE FACTOR 1, another TF that triggers defense responses (). Abscisic acid (ABA), which is induced in response to many abiotic stresses, is also induced by wounding (). While ethylene, ABA, and JA rapidly respond to wounding, other hormones such as auxin and cytokinin, start to accumulate around 12 hours (h) after wounding occurs and are involved in signaling for the regulation of expression of genes that work to repair the wound (). While a great deal is known about hormone signaling in response to wounding, it is unclear what other regulatory mechanisms are involved in response to wounding and how these mechanisms interact with hormone signals. Wounding can also induce the production of specialized metabolites that can deter further stress. For example, after wounding stress, Arabidopsis thaliana (Arabidopsis) activates glucosinolate pathways. The glucosinolates and the bioproducts generated from their degradation affect the plant's interactions with biotic stressors, such as microbes and herbivores (Yan and Chen, 2007). Additionally, mutants with decreased glucosinolate levels show greater susceptibility to the necrotrophic fungus Fusarium oxysporum (). 
Glucosinolate production is regulated by JA, salicylic acid, and ethylene. These hormones work together to modulate glucosinolate levels in response to stress, by activating MYB (myeloblastosis) and DNA-binding with one finger TFs (Yan and Chen, 2007). Additionally, glucosinolates can be divided into different types, such as indole or aliphatic glucosinolates, and the production of these may be induced by various stresses and regulated in different ways (Yan and Chen, 2007). While specific TFs have been shown to turn on glucosinolate biosynthesis (Frerigmann and Gigolashvili, 2014), the regulatory elements or chromatin structure of how and when these TFs bind has not been resolved. At the cis-regulatory level, a few CREs underlying woundresponse regulation have been discovered experimentally. An example is CGCGTT, found in the promoters of genes which rapidly respond to wounding (as well as other stresses) within an hour after treatment (). Another example is the G-box, CACGTG, which is bound by Myc TFs in response to wounding and JA treatment after 1 h (). Other elements implicated in early wound response include W-box (TTGACC), GCC box (AGCCGCC or GCCGCC), jasmonate and elicitor-responsive expression element (AGACCGCC) and drought-response element (TACCGACAT; ;). Most wound regulatory element studies focus on response to wounding after 1 h or validation of the derivatives of the G-box element, with only a few studies focusing on later time-points that are not G-box related (He and Gan, 2001). Studies that have compared early and late time points have focused on changes in gene expression over time (), not on how regulatory elements change or have found only regulatory elements related to the G-box element across time points (). There are likely additional CREs that remain to be discovered at both early and late time points. 
Notably, the earlier studies have demonstrated the feasibility of establishing computational models capable of predicting plant spatial stress responses to, e.g. high salinity, using known and newly discovered CREs (). In doing so, the computational model provides a means to globally assess the extent to which included CREs are sufficient to predict stress response and to pinpoint the most relevant CREs. Nonetheless, such a model is yet to be established for wound response. Thus, while several studies have found specific regulatory elements related to wounding, a global model of wound-response regulation across time has not emerged. In addition, wound response has been shown to have JA-dependent and JA-independent components (Schilmiller and Howe, 2005). However, existing studies on this area focus on comparing wound-inducible genes that are JA-dependent or JA-independent (e.g. ), roles of regulators controlling JA-independent response (), signaling components of JAdependent response involving COI1 (). There is not yet a study comparing the cis-regulatory program of JA-dependent and JA-independent response. The goals of this study were to uncover the cis-regulatory code involved in regulating temporal responses to wounding stress, to see how wounding stress independent of the wound-induced hormone JA is regulated, and finally to understand how genes in certain specialized metabolism pathways are regulated. Here, we assessed the extent of divergence in gene expression among various time-points following wounding by correlating wounding data with other types of stress or hormone treatment using an existing modeling framework (;). By using a time course data set, where transcriptional response was recorded over a 24-h period (), we captured differences in differential gene expression among varying time-points and the global regulatory pattern required to regulate these transcriptional responses. 
While this data set has been used before to identify regulatory elements (Ma and Bohnert, 2007), the CREs identified were limited to only known motifs from the PLACE database (), and the focus was on general stress response. In this study, we expand on earlier studies by including CREs derived from model predictions, applying a system-wide modeling approach, and focusing on responses to specific stress i.e., wounding. Because most regulatory elements occur 1,000-bp upstream of the transcriptional start sites in the promoter region of the gene in Arabidopsis (;), we focused on this region to identify putative CREs (pCREs). In addition, by clustering wound-responsive genes into groups based on whether they respond to JA, we were able to single-out differences between JA and non-JA regulatory mechanisms regarding wounding. Furthermore, we identified important regulatory elements for the wound-responsive genes in the pathway glucosinolate biosynthesis from tryptophan, which is induced by wounding. Finally, by using machine learning modeling, we were able to identify the most important regulatory elements for each time-point and experimentally validate one known and three previously unknown elements regulating wounding response. Transcriptional response to wounding across timepoints To understand how transcriptional response to wounding varies across time-points, we used the wounding treatment data from an existing expression data set, which contains seven abiotic stress treatments and where the wounding treatment was applied to 18-day-old Arabidopsis seedlings (). Samples were harvested at multiple time-points ranging from 15 min to 24 h after wounding treatment. The sampling of control treatment was performed in parallel to exclude circadian effects (). 
We identified genes that were up-or downregulated at the different time-points (diagonal values; Figure 1A) and how frequently the same genes were differentially expressed in these different time-points (lower triangle; Figure 1A). The type of wound-responsive genes was named with two components: time-point after wounding, up-or downregulated after wounding treatment, e.g. 0.25hr_up, 1hr_down ( Figure 1A). There was a cascading effect, where the majority of 0.25hr_up and 0.5hr_up genes overlap with each other (84% and 61%, respectively) and were still upregulated at 1 h (63% and 70%, respectively), but by 3 h ≤25% of those genes were still upregulated ( Figure 1A, also see Supplemental Data Set S1 for genes present in each wound-responsive cluster). Thus, different time-points after wounding have overlapping but distinct sets of genes, which are up-or downregulated, suggesting temporal variation in how wounding response is regulated. To determine how response to wounding differs from response to other environmental conditions, we measured how similar the pattern of differential gene expression was between different wounding time-points and other abiotic stress, biotic stress, and hormone treatments (see "Materials and Methods"). The Pearson's correlation coefficient (PCC) was used to measure the correlation of the log 2 fold change (log2FC) values across genes between wounding and other stress/hormone treatments. The PCC values of genes were used to group treatment data sets with similar differential gene expression using hierarchical clustering ( Figure 1B). We found that wound response correlated with both abiotic and biotic stress responses ( Figure 1B). Early patterns of wounding differential gene expression (DGE; 15, 30 min or 1 h after wounding) were more highly correlated with those of early abiotic stress response compared with later wounding time-points (12 or 24 h after wounding).
Gene expression patterns at 30 min and 1 h after wounding were also more similar to early responses (30 min to 3 h) under abiotic stresses, such as cold, UV-B, osmotic, and genotoxic stress ( Figure 1B; Supplemental Data Set S2). Additionally, early DGE 15 min and 1 h after wounding were more similar to each other (PCC = 0.39) and to 30 min after wounding (PCC = 0.33 and 0.30, respectively) than to later wounding time-points (for PCC results, see Supplemental Data Set S2). Thus, transcriptional responses were more similar among some comparable time-points between treatments than among largely differing time-points within a particular treatment. This indicates that temporal patterns can influence gene expression more than the type of abiotic stress, and that wounding can elicit a similar response to other types of abiotic stress. When observing wounding-response patterns in relation to biotic stress, 15-min, 1-, 12-, and 24-h timepoint wounding responses all correlated with different types of biotic stress ( Figure 1B; Supplemental Data Set S2). Thus, early wounding response is correlated with both abiotic and biotic stress. However, at later time-points (12 and 24 h), wound responses are more highly correlated with late biotic stresses than with any other stress (PCC for biotic stress P. infestans at 12 h was 0.48 and 0.38 for 12 and 24 h). On the other hand, wounding DGE responses at 3 and 6 h after wounding do not correlate with biotic stress response (PCC range -0.09 to 0.07; Supplemental Data Set S2). Our findings confirm that the initial wounding response is akin to general stress response as previously suggested in Walley et al.. In terms of the relationships between wounding and hormonal responses, DGE 15 min after wounding was not similar to DGE 30 min after hormone treatment (PCC range -0.07 to 0.14 for all treatments). 
By 30 min after wounding, however, DGE was similar to DGE 30 min after treatment with ABA, amino-cyclopropane carboxylate (ethylene precursor, ACC), brassinosteroid (BL), gibberellic acid (GA), and JA, PCCs ranging from 0.37 to 0.52 ( Figure 1B; Supplemental Data Set S2), indicating that initial response to wounding triggers the production of multiple hormones. The DGE responses at 3 and 6 h after wounding were even more similar to the DGE response after 30-min treatment of ABA, ACC, BL, GA, and JA (PCC range, 0.39-0.54), than were most other wounding time-points (PCC range, -0.04 to 0.21; Figure 1B; Supplemental Data Set S2). Finally, 12 and 24 h after wounding, transcriptomic responses showed little correlation with DGE responses after 30 min of hormone treatment (PCC range, -0.15 to 0.26; Supplemental Data Set S2). Overall, the high correlations of DGE patterns in early and 3-to 6-h time-points after wounding to early hormone treatment suggests wounding activates a hormonal response, recruiting hormone-responsive genes among other genes. Modeling temporal wound response using machine learning The temporal differences in transcriptional response to wounding described above suggest that the regulation of wounding-response changes over time, with regulatory control being more similar within early and mid-range timepoints (0.25, 0.5, 1, 3, and 6 h), and within late time-points (12 and 24 h) compared to between these time-points (Supplemental Data Sets S1 and S2). Thus, woundresponsive genes were divided into 12 clusters depending on each time-point and the directions of response, for example 1hr_up refers to being upregulated at 1 h while 3hr_down refers to being downregulated at 3 h (Figure 1; see A B Figure 1 Gene expression correlation across stress and hormone data sets and the overlap of wound and JA differentially expressed genes. A, Heatmap showing the number of genes overlapping in each wound-response cluster. 
The order of rows and columns are the same, based on time-points and directions of differential regulation. Number of genes range from 0 (white) to 760 (red) and actual values are provided in the heatmap. B, Heatmap of PCC based on the log 2 FC between treatment and control among different conditions (stress or hormone treatment) at different time-points. PCC values in heatmap range from 1 (red) to -1 (blue). The rows and columns were ordered based on hierarchical clustering. The stress and hormone treatments as well as treatment time-points are labeled by colors. "Materials and Methods"). To compare what regulatory mechanisms were important across different time-points/ response directions, we estimated the regulatory code of transcriptional response to wounding for each cluster using machine learning approaches. Here, the regulatory code for a cluster was defined as a machine learning model that could classify a gene as being differentially regulated or nondifferentially regulated in a cluster based on likely regulatory sequences. Note that the regulatory code of downregulation 3 and 6 h after wounding were not modeled because too few genes were in these clusters. First, we tested how well known regulatory sequences were able to model wounding response. We collected 52 known cis-regulatory elements (referred to as CREs) associated with JA, wounding, or insect responses identified previously using experimental or computational approaches (see "Materials and Methods"; Supplemental Data Set S3). We mapped each putative regulatory sequence to the putative promoter regions (see "Materials and Methods") of each gene in a cluster, as well as to genes in a "null" cluster, consisting of genes that are not significantly upregulated or downregulated under any stress or hormone treatment. Two algorithms, random forest (RF) and support vector machine (SVM) were used to build models for each woundingresponse cluster using cross-validation (see "Materials and Methods"). 
In all sections, RF results were reported unless noted otherwise. To measure model performance, F-measure was used which jointly considers precision and recall (see "Materials and Methods"). Using known CREs, the F-measures for models built for each wound-response cluster ranged from 0.67 to 0.71 (median = 0.68), scores that show our models performed better than random guessing (F-measure = 0.5) but were not perfect predictors (F-measure = 1; for RF models: Figure 2A, for SVM models: Supplemental Figure S1A and Supplemental Data Set S4). Note that the cluster 12hr_down was not analyzed because no known regulatory elements were defined as present in the promoters of the genes in this cluster. Next, we incorporated additional regulatory information to see if our model could be further improved. We included in vitro DNA binding data of 510 TFs in Arabidopsis generated with DNA affinity purification sequencing (DAP-seq; O') and information about DNase I hypersensitive sites (DHSs) in Arabidopsis sampled at different developmental stages including seedling (leaf samples) and 2-week-old plants (flower buds; ). Each DAP-seq and DHSs feature was considered present if its peak coordinates overlapped with the promoter region of a gene. Models trained using both known sequence and DAP-seq and DHSs features generally performed slightly better than models using only known CRE, with the F-measure ranging from 0.66 to 0.74 (median = 0.69; Figure 2; Supplemental Data Set S4). Models for genes upregulated in early wounding response (0.25, 0.5, and 1 h) benefited the most from the addition of these two data sets, with an increase of 0.03, 0.03, and 0.02 in F-measure, respectively. This may be because early wound-response clusters have larger gene numbers than clusters for later time-points. Having a larger gene set may allow for a higher degree of overlap with DAP-seq or DHSs features. 
Thus, more known information in the form of the DAP-seq data may improve the performance of early time-point clusters more than later time-points. Overall, while known sequence-based information together with DAP-seq and DHSs information is predictive of differential gene expression in response to wounding across time-points, the models still have substantial room for improvement. Determining the relative importance of known motifs and additional regulatory information for predicting temporal wound response To understand what known elements, TFs (based on DAPseq), and DHSs are particularly important for predicting responses at different times after wounding, we determined the importance of each feature in each model (see "Materials and Methods"; Supplemental Data Set S5). In Figure 2B, the top 10 features for upregulated time-point clusters are shown. For early wound response (genes upregulated 0.25, 0.5, and 1 h after wounding), the most important known CREs were CGCGTT (first ranked), a known regulatory element for rapid wound response (RWR; ) and CACGTG (second ranked) that is bound by TFs in the bHLH family in response to wounding and JA treatment (). Genes with the RWR elements are known to respond quickly to wounding and have a variety of functions in the downstream responses, including chromatin remodeling, signal transduction, and mRNA processing (). TFs that respond to wounding stress such as MYC2, MYC3, and MYC4 bind the CACGTG motif and respond to both JA and wounding, and induce other JA-responsive genes, ultimately triggering defense response to herbivory (). In addition to the important contribution to the regulatory code for genes upregulated 0.25-1 h post wounding, CACGTG was still important (ranked 1 or 2) among genes upregulated 3, 6, and 12 h after wounding, while the RWR element was no longer the most important contributor. By 24 h after wounding, the CACGTG element was no longer highly ranked. 
DAP-seq binding sites were less important in predicting wound response than the known CREs or DHSs ( Figure 2B; Supplemental Data Set S5). A few DAP-seq binding sites ranked among the top 10 most important features, including the calmodulin binding transcription activator (CAMTA) TFs that bind to AAGCGCGTG and were ranked third most important for genes upregulated 0.25 or 0.5 h after wounding but dropped to 11th at 1 h after wounding, and were even lower in later time-points. Consistent with earlier findings, CAMTA TFs are general stress-response factors triggered early during multiple stresses, including wounding (). The CBF TF in the AP2/EREBP family that binds to GGCGGCGGCGG ranked 10th in the 1 h/ upregulation model and 4th at 3 h after wounding. Interestingly, it has been reported that CAMTA TFs regulate CBFs (). Thus, the ranked importance of these TFs at each time-point is consistent with their regulatory interactions. Nonetheless, all DAP-seq sites became less important for predicting genes upregulated 6, 12, and 24 h after wounding. Because known CRE sites in response to wounding were ranked in these clusters but DAP-seq sites were not, this may reflect the fact that DAP-seq identifies TF binding sites (TFBSs) in vitro, regardless of whether the sites are accessible in vivo or not. Together with the mediocre model performance (Figure 2A), these findings show temporal differences in wounding regulatory codes, but also that known CREs and DAP-seq data do not fully capture how wounding response is regulated, especially at later time-points. In addition to known CREs and DAP-seq sites, open chromatin sites (DHS) were important for predicting expression regulation at all time-points after wounding (top-ranked DHS sites for each cluster ranged from ranks 1-4), particularly at later time-points ( Figure 2B; Supplemental Data Set S5). For example, at 24 h after wounding, the top 12 most important features were all DHS-related. 
We hypothesize two potential explanations for this finding. First, at later time-points, the functional diversity of expressed genes has increased so that their transcriptional regulatory mechanisms have become more complicated (due to both wounding and repair mechanisms) and thus no single CRE or DAPseq feature can be found with high importance. The second possibility is that the known CRE or DAP-seq features important for later time-points are not present in our data set. Although important, DHS sites do not provide additional information to improve the F-measures of our models especially at later time-points. Thus, we hypothesized that regulatory sequences not yet identified could be important regulators of wound response, especially for later wound response. Finding important temporal putative cis-regulatory elements for wound response To test our hypothesis that there were unknown regulatory sequences controlling wounding response, we identified pCREs with a k-mer finding approach (), where all possible 6-30-mer sequences were tested for enrichment in the putative promoters of genes for each wound-response cluster (see "Materials and Methods"). Based on this criterion, 42-1,081 pCREs were identified as enriched in genes from each wound-response cluster, with the exception of the 12hr_down cluster, which had no enriched pCREs (for enrichment statistics of pCREs; see Supplemental Data Set S6). For each wound-response cluster, the pCREs were used to build five replicate woundresponse prediction models and the reported model performance (F-measure) and feature importance were based on averages of the five models. We found that models built with pCREs alone (F-measure range = 0.73-0.81; Figure 2A; Supplemental Data Set S4) perform better than models built with known CREs, DAP-seq and DHSs for all clusters (F-measure range = 0.66-0.74; Figure 2A; Supplemental Data Set S4). 
Because the number of pCREs exceeds the number of known CREs, we modeled the 1hr_up wounding cluster using only the top 52 pCREs and compared model A B Figure 2 Performance of wound-response cluster prediction models. A, Each row indicates models for a specific wound-response cluster using different input features (columns). The models were generated with RF. Known CREs refers to those reported in the literature (Supplemental Data Set S3). The F-measure ranges from 0.5 (white) to 1 (red). The bar chart next to the heat map represents the numbers of genes in clusters. B, The top 10 most important features in models built using known CREs, DAP-seq sites, and DHSs for upregulated time-point clusters. The bars are colored in the same way as Figure 1B. performance to the model using the 52 known CREs. We found that the top 52 pCRE-based model performs slightly better (F-measure of 0.72) compared to the known 52 CREbased model (F-measure of 0.69). Interestingly, models that were built by combining pCREs with DAP-seq, and DHS data (F-measure = 0.67-0.80; Figure 2A; Supplemental Data Set S4) did not necessarily perform better than models built using only pCREs, with the exception of the 12hr_up timepoint, perhaps reflecting the increasingly more important roles of regulation beyond the cis-regulatory level. We also note that the 12-h and 24-h time-points, as well as downregulated gene clusters, have smaller gene numbers overall, thus while they have high F-measures, this may make these models less generalizable than models with higher gene numbers. In addition to building binary models, we also built a regression model for the 1-h time-point to see if the level of expression could be predicted using regulatory features. 
The regression models, however, were not predictive (for SVM = PCC -0.07, RF = PCC -0.44; Supplemental Data Set S4), revealing the challenges in predicting expression level but also the benefits of clustering genes based on their expression to make better binary predictions. Overall, these findings indicate that these pCREs contributed information beyond what was available from known DAP-seq and DHSs data in the regulation of wound response at different timepoints. To understand why the models improve by using pCREs, and what influence pCREs have across wounding timepoints relative to known information and open chromatin sites, we looked at the average importance rank (normalized importance score, scaled between 0 and 1, see "Materials and Methods") of all features in the models (including features related to pCREs, DAP-seq sites, and/or DHSs) across the post-wounding time course ( Figure 3). We found that DHSs tend to be the most important features for most time-points ( Figure 3, A-J) apart from late downregulated time-points (Figure 3, K and L). However, in each of these clusters we found some pCREs to be more important than DHS features. For example, at 1hr_up 169 out of the top 200 features were pCREs. Finally, DAP-seq sites were less important than DHSs and pCREs except at 12-h and 24-h time-points for downregulated genes. Although the DHS data used was not generated under wounding stress (), it is surprisingly useful and we cannot rule out the possibility that the plants were actually wounded during sample preparation. To completely capture the importance of chromatin accessibility in wound response, we will need DHS data generated with wounded plant samples instead of using DHSs data generated in a different context. We should also note that DAP-seq sites are always of the least importance. 
With our findings that adding DHS/DAP-seq information does not improve our models ( Figure 2A) and the fact that pCREs are also important at every time-point, this indicates that the identified pCREs may better uncover the regulatory code complexity underlying wound-response regulation than, particularly DAP-seq sites that are available for a subset of TFs in Arabidopsis. Correlation to TF families and cis-regulatory differences across time Figure 4 shows the importance rank across all time-points for the top 10 most important pCREs for each wounding model. Like how similar sets of genes were differentially expressed at nearby time-points (i.e. the cascading effect), we found more important pCREs were shared between closer time-points ( Figure 4). Because the pCREs were discovered from genes with similar wound-response patterns, this cascading effect on the shared number of important pCREs was expected. However, some pCREs were uniquely important for a narrow time frame (for the importance rank of pCRE and PCC of pCREs with known TF binding motifs (TFBMs), see Supplemental Data Set S7; for raw importance scores, see Supplemental Data Set S8). Next, we determined which pCRE was similar to a known TFBM and which was likely a previously unknown regulatory element. We first calculated the sequence similarity between each pCRE and each known binding motif. For this we used DAP-seq sites, which are generated in vitro, as well as CIS-BP sites (), which are TFBSs found in vivo using chromatin immunoprecipitation sequencing. For early time-points after wounding (i.e. 0.25, 0.5, and 1 h), many of the top important pCREs were shared and resembled TFBSs in the CG-1/CAMTA, bZIP/BZR, FAR1, LOB, and bHLH TF families (right two panels; Figure 4; Supplemental Data Set S7). The finding that different binding sites resemble multiple TF families is consistent with the notion that a variety of signals, and thus TFs, are induced by wounding (Howe, 2004;). 
However, not all pCREs were highly correlated with known TF binding families as 26 correlations between DAP-seq sites and pCREs, and 77 correlations between CIS-BP sites and pCREs shown in Figure 4 had PCCs ≤ 0.75, indicative of substantial differences between pCREs and known TFBSs. Focusing on the top 3 most important pCREs for each time-point ( Figure 5, based on average rank — see "Materials and Methods"), the pCREs CCGCGT and CACGTG were most similar to the binding motifs of CG-1/CAMTA and MYC2 bHLH TFs, respectively. Thus, the timing when they were important for predicting wounding response was consistent with the timing when known CREs for CAMTA and MYC2 TFs were important for the models built using only known CREs, DAP-seq, and DHSs ( Figure 2B). The CACGTG element, which is important in binding TFs for JA response (), remained important at both 3 and 6 h after wounding (ranked 10 and 5, respectively), indicating JA responses have been activated. Other top 3 important early pCREs remained important across the wider range of timepoints and were not known as wounding CREs. One example is ACACGT, a pCRE most similar to the binding motif for bZIP family TFs, which are activated by ABA () and regulate responses to water deprivation ( Figure 5). This pCRE was enriched in the promoters of genes from all time-points and important (rank ≤ 11) for models of wounding response at all time-points except 24 h ( Figure 4; Supplemental Data Set S7). Two pCREs (GTCGGC and GTCACA) that did not resemble known wounding CREs were uniquely important for models built for mid-range time-points (i.e. 3 and 6 h after wounding), as the 5th most important pCREs for the genes upregulated at 3 h and 6th most important at 6 h, respectively ( Figure 5). These elements were most similar to binding motifs of B3 and Homeodomain family TFs, respectively. 
Given these TF families are involved in development, response to auxin, and secondary wall biogenesis, this indicates that by 3-6 h after wounding, the damage is likely being repaired. At the last two time-points (12 and 24 h after wounding), ATATTAT, which was most similar to binding motifs of TFs in the ARID family, was ranked 24th and 14th respectively ( Figure 5; Supplemental Data Set S7). The ARID family is involved in regulating glucosinolate metabolism, indicating that specialized metabolism pathways are turned on or augmented 12 h after wounding and are still important after 24 h. Another two important pCREs at the latest time-points, ATAATAA and AAAATGT, resemble elements that were bound by TFs from Homeodomain and GRF (Growth-regulating factors) families, respectively ( Figure 5; Supplemental Data Set S7), which regulate development and may be important in repairing the wound (van der ;;). In summary, we found that pCREs important for our models contain some known wounding CREs, but mostly regulatory sequences that are not known to be involved in wounding response. Additionally, while similar to TFBSs, most pCREs are not identical and contain slight changes in key positions, which may affect binding specificity. pCREs important for wound-response models at early time-points (0.25–0.5 h after wounding) tend to be associated with multiple stress and hormone responses, while pCREs important for models 1 h after wounding tend to be associated with TFs involved in JA and ABA signaling. Finally, 3-24 h after wounding the important pCREs tend to be associated with TFs involved in growth and pCREs important for very late responses (12-24 h after wounding) are associated with some TFs related to metabolic defense. 
Our models of the cis-regulatory code in response to wounding demonstrate how sets of pCREs, which are likely bound by a variety of TFs, are important at different response times after wounding and could work to regulate a dynamic response to wounding over time. Experimental validation of important CREs in early wound response We validated our findings using the CRISPR/Cas9 system in planta to evaluate the biological significance of two of the important pCREs (CCGCGT and CACGTG) based on our model and prior studies. CCGCGT is the top (most important) pCRE found for models of 0.25 (rank = 1), 0.5 (rank = 1), and 1 h (rank = 3) after wounding (Supplemental Data Set S7). CACGTG, a known CRE involved in wounding response (Figueroa and Browse, 2012), is ranked 30, 17, and 8 at 0.25, 0.5, and 1 h after wounding (Supplemental Data Set S7). CCGCGT is a variation of the CGCG box, which has been previously characterized as responsive to wounding signals, as well as other hormone (ABA) and oxidative signals, by binding the TF CAMTA5 (CALMODULIN-BINDING TRANSCRIPTION ACTIVATOR 5, AT4G16150) to the ethylene-responsive gene EIN3 (ETHYLENE-INSENSITIVE3, AT3G20770; Yang and Poovaiah, 2002). The CAMTA-related TF family and its binding to the CGCG box has also been shown to respond to cold treatment and may impart freezing tolerance in Arabidopsis (). However, it is unknown how this motif affects other wound-responsive genes and has not been assessed in vivo via CRISPR-Cas9. Thus, we chose CRISPR/Cas9 target promoters that contain the CCGCGT motif and sorted out the candidates by the following criteria. First, the expression level of the gene, which has the target motif on its promoter region, was relatively high in fold change at the early time-points in response to wounding stress. Second, the pCRE site is near the PAM sequence such that the site is rendered susceptible to the CRISPR/Cas9 mutation (). 
We finally selected genes JAZ2 (JASMONATE-ZIM-DOMAIN PROTEIN 2, AT1G74950) and GER5 (GEM-RELATED 5, AT5G13200). GER5, although not known to be involved in wounding response, was highly expressed 0.5, 1, and 3 h after wounding and contained the CCGCGT motif in the promoter region. JAZ2 is a well-known JA-responsive gene () and the promoter region contained the G-box motif (CACGTG), which can be utilized as a positive control in our mutation assay (Figueroa and Browse, 2012), as well as the CCGCGT motif. Next, we made the CRISPR/Cas9 construct that targets the pCREs in JAZ2 and GER5 promoters and transformed it into Arabidopsis with the Col-0 background. From antibiotic resistance T 1 plants, we found a homozygous mutant called jaz2-4 ger5-3. The jaz2-4 ger5-3 mutant had one base pair insertion in the CCGCGT motif on both JAZ2 and GER5 promoters, where T insertion in the JAZ2 promoter led to no significant nucleotide change from CCGCGT to CCGCGTT, while G insertion in the GER5 promoter caused a base alteration from CCGCGT to CCGCGGT ( Figure 6). Further, we generated a homozygous mutant called jaz2-5 that harbored a mutation within the G-box motif of JAZ2 promoter, in pCREs Up Down CCGCGT CGCGTT ACACGT ACGCGT TACGCG CGCGTTT CGCGTC ACCGCGT CACGTG AACACG AACACGT TACACG CACACG GACTTTT CCGTGT TTTATAT CCACGT AAAGTC AACGTG GATATTT ACGTTA ATTAGT ACGTAT TTTTATA GTCACA TATTTAT GTCGGC AATAATT ATAATAA TGGACC GGACCC GGACCA TTTATA GCCGACA ATATTAT GTGTGAA TTATATA TCACGT ATATAATA TCCACGT AGTACA GAAAAAA TATCCA GATAAG TATGTA TATACT TTATCC TACGAT TGGATAA AGATAAG TGCATG TCATTTT CCAACT GCATGT AAAAATG AAAATGT CGAATA AGATAA CAAGTTG GCATTTT GACCACA TATCTC TTTAGCA CCTAAT GGATAAG GTCCTA GCATGTG AGATATTT AGATATT TCTTATC TACATAT ACAAGT CATCAT CATATG CATCATA Figure 4 Average importance rank for the top 10 pCREs for each wound-response model and their association to a TF family. Woundresponse models are the columns while pCREs are the rows. 
The top 10 pCREs for each model are shown, and how those pCREs overlap with other models in terms of importance rank. The average importance rank shown is the rank of average importance of a feature across five duplicate models ran for the same time-point. Highest rank is red and ranks 150 or lower are blue. Gray color indicates that the pCRE is not present at that wound-response time-point. Association between pCREs and TF families was based on the similarity (measured using PCC) between sequences of pCREs and the previously reported binding sites (identified by DAP-seq or cis-BP) of TF families. The TF family with the maximum PCC to a pCRE was associated with the pCRE in question. PCC is shown for both DAP-seq (degrees of blue color) and cis-BP (degrees of green color) sites. which the CACGTG motif was mutated to CACGTTG ( Figure 6). To determine the effect of the motif mutations on their downstream gene expression upon wound treatment, we harvested the seedlings of jaz2-4 ger5-3 and jaz2-5 mutants, as well as Col-0 controls, 1 h after wounding. The transcript abundances of both mutants and Col-0 were analyzed by reverse transcription quantitative polymerase chain reaction (RT-qPCR). In the jaz2-4 ger5-3 mutant the expression of the JAZ2 gene was upregulated after wounding, exhibiting the same phenotype as the Col-0 control ( Figure 6). Thus, as expected, regulation of JAZ2 was not altered in jaz2-4 ger5-3 by wounding, because the CRISPR-Cas9 mutation had resulted in a synonymous change. Although this in itself does not show that the G-box affects wounding, it is what was expected from the CRISPR result since CACGTT is also considered to be a G-box, and this was previously shown to be important for regulating JA/wound expression (Figueroa and Browse, 2012). Interestingly, the expression level of GER5 was not changed in jaz2-4 ger5-3 upon wound treatment, while the expression of GER5 was significantly upregulated in the Col-0 control (fold increase = 5.52). 
This indicates that CCGCGT, a derivative of the stress-responsive motif CGCG-box, enables the GER5 gene to respond to early wounding and is disabled by the G insertion. The JAZ2 expression was upregulated in response to the wounding treatment in both Col-0 and the jaz2-5 mutant. Additionally, the GER5 transcript level increased after wounding in the jaz2-5 mutant (Supplemental Figure S2). In the case of JAZ2, the G-Box CACGTG was changed to CACGTT, which is a G-Box variant (). Thus, while significant, the change did not substantially alter the JAZ2 response (fold increase = 1.04) compared with the jaz2-4 ger5-3 mutant (fold increase = -0.08). The GER5 expression was also not markedly different in the jaz2-5 mutant relative to the Col-0 in response to wounding (fold increase = -1.73, P = 8.07E-05). Taken together, these results indicate that the CCGCGT CRE is responsible for the wounding response of GER5. Experimental validation of important unknown pCREs at later time-points In addition to CREs important for early wounding response at or before 1 h, we validated important pCREs at 3 and 6 h time-points (see Supplemental Data Set S7) using amplified luminescent proximity homogeneous assay (ALPHA) in vitro DNA-binding experiments complemented by mutations in protoplasts coupled with a reporter gene to evaluate in vivo DNA binding (see "Materials and Methods"). For genes upregulated after 3 h of wounding, we found the most important pCRE to be GTCGGC, a site most similar to sites that bind the AP2/EREBP TF family, and the next most important pCRE to be ACACGT, similar to BZR TFBSs. For genes upregulated 6 h after wounding, the most important pCRE is AACGTG, a derivative of the G-box motif (CACGTG) that also binds Myc TFs (). Thus, we decided to test the next most important unknown pCRE, GTCACA, which is most similar to NAC TFBSs. The next most important pCRE for the 6-h time-point is ACACGT, which was also important at three hours. 
In the end, we targeted three additional pCREs: GTCGGC, ACACGT, and GTCACA from the promoters of three genes AT5G07010, AT2G02990, and AT5G13220, respectively, for further testing. For each pCRE, we first identified candidate TF of which the binding site is the most similar to the CRE sequence (see "Materials and Methods" and Supplemental Data Set S7). For the ALPHA assay, recombinant proteins for TF candidates were purified (Supplemental Figure S3A) and tested against their predicted motif for binding. For GTCACA and GTCGGC, more than one of the TF candidates were tested (Supplemental Data Set S9) where a subset did not bind (Supplemental Figure S3, B-D). For all three pCREs, the WT probes produced strong Alpha signals indicating protein binding to the DNA at all tested TF protein concentrations (Figure 7). In contrast, the probes containing the mutated pCRE sequences either did not produce signals or at a significantly lower level compared to WT probes (Figure 7). These findings indicate that the WT pCRE sequence is important for binding the candidate TFs. Next, we checked in vivo binding using a protoplast assay for GTCGGC and GTCACA important for 3-h and 6-h post-wounding, respectively. For each gene containing either the GTCGGC or GTCACA pCRE in their promoters, we first measured mRNA accumulation levels in unwounded and wounded Arabidopsis plants and in Arabidopsis protoplasts (Supplemental Figure S4). We found expression for each gene increased after wounding compared to unwounded plants. In addition, we found expression in protoplasts for each gene exceeded the amount of expression in unwounded plants, and sometimes that of wounded plants. Therefore, transcript levels increased due to wounding was similar to the expression increase seen in protoplasts, relative to unwounded plants. We hypothesize that because of the similarity in gene expression there may be similarities in how the genes of wound response and protoplasts are regulated. 
Next, we assembled each pCRE site or its mutated version as a tetramer in a head-to-tail orientation upstream of a luciferase reporter gene (harboring a minimal CaMV 35S promoter, see "Materials and Methods"; Figure 8A) that was co-transfected into the Arabidopsis protoplasts. For both pCREs, we found significantly higher luminescence levels in protoplasts with WT sequences than with mutated ones ( Figure 8B). This indicates that each motif is sufficient to induce expression in Arabidopsis protoplasts while the mutated motifs are not. Thus, the predicted GTCGGC and GTCACA motifs regulate reporter gene expression in vivo, providing evidence for their functionality in wound-response regulation, but stress that this does not experimentally show these pCREs to be directly involved in wound response. Figure 5 Motif logos for the top three pCREs for each upregulated wound-response cluster. Chart is divided by time-point (0.25-24 h after wounding). The first column is the top 3 ranked pCREs for each time-point. Note that ranking includes other features such as DHSs and DAP-seq sites, therefore the actual pCRE rank may be lower. The second column shows the average rank for a pCRE in the given model. The third and fourth columns show the best matched TFBM logos, with forward and reverse complement sequences, respectively. PCC values between pCREs and the TFBMs are indicated in the third column. Columns 5-7 are the TF that binds to a given binding site (column 6), the TF family the TF belongs to (column 5), and GO categories of the TF (column 7). Modeling the regulatory code of JA-dependent and JA-independent gene response across wounding time-points Having demonstrated pCREs important for predicting wound response, we next studied the regulatory differences between JA-dependent and JA-independent genes in the context of wound response. 
JA-independent wounding responses include those induced by RNase and nuclease activities that are triggered by wounding but not by the application of JA (). Thus, to understand how JA-independent wound responses are regulated, we used the hormone treatment data () to identify wound-responsive genes that were also responsive to JA or not at 0.5, 1, and 3 h, for which data for both JA and wounding treatments are available. For these three time-points, 84%, 74%, and 72% of genes were upregulated after wounding but not after JA treatment, respectively (Supplemental Figure S5), consistent with the findings of a prominent JA-independent component in wounding response in other studies (;). With this information, we divided the wound-response clusters from the 0.5-, 1-, and 3-h time-points into JA-dependent and JA-independent gene subclusters and generated model predicting wound response for each cluster using known CREs, DAP-seq sites, DHSs, and/or pCREs. Similar to our earlier results, pCRE-based models (F-measures: 0.73-0.87) outperformed both known CREs (0.67-0.74) and known CREs/DAP-seq/DHSs-based models (0.66-0.73; Figure 9; for SVM models: Supplemental Figure S1B and Supplemental Data Set S4). Thus, pCREs were able to better model the regulation of JA-dependent and JA-independent wounding response across time-points, than known TFBSs. By comparing the importance of known CREs, DAP-seq sites, DHSs, and pCREs across models, we identified how JAdependent and JA-independent responses differed in which known CREs and pCREs were important. At 30 min and 1 h after wounding, for example, CGCGTT, the RWR element, and CACGTG, the G-box recognized by many bHLH factors (Heim, 2003;), were the most important known elements for the JA-independent and JA- jaz2-4 ger5-3 line). B, CRISPR/Cas9-mediated mutation in the CCGCGT motif in the GER5 promoter region in jaz2-4 ger5-3. 
Chromatogram represents the sequence of the JAZ2 or GER5 promoter region modified by CRISPR/Cas9 (upper chromatograms), and the corresponding region in Col-0 (lower chromatograms). Blue and red boxes indicate PAM sequence and gRNA target regions, respectively. C and D, Wound responses of JAZ2 (C) and GER5 (D) expression in Col-0 and in jaz2-4 ger5-3. Transcript abundances of JAZ2 or GER5 evaluated by RT-qPCR were normalized to ACTIN2. NW and W indicate no wound (collection after 1 h) and wound treatment (collection after 1 h), respectively. Values for biological triplicates are shown using individual bars, while values for three technical repeats for each biological replicate were depicted with error bars. Significance levels of differences from the one-way analysis of variance were indicated with asterisks (non-significant P > 0.05, *P ≤ 0.05, **P ≤ 0.01). dependent models (see Supplemental Data Set S10 for pCREs, DHSs, and DAP-seq sites and their respective importance scores for JA-independent and JA-dependent models). Interestingly, the G-box element also ranks as the third most important feature in the JA-independent models. This could be because other TFs that are not involved in JA response (e.g. Myc-LIKE and BIM3 TFs) can also bind to this element (O'Malley et al., 2016) or because the Myc element may be necessary to facilitate TF binding to a different regulatory element important for JA-independent response. For pCREs, with the exception of the G-box motif and the bZIP binding site (ACGTGT), there was little overlap in the ranking of important motifs between the JA-dependent and JA-independent models (Supplemental Figure S6). For example, AACGTG and CACGTTT were ranked from 1st to 7th across time-points in JA-dependent models but were not present or were ranked much lower (69th to 157th) for JA-independent models (Supplemental Figure S4 and Supplemental Data Set S8). 
In contrast, CCGCGT and GCCGAC were the most important pCREs 0.5 and 3 h after wounding in the JA-independent models but were not present or were ranked much lower (232nd importance) for JA- Figure 7 Binding of three TFs to identified cis-regulatory motifs. WT probes containing motifs ACACGT (A) from promoter AT2G02990, GTCGGC (B) from promoter AT5G07010, and GTCACA (C) from promoter AT5G13220, and their corresponding mutant probes were incubated with recombinant His6-AT4G18890, His6-AT4G32040, and His6-AT4G36900 proteins, respectively, and the DNA binding affinity was determined by ALPHA assay. Three different concentrations (0, 50, and 100 nM) of the proteins were examined with 10-nM probe in three technical replicates and two biological replicates. Similar trends of the results were obtained from the two biological repeats and one representative was shown with error bars indicating the standard deviation of technical replicates. The tested cis-regulatory motifs and the mutated sequences within the motifs are indicated shaded and underlined, respectively. The different letters indicate significant differences between groups evaluated by one-way analysis of variance followed by the Tukey's multiple comparison test at 5% significance level. GTCGGC GTCGGC GTCGGC GTCGGC Minimal 35S A B Figure 8 Mutation in the GTCACA and GTCGGC motifs attenuates expression of a reporter gene, when placed as tetramers upstream of a minimal 35S promoter. A, Firefly luciferase fused to the cis-regulatory motifs (WT or mutant-WT constructs are shown) were coelectroporated into Arabidopsis Col-0 protoplasts together with p35S:Renilla reporter, and luminescence levels were evaluated by dual bioluminescence assay. B, Luciferase activity was normalized by Renilla luciferase activity. Data represent mean ± SD of three biological replicates of each WT and respective mutant construct, and an asterisk indicates P ≤ 0.05 (Student's t test). dependent models (Supplemental Data Set S10). 
Finally, we found that, of the top 10 most important features for each model, four to eight were DHSs for JA-independent models but none for JA-dependent models (Supplemental Data Set S10). Taken together, these findings highlight how JA-dependent and JA-independent responses were regulated by different sets of regulatory elements and were characterized by distinct chromatin accessibility patterns. Modeling metabolic pathway regulation using wound stress data We next assessed whether the regulatory sequences identified allow us to understand wounding response at the level of metabolic pathways. Here, we asked which specialized metabolism pathways were enriched in genes upregulated across the wounding time series (Supplemental Data Set S11). Depending on the time-point, 5–11 pathways were significantly enriched in wound-response genes. From 0.25 to 3 h after wounding, JA biosynthesis was the most highly enriched pathway (P-values range from 1.5e-3 to 3.5e-7; Supplemental Data Set S11). However, by 12 h it is no longer significantly enriched. Another example was the glucosinolate biosynthesis from tryptophan (Gluc-Trp) pathway; its pathway genes were enriched 0.5 h after wounding (P = 0.008), peaked at 12 h (P = 0.0008) and were not significantly enriched by 24 h. In addition, AT2G38240 (JASMONATE-INDUCED OXYGENASE4) and AT5G05600 (JASMONATE-INDUCED OXYGENASE2) from the JA biosynthesis pathway were upregulated at 0.5 h after wounding and remained upregulated throughout the time course (Supplemental Data Set S11). These examples demonstrate the effect of wounding on metabolic pathways and that these wounding-responsive pathways exhibit distinct response patterns. Using the Gluc-Trp pathway as an example, we further assessed the regulatory basis of the wounding responses of genes in this pathway. By 0.5 h after wounding, three Gluc-Trp genes were significantly upregulated and at 1 h three additional genes were significantly upregulated (see stars; Figure 10A).
Looking beyond the first hour, we saw a cascading effect, whereby 3 h after wounding, the genes upregulated at 1 h were still upregulated, but the three genes that were first upregulated at 0.5 h were no longer upregulated. Continuing this trend, by 6 h after wounding, only one gene that was upregulated at 1 and 3 h after wounding was still significantly upregulated ( Figure 10A). This pattern could be because genes upstream in the pathway are involved, directly or indirectly, in upregulating downstream genes in the pathway. To understand how the cascading response was regulated, we mapped the pCREs found from each of the woundingresponse time-point models built for upregulated genes to the putative promoters of the Gluc-Trp pathway genes ( Figure 10B). Starting at 0.5 h after wounding, there was little overlap of important pCREs (red in Figure 10B) across time-points except for pCREs present at 6 and 12 h after wounding. This indicates that for the Gluc-Trp pathway, genes turned on at different times have different CREs. For example, ACACGT, which resembles bZIP binding motif (PCC = 1), is the most important element at 0.5 h after wounding ( Figure 10C) and is not found in Gluc-Trp pathway genes upregulated at other time-points except at 24 h. The treatment-time-specific nature is generally true among the top pCREs except for AACGTG, which was enriched in the promoters of Gluc-Trp pathway genes upregulated 1, 3, 6, 12, and 24 h after wounding. In summary, pCREs responsible for upregulation of Gluc-Trp pathway genes upon wounding varied for different time-points after wounding, indicating that timing of response is an important consideration when identifying CREs. Furthermore, these results highlight that a series of regulatory elements acting at different times, rather than one canonical element, is central to regulating pathways triggered by a specific environment. 
Conclusion The aim of this study was to better understand the temporal differences in transcriptional response to wounding stress in Arabidopsis. We accomplished this by integrating multiple levels of regulatory information (e.g. sequence-based and epigenetic features) into machine learning models of the regulatory code that could be used to predict if a gene was up-or downregulated at a specific time-point after wounding. This system-wide, modeling approach adopted in this. DAP-seq and DHS refer to the DAP-seq and DHSs, respectively. FET enriched 6-mer refers to the pCREs, which were enriched for a specific cluster. The Fmeasure range is from 0.5 (white) to 1 (red), and gradient as well as actual F-measure is shown in each cell. The bar chart next to the heat map corresponds to each row/cluster and represents the number of genes in that cluster. Note that the models were not generated for genes downregulated 3 h after wounding because there were not enough genes available for training. study allows us to address the critical question: how well the known CREs allow for predicting whether a gene would be wound responsive or not. We demonstrated that wounding response is regulated by a diverse set of regulatory elements that are likely bound by TFs from a wide range of TF families, in addition to elements that were previously identified. We identify 4,255 pCREs derived from wounding coexpression clusters which are upregulated at different timepoints, with 3,493 (82%) having high sequence similarity (PCC 4 0.8) to known TFBSs, although they are not identical and it is mostly unknown whether they are involved in wound response. These pCREs were more predictive of differential expression at each wounding time-point than models based on known TFBSs (derived from the literature and the DAP-seq database) and information about open chromatin sites. From our machine learning models, we quantified the relative importance of each pCRE included in the model for each time-point. 
While some pCREs were important across multiple time-points, we generally found that pCREs were either important for early or late time-points after wounding. Our study also provides a comparison of the cis-regulatory programs of JA-dependent and JA-independent responses. We identified 2,569 pCREs important for predicting genes upregulated in response to wounding but not upregulated in response to JA treatment. Of these, 2,371 (92%) had strong sequence similarity (PCC 4 0.8) to known TFBSs. Finally, by focusing on genes in the Gluc-Trp pathway, we identified pCREs important for predicting genes in this wound-responsive specialized metabolite pathway. While our models perform notably better than random expectation, there remains room for improvement. One possible reason we could not predict differential expression more precisely is that we limited our study to focus on CRE sites in the promoter region ( + 1-kb upstream of the transcription start site). However, CREs located in other regions, including the downstream untranslated regions, introns, or coding regions of a gene, can be useful for predicting whether the gene in question is stress responsive () and could be evaluated in future studies. Another limitation Figure 10 Co-expression and regulation of glucosinolate from tryptophan pathway genes. A, Heatmap showing the log2FC values of all genes in the Gluc-Trp pathway across the seven wounding time-points. Genes are clustered using hierarchical clustering. Genes are on the y-axis, wounding time-points are on the x-axis, and log2FC is represented as the color gradient from a value of 2 or greater (red) to a value of -1 or less (blue). Stars indicate genes are significantly upregulated at a given time-point. B, Scaled importance score of pCREs mapped to Gluc-Trp genes, which are upregulated at a given wounding time-point. Importance is scaled from 0 to 1, where 1 is the most important and 0 is the least important. 
Each row is a pCRE and each column is the wounding time-point. C, The most important pCRE for Gluc-Trp pathway genes at a given time-point. First three columns show the wounding time-point, pCRE, and the correlation of the pCRE to a known TF binding site shown as a motif logo, respectively. The fourth column shows the scaled importance value of that particular pCRE for the Gluc-Trp genes at each time-point. Genes in glucosinolate (Trp) pathway is that genes up-or downregulated at a particular timepoint might not be all regulated the same way. This is especially likely for larger time-point gene groups, like the 1hr_up cluster, which contained 760 genes. If we could further break down this group, perhaps based on the genes' responses to other stresses, we may be able to model more specific responses at 1 h and improve the overall performance. Finally, the data sets regarding DAP-seq and DHS sites did not come from wounded plants, and therefore were not capturing any changes that may occur in TFBSs or chromatin state after wounding. DAP-seq and DHSs data may indeed improve predictions if drawn from a similar treatment specific data set. Other studies have shown that the association with histone proteins can change under stress response () and DAP-seq sites have been shown to be important in temporal nitrogen signaling gene expression in Arabidopsis roots and shoots (). Many of the important pCREs found in this study have not been shown to be associated with wounding. This is especially true for pCREs found at later time-points, which have been less well studied. With technologies such as CRISPR-Cas9, it is feasible to generate precise edits to the DNA to test the role of these pCREs in temporal wounding response experimentally. We mutated the pCRE CCGCGT and this resulted in a significant decrease in expression of the target gene GER5 under wounding treatment. 
Additionally, we show three predicted CREs, GTCGGC, GTCACA, and ACACGT, to bind to their predicted TF in vitro, and two of these to control protoplast expression in vivo, and can be followed up with experiments using stable transgenic lines to show clear association with wound response. Our study demonstrates the feasibility of modeling temporal response to wounding computationally and, through interpreting the models, identifies a set of important putative cis-regulatory targets. Finally, the computational framework in this study can be applied to assess the cis-regulatory mechanisms in other contexts. We found regulatory sequences previously unknown but likely to be important to JA dependent or independent responses which should be tested in future experiments. Another example is the Gluc-Trp pathway demonstrating that we were able to identify pCREs regulating wound response at the pathway level. Therefore, we expect that the same approach can be applied generally to any sets of genes commonly regulated in a specific environment, stage of development, tissue/cell type, and timing to generate model supported hypotheses of cis-regulation. Expression data sets and analysis Microarray data from three different AtGenExpress studies were downloaded from TAIR and CEL files were processed using the Affy package (1.68.0; ) in R (4.0.1). The studies included biotic stress (), abiotic stress (;), and hormone treatment (), where wounding is part of the abiotic stress data set. Arabidopsis plants used in those studies grew under similar conditions and were treated 18 days after germination. Those studies were all part of the AtGenExpress project. Each study had eight different treatments of either different stresses or hormones, resulting in a total of 24 data sets. Samples from each data set were collected after treatment at a range of time-points, including 15 min, 30 min, 1 h, 2 h, 3 h, 4 h, 6 h, 12 h, and 24 h after treatment.
Note that not all time-points were used in this study for each treatment. For each data set, controls were collected at the same time in order to control for circadian effects. Differential expression was calculated using Affy and limma (3.46.0) packages in R (;), and significantly differentially expressed genes were those that had an absolute log2FC ≥ 1 (log2FC ≥ 1: upregulated, log2FC ≤ -1: downregulated) and adjusted (false discovery rate corrected for multiple comparisons) P < 0.05. For each treatment within each expression data set, PCC was calculated for all pairwise combinations. Gene clusters The wounding time-point clusters were determined by two considerations: time-point after wounding (0.25, 0.5, 1, 3, 6, 12, and 24 h), direction of differential expression (up- or downregulated). For example, genes that were upregulated at 0.25 h after wounding belong to the 0.25hr_up cluster, while genes that are downregulated at 1 h after wounding made the cluster 1hr_down. This created a total of 14 wounding clusters. For wounding and JA clusters, genes were placed in a cluster based on whether they were differentially expressed in one or both treatments at the same time-point. For example, a gene X upregulated in both 1 h after wounding and 1 h after JA treatment would be placed in cluster 1hr_up/1hr_up, while gene Y upregulated in 1 h after wounding but not differentially expressed under 1 h after JA treatment would be placed in cluster 1hr_up/1hr_NC (NC: no changes). Thus, a gene Z upregulated in 1 h after wounding but downregulated in 1 h after JA treatment would be placed in cluster 1hr_up/1hr_down. Therefore, for the three time-points available for both wounding and JA treatment data sets (0.5, 1, and 3 h), there are potentially 18 clusters: 3 time-points × 2 regulation directions after wounding (up- or downregulated) × 3 regulation directions after JA treatment (up- or downregulated, or no changes).
Three of these potential clusters contained no genes and were subsequently omitted (0.5hr_up/ 0.5hr_down, 0.5hr_up/1hr_down, 0.5hr_up/3hr_down). In addition, a nondifferentially expressed cluster was determined by genes, which were not differentially expressed across all stress and hormone treatments, including all timepoints. For the information of all gene clusters (genes and the number of genes for each cluster) and the overlap between clusters, see Supplemental Data Set S1. Known CRE curation and pCRE finding Known regulatory elements, including elements reported to be responsive to JA treatment, wounding, or insect stress, were curated from a literature search (Supplemental Data Set S3). Both the known CREs with experimental evidence and predicted by computational approaches were included. For pCRE finding, putative promoter regions of each gene (identified as 1-kb upstream of the transcription start site) were downloaded from TAIR for Arabidopsis. Homemade python scripts (https://github.com/ShiuLab/MotifDiscovery) were used to identify all possible combinations of k-mers (oligomer sequences of length k) present in gene promoters. The Fisher's exact test (FET) was then used to determine the overrepresented pCREs in the promoter region (defined as 1,000-bp upstream of gene start site) for a given woundresponsive gene cluster compared with the nondifferentially expressed cluster. Four P-value cutoffs (adjusted P 5 0.01, P 5 0.01, adjusted P 5 0.05, and P 5 0.05) were explored for the FET, and the second one (P 5 0.01) performed best for the later machine-learning models. Starting with all possible 6-mers, pCREs which were found to be significantly overrepresented in the target clusters were kept. Next, the k-mer finding was performed for 7-mers, which were produced by adding one nucleotide to the enriched 6-mers on either side, thus there were eight possible 7-mers for each 6-mer. 
These 7-mers were again tested to see if they were significantly overrepresented in the given cluster, and if their P-value was lower than that of the parent 6-mer. If this was true, the 7-mer was kept and the 6-mer was discarded. If not, the 7-mer was discarded and the 6-mer was kept. This progressive procedure of "growing" k-mers continued until the longest k-mer with a P-value lower than its predecessor was obtained. The enriched pCREs were then used as features (present or absent for a pCRE in a gene) to predict whether a gene belongs to a particular woundresponsive cluster or the non-differentially expressed cluster in machine-learning models. Arabidopsis cistrome and epicistrome Two data sets with in vitro TFBMs were used to correlate the pCREs with the TFBMs. For the first, the position weight matrices for Arabidopsis TFBMs determined from protein binding arrays () were downloaded from the CisBP database (http://cisbp.ccbr.utoronto.ca). For the second, DNA affinity purification sequencing (DAP-seq) peaks (O') were downloaded from the PlantCistrome Database (http://neomorph.salk.edu/ PlantCistromeDB). The coordinates of the peaks (which are determined by the previous research group) were then mapped to the Arabidopsis genome using python scripts. If the peak overlapped with the promoter of a gene of interest, the peak was considered present as a feature for that gene. To provide insight into chromatin structure, bed files for DHSs in Arabidopsis () were obtained from the National Center for Biotechnology Information database under the ID number GSE34318. The DHS sites were assessed using samples from leaf and flower of both WT and ddm1-2 mutant plants. The DHS peak coordinates were obtained from BED files (https://www.ncbi.nlm.nih.gov/ geo/query/acc.cgi?acc=GSE34318) and then mapped to the Arabidopsis genome. If the peak overlapped with the promoter of a gene of interest, the peak was considered present as a feature for that gene. 
Machine learning models Prediction models were built for each wound-responsive cluster as well as for wounding-JA cluster, where the presence or absence of enriched pCREs from the promoter analysis were used as features to predict whether a gene belongs to the cluster in question or the non-differentially expressed cluster. Two machine learning algorithms implemented in the scikit-learn package (), RF and SVM, were used to build the model for each cluster. Python scripts used to run the models can be found here: https://github.com/ShiuLab/ML-Pipeline. For each model, 10% of the data were withheld from training as an independent, testing set. Because the data set was unbalanced (e.g. there were 760 genes in the 1hr_up cluster while 6,855 genes in the non-differentially expressed cluster), 100 balanced data sets were created by randomly drawing genes from the null gene cluster to match with the number of genes in the target cluster. Using the training data, grid searches over the parameter space of RF (max_depth = 3, 5, 10, max_features = 0.1, 0.5, sqrt, log2, None, n_estimators = 100, 500, 1,000) and SVM (Kernel=Linear, C = 0.001, 0.01, 0.1, 0.5, 1, 10, 50) were performed. The optimal hyperparameters identified from the grid search were used to conduct a 10-fold cross-validation run (90% of the training data set were used to build the model, the remaining 10% were used for validation) for each of the 100 balanced data sets. We compare model performance using the F-measure, defined as $F = 2 \times \frac{\mathrm{Precision} \times \mathrm{Recall}}{\mathrm{Precision} + \mathrm{Recall}}$, where $\mathrm{Precision} = \frac{tp}{tp + fp}$ and $\mathrm{Recall} = \frac{tp}{tp + fn}$, with tp = true positive, fp = false positive, fn = false negative. Throughout the manuscript, we compare the RF models as model performance can change based on the algorithm, but all F-measures are reported in Supplemental Data Set S4. Thus, in a binary model, a perfect prediction has an F-measure of 1 and the F-measure of random expectation is 0.5.
The RF models also provide an importance score for each input feature, which is determined by the average decrease in impurity of a node in a decision tree across the forest when the feature is used. Thus, features with higher importance scores are more important for a RF model. Importance values were then normalized by scaling between 0 and 1. Features were ranked based on their importance scores for a model, and the average rank of a feature across five duplicate models run for the same timepoint was used as the average importance rank of the feature. Percentile rank is calculated as dividing the rank of a feature by the total number of features. Performance of regression models were measured using Pearson's correlation of predicted log2FC to actual log2FC, where a PCC of 1 is a perfect correlation, 0 indicates no correlation, and -1 indicates an anti-correlation. For each cluster, models with only known CREs were built, and then models with known CREs plus DAP-seq and DHSs information were built. Finally, models with DAP-seq, DHSs and enriched pCRE information were built. Additionally, for the final model type, five separate models for each wounding time-point cluster, each with 100 balanced replicates were run to determine the rank for each feature (pCREs) from most important to least important. This was then used to get an average importance rank of features from the five models (for average importance ranks, see Supplemental Data Set S7; for raw importance scores from each of the five models, see Supplemental Data Set S8). Before ranking, reverse complement pCREs were removed, so that essentially the same pCRE was not ranked twice. To assess random expectation, gene clusters chosen randomly from the expression data sets, pCREs were found using the same methods as above, and were used to build machine learning models using the methods above. Random gene clusters were made for genes at n = 30, 50, 100, 150, 200, and 250 at 20 repetitions each. 
Model results are reported in Supplemental Data Set S4. Sequence similarity between pCREs with known TFBSs TAMO/1.0 () was also used to create a tamo file for each pCRE, which was then used to measure the similarity of the pCRE to known TFBSs. To compare pCREs to known TFBSs, pairwise PCC distance between pCREs and TFBSs (both DAP-seq and TFBMs from CIS-bp) was generated using the TAMO program (version 1.0; ). After calculating the PCC distance to all possible TFBSs, the TFBSs with the lowest distance (highest PCC) was determined for each pCRE as its best match and was then used for visualization of the binding site logo. Code for parsing TAMO output can be found at: https://github.com/ShiuLab/MotifDiscovery/tree/master/TAMO_scripts. CRISPR-Cas9 mutagenesis The Col-0 plants and the CRISPR/Cas9 mutants generated in this study were grown on soil (Suremix growth medium, Michigan Grower Products Inc., USA) or Murashige and Skoog (MS) media (PhytoTech Labs, USA) containing 0.8% agar under a photoperiod of 16-h white light/8-h dark at 23°C, with the light provided by fluorescent bulbs of ~100 µmol m⁻² s⁻¹. Wound treatments were done on plants grown on MS medium when they were 18 days old as in Kilian et al. For each set of three biological replicates, three individual plants were wounded using hemostats as in Koo et al., plant tissues were pooled and harvested 1 h after wounding, frozen in liquid nitrogen, and then stored at -80°C until RNA was extracted. For the construction of CRISPR plasmids, two gRNAs were simultaneously assembled into the pHEE401E vector by the Golden Gate assembly method (). The gRNA sequences used in this study are shown in Supplemental Data Set S12. The CRISPR plasmids were transformed into GV3101 Agrobacterium strain, followed by floral dipping into Arabidopsis (Col-0). The T1 transgenic plants were grown in MS media containing hygromycin (25 mg L⁻¹) for 3 weeks.
Genomic DNA was extracted from the rosette leaf of the hygromycin-resistant T1 plants, and the promoter regions of JAZ2 and GER5 were amplified by genomic PCR using the primers, which are listed in Supplemental Data Set S12. The sequences of the regions targeted by CRISPR/Cas9 were validated by Sanger sequencing. All statistical tests for experiments are described in Supplemental Data Set S13. The expression levels of JAZ2 and GER5 were analyzed in the T2 generation with three biological replicates, for which different sets of seedlings were individually collected. Total RNA from CRISPR-Cas9 mutants was extracted with RNeasy Plant Mini Kit (Qiagen, USA) following manufacturer's instructions. Approximately 500 ng of RNA was used for cDNA synthesis with SuperScript II Reverse Transcriptase (Invitrogen, USA). The transcript levels of JAZ2 and GER5 were determined by quantitative real-time polymerase chain reaction (PCR) (Quantstudio 3 Real-Time PCR, Thermo Scientific, USA) using SYBR Green PCR Master Mix followed by manufacturer's instruction (ThermoFisher Scientific, CA, USA). The Ct values of the genes were normalized to those of ACTIN2. The PCR primer sets were described in Supplemental Data Set S12. Protein expression and purification For the DNA-binding affinity test, Gateway donor vectors obtained from the ABRC (Supplemental Data Set S8) were recombined into pDEST17 vector (ThermoFisher Scientific, USA) using LR Clonase (ThermoFisher Scientific, USA). The pDEST17 constructs harboring the TFs were used for the protein expression and purification. For protein purification, the His6-tagged TFs were transformed into Escherichia coli BL21 (DE3) strain. The cells were cultured in 50 mL until they reached an optical density (OD600) of ≈0.4 at 37°C. For recombinant protein induction, 0.5-mM isopropyl β-D-1-thiogalactopyranoside was added to the cultured media and incubated at 37°C for 2 h.
The cells were collected, resuspended in 5-mL modified phosphate-buffered saline (PBS) buffer (500-mM NaCl, 10-mM Na2HPO4, 2-mM KH2PO4, and 0.05% Triton X-100), and lysed by a sonication (Misonix Ultrasonic Liquid Processors S-4000, USA). After centrifugation at 3,500g for 20 min at 4°C, the supernatant was incubated with 100 mL of 50% (w/v) slurry of Ni-NTA agarose (Thermo Fisher Scientific, USA) for 1 h and washed with 10 mL of PBS buffer containing 50-mM imidazole. The resin was eluted by 200 mL of 400-mM imidazole in PBS buffer four times. All protein purification procedures were performed at 4°C. The quality and quantity of protein was verified on sodium dodecyl sulfate-polyacrylamide gel electrophoresis (SDS-PAGE) gel (15%, 37.5:1 acrylamide:bisacrylamide, BioRad, USA) followed by Coomassie Brilliant Blue (G-250, Thermo Scientific, USA) staining. Evaluating DNA-binding activity using ALPHA DNA binding affinity test was carried out by ALPHA according to the manufacturer's protocol (PerkinElmer, USA; ). Briefly, the purified His6-tagged proteins (0, 50, and 100 nM) were incubated with 0 and 10 nM of streptavidin-conjugated DNA probes for 1 h at room temperature. The mixture of protein and probe was incubated with anti-His6 AlphaLISA Acceptor beads (20 µg mL⁻¹, PerkinElmer, USA) for 1 h at room temperature followed by incubation with AlphaScreen Streptavidin Donor beads (20 µg mL⁻¹, PerkinElmer, USA) for 30 min at room temperature. The total mixture was transferred into white 384-well OptiPlate (PerkinElmer, Waltham, MA, USA) and the signal was read in an Alpha-compatible reader. For two biological replicates, two sets of recombinant proteins were obtained from different batches and were subjected to the assays with three technical replicates for each Alpha reaction.
Measuring induced gene expression by RT-qPCR in protoplast For a luciferase assay, the DNA fragments containing four copies of cis-regulatory motifs and corresponding mutant probes were synthesized by Integrated DNA Technologies and cloned into the pENTR/SD/D-TOPO vector (ThermoFisher Scientific, USA). The pENT constructs were subcloned into the pLUC2 vector (Kim and Somers, 2010). The primers and DNA probes used in this study are in Supplemental Data Set S11. Protoplasts were isolated from 3-week-old Col-0 as in a previous study () with around 1 × 10⁵ cells for each transformation. The isolated protoplasts were cotransfected with 500 ng of reporter construct and 100 ng of Renilla construct containing Renilla luciferase gene driven by 35S promoter () as described previously (). After co-transfection, protoplasts were incubated for 12 h at room temperature in darkness and the luciferase activity was measured by dual-luciferase reporter assay kit (Promega, USA) according to the manufacturer's protocol using a microplate luminometer. Total RNA was extracted from the protoplasts using RNeasy Plant Mini Kit (Qiagen, USA) following manufacturer's instructions. Around 10 ng of RNA was used for cDNA synthesis with SuperScript II Reverse Transcriptase (Invitrogen, USA). The transcript abundance of the genes of interest was determined by quantitative real-time PCR (Quantstudio 3 Real-Time PCR, Thermo Scientific, USA) using SYBR Green PCR Master Mix followed by manufacturer's instruction (ThermoFisher Scientific, CA, USA) by normalizing it to the level of ACTIN. The primers used in this study are described in Supplemental Data Set S12.
To map the enriched pCREs to the promoter regions of genes in the glucosinolate from tryptophan (Gluc-Trp) pathway, gff files were created that contained the coordinates of pCREs in the promoters of all Arabidopsis genes. Genes that were annotated in the Gluc-Trp pathway and expressed at a wounding time-point were examined for the presence/absence of the enriched pCREs. Finally, the importance scores of pCREs, which were mapped to Gluc-Trp genes were determined for each wounding time-point model. Supplemental data The following materials are available in the online version of this article. Supplemental Figure 1. Heatmap of the F-measure for all wounding SVM models (supports Figure 2). Supplemental Figure 2. Mutation in the CACGTG motif of the JAZ2 promoter led to the downregulation of JAZ2 expression following wound treatment (supports Figure 6). Supplemental Figure 3. Recombinant proteins used in ALPHA experiments and ALPHA assay for CRE-TF pairs with no significant binding (supports Figure 7). Supplemental Figure 4. Wound-responsive expression of the genes downstream of GTCACA or GTCGGC cisregulatory motifs (supports Figure 8). Supplemental Figure 5. Gene overlap of JA-dependent and JA-independent clusters (supports Figure 9). Supplemental Figure 6. Average importance rank for the top 10 pCREs for each JA-dependent and JA-independent wound-responsive model and the associated TF families (supports Figure 9). Supplemental Data Set S1. Sample cluster overlap and genes in each cluster. Supplemental Data Set S2. Between sample PCC results. Supplemental Data Set S3. Known cis-regulatory elements derived from literature. Supplemental Data Set S4. All machine learning model results. Supplemental Data Set S5. Feature importance for models using only known elements or sites. Supplemental Data Set S6. All pCREs enriched for each wounding time-point cluster and their P-values. Supplemental Data Set S7. 
Summary table for the importance rank of each pCRE for each cluster and their correlation to DAP-seq or cis-BP sites. Supplemental Data Set S8. Raw importance scores for wounding models. Supplemental Data Set S9. DNA binding activity of six TFs. Supplemental Data Set S10. Overall feature importance score for wounding JA-dependent and JA-independent clusters. Supplemental Data Set S11. Pathway enrichment for each wounding time-point cluster and P-values. Supplemental Data Set S12. Primers used for CRISPR-cas9 and qPCR and promoter sequences of experimental genes. Supplemental Data Set S13. Statistical analyses for each experimental figure.
Frank Fitzpatrick Early life Fitzpatrick was born in Detroit, Michigan and graduated from the University of Michigan's music and business schools in 1983. While there, he worked for Eclipse Jazz, a student-run body striving for better exposure of jazz artists through the production of live shows in the Ann Arbor area. Early Career Fitzpatrick relocated to Los Angeles in 1983 to work for record producer Richard Perry of Planet Records. He started working as a music editor for television in 1984, initially as a supervising music editor on the series Alvin and the Chipmunks and Crime Story, among others. Fitzpatrick later expanded into film music by working as a music editor for the film composer Georges Delerue, going on to co-produce an orchestral retrospective of Delerue's film scores in 1991, The London Sessions, which included a track co-written by Fitzpatrick and performed by Carl Anderson entitled "Between You and Me." Soundtracks Throughout the nineties, Fitzpatrick worked as a film composer, music supervisor and executive music producer in Hollywood. The first film score he is credited for is that of Nuns on the Run, the 1990 comedy featuring Eric Idle and Robbie Coltrane. Further contributions in the 90s included soundtracks for the films Friday, Pirates of Silicon Valley and In Too Deep, as well as working as the composer and music director for The Larry Sanders Show. Songwriter and music producer In 2010, Fitzpatrick was nominated for a Grammy Award for the Anthony Hamilton song “Soul Music”, the title track for the 2008 film Soul Men, in the category ‘Best Traditional R&B Vocal Performance’. That year, in partnership with Terry McBride, Fitzpatrick produced Yoga Revolution, a compilation CD to promote yoga and meditation programs in schools. The album featured recordings from Sheryl Crow and Sting, among others. 
His 2017 single “Call On Me” and music video from Universal’s film Bring It On: Worldwide were part of a non-profit initiative to promote music and arts for schools. Filmmaker In 2001, Fitzpatrick directed and produced the short film Jungle Jazz: Public Enemy #1, which premiered at the 2001 Berlin International Film Festival, winning The Silver Bear Award for best Short Film. The film won additional awards in Sydney, São Paulo, Belo Horizonte, Kansas City and Santa Cruz. In 2007, Fitzpatrick completed his first animated short film The Rebel Angel. The majority of his film work has come through music videos. Amongst his work are the 2011 videos “Hip Hop Nation” (featuring recording artists KRS-One and K’naan) and “Express Yourself” (featuring Nneka and Ziggy Marley). In 2018, Frank was an executive producer for the romantic comedy Love Jacked. EarthTones Fitzpatrick is the founder of EarthTones, a non-profit arts organization. In 2012, EarthTones launched the WHY Music project, an initiative to provide people with a way to best utilize the benefits of music in all the areas of their lives. WHY Music began with a series of articles in the Huffington Post, and expanded to include a series of live talks, round tables, and workshops. That year, EarthTones partnered with City of Hope to launch Yoga For Hope in Los Angeles, in support of people dealing with life-threatening diseases. In 2013, Fitzpatrick was nominated as a Gifted Citizen from the Cuidad de las Ideas for the WHY Music project's potential to positively impact over 10 million lives. The project later expanded further with a program tailored for schools. In 2016, Fitzpatrick collaborated with humanitarian photographer Lisa Kristine to create the video "A Prayer for Freedom", part of the End Slavery Campaign initiated by Pope Francis and a coalition of spiritual leaders from around the world. 
The video premiered at the Vatican in Rome, the House of Lords in Westminster, and at the opening of the Enslaved Exhibition at the National Underground Railroad Freedom Center in Ohio. Social entrepreneurship In 2011, Fitzpatrick attended the Executive Program at Singularity University. The following year, he helped facilitate Singularity University's inaugural Hollywood Executive Program. In 2014, he joined the faculty of Exponential Medicine at the university, an annual conference discussing the implications of breakthrough technologies on healthcare. In 2017 he was a speaker at the conference, presenting as an expert on music and health. Along with Director of Pepperdine’s Center for Media and Entertainment, Craig Detweiler, Fitzpatrick created and hosted the 2014 International Forum Education: Disrupted at Pepperdine University. Fitzpatrick has been invited as a delegate to the 2012 Skoll World Forum for Social Entrepreneurs, the 2014 UK Arts and Humanities Research Council’s forum on Video Games, Music Creativity and Education, and the 2017 Novus Summit at the United Nations. He has also been a keynote speaker at the TEDx and Esalen.
"""Tests for dublintraceroute.TracerouteResults."""

try:
    # Python 3 exposes the built-in namespace as `builtins`.
    import builtins
except ImportError:
    # Python 2 names the same module `__builtin__`.
    import __builtin__ as builtins

try:
    # Python 2: json.dump writes `str`, so StringIO.StringIO is needed;
    # retrofit context-manager support onto it so it works in `with` blocks.
    from StringIO import StringIO

    def _return_self(*args):
        return args[0]

    StringIO.__enter__ = _return_self
    StringIO.__exit__ = _return_self
except ImportError:
    # Python 3: json.dump writes `unicode`; io.StringIO already supports
    # the context-manager protocol.
    from io import StringIO

import pytest

import dublintraceroute


def test_tracerouteresults_init():
    """An empty mapping produces a TracerouteResults with no keys."""
    results = dublintraceroute.TracerouteResults({})
    assert list(results.keys()) == []


def test_tracerouteresults_save(monkeypatch):
    """save() goes through the built-in open(); stub it with a StringIO."""
    def fake_open(fname, mode):
        return StringIO()

    monkeypatch.setattr(builtins, 'open', fake_open)
    results = dublintraceroute.TracerouteResults({})
    results.save('dummy file name')
/**
 * Print debugging information about the executed request and response to System.out.
 *
 * @author Rossen Stoyanchev
 * @author Sam Brannen
 */
@Ignore("Not intended to be executed with the build. Comment out this line to inspect the output manually.")
public class PrintingResultHandlerTests {

	// Minimal controller fixture: GET "/" returns the plain string body.
	@Controller
	private static class SimpleController {

		@RequestMapping("/")
		@ResponseBody
		public String hello() {
			return "Hello world";
		}
	}

	// Performs GET "/" against a standalone setup of SimpleController and
	// dumps the request/response details to System.out via print().
	@Test
	public void testPrint() throws Exception {
		standaloneSetup(new SimpleController()).build().perform(get("/")).andDo(print());
	}
}
/* FCFS (first-come, first-served) CPU scheduling simulator.
 *
 * Reads burst and arrival times for up to 10 processes from stdin,
 * schedules them in order of arrival, and prints per-process waiting and
 * turnaround times plus their averages.
 */
#include <stdio.h>
#include <limits.h>

#define MAX_PROC 10

/*
 * Select the not-yet-scheduled process with the smallest arrival time and
 * record it in schedule slot `in`.
 *
 *   B: burst times by process     A: arrival times by process (the chosen
 *                                    entry is set to INT_MAX to mark it used)
 *   C: arrival time per slot      D: burst time per slot
 *   P: 1-based process id per slot   n: number of processes
 *
 * Always returns 0 (kept for compatibility with the original interface).
 */
int sort(int *B, int *A, int n, int *C, int *D, int *P, int in)
{
    int i;
    int min = INT_MAX;
    int cha = 0; /* was uninitialized; now defined even if no entry is < INT_MAX */
    for (i = 0; i < n; i++) {
        if (A[i] < min) {
            cha = i;
            min = A[i];
        }
    }
    C[in] = min;
    A[cha] = INT_MAX; /* mark this process as scheduled */
    D[in] = B[cha];
    P[in] = cha + 1;
    return 0;
}

int main(void) /* was `void main()`, which is not a standard signature */
{
    int i, j, n;
    int A[MAX_PROC], B[MAX_PROC], C[MAX_PROC], D[MAX_PROC], P[MAX_PROC];

    printf("Enter the number of process :");
    scanf("%d", &n);
    for (i = 0; i < n; i++) {
        printf("Enter the burst time of %d process :", i + 1);
        scanf("%d", &B[i]);
    }
    for (i = 0; i < n; i++) {
        printf("Enter the arrival time of %d process :", i + 1);
        scanf("%d", &A[i]);
    }

    /* Build the schedule in order of arrival. */
    for (i = 0; i < n; i++)
        sort(B, A, n, C, D, P, i);

    /* GT[i] = start time of the i-th scheduled process; GT[n] = makespan.
     * The original looped with i <= n, reading the uninitialized D[n], and
     * sized GT at 10 even though n + 1 entries are needed. */
    int GT[MAX_PROC + 1];
    int sum = 0;
    for (i = 0; i < n; i++) {
        GT[i] = sum;
        sum += D[i];
    }
    GT[n] = sum;

    /* Recover waiting/turnaround times back in original process-id order. */
    int W[MAX_PROC], TT[MAX_PROC];
    int in = 0;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            if (P[j] == i + 1) {
                TT[in] = GT[j + 1] - C[j]; /* completion - arrival */
                W[in] = GT[j] - C[j];      /* start - arrival */
                in++;
                break;
            }
        }
    }

    int Waitingtime_avg = 0;
    int turnaroundtime_avg = 0;
    printf("\n");
    for (i = 0; i < n; i++) {
        printf("Waiting time for Process%d is: %d\n", i + 1, W[i]);
        Waitingtime_avg += W[i];
    }
    printf("\n");
    /* `%2f` is field-width 2 with default precision; `%.2f` was intended. */
    printf("Average waiting is: %.2f \n", (float)Waitingtime_avg / n);
    printf("\n\n");
    for (i = 0; i < n; i++) {
        printf("Turnaround time for Process%d is: %d\n", i + 1, TT[i]);
        turnaroundtime_avg += TT[i];
    }
    printf("\nAverage turn around time is: %.2f ", (float)turnaroundtime_avg / n);
    return 0;
}
A Toxicology for the 21st CenturyMapping the Road Ahead The landmark publication by the National Research Council putting forward a vision of a toxicology for the 21st century in 2007 has created an atmosphere of departure in our field. The alliances formed, symposia and meetings held and the articles following are remarkable, indicating that this is an idea whose time has come. Most of the discussion centers on the technical opportunities to map pathways of toxicity and the financing of the program. Here, the other part of the work ahead shall be discussed, that is, the focus is on regulatory implementation once the technological challenges are managed, but we are well aware that the technical aspects of what the National Academy of Science report suggests still need to be addressed: A series of challenges are put forward which we will face in addition to finding a technical solution (and its funding) to set this vision into practice. This includes the standardization and quality assurance of novel methodologies, their formal validation, their integration into test strategies including threshold setting and finally a global acceptance and implementation. This will require intense conceptual steering to have all pieces of the puzzle come together. The willingness to accept risks in daily life is diminishing continuously. The willingness to lessen efforts of safety evaluations is therefore low. Thus, a generally accepted consensus is that new approaches must not lower the current safety standards. This leads quickly to a concept, where current methodologies are considered gold standards, which need to be met. It is therefore not sufficient to develop new approaches, but also to show their limitations in comparison to current regimes, a process normally summarized as validation. However, this typically leads to a strategy where out of the ''patch-work'' of the toxicological tool-box, maximally one patch is replaced by a new one. 
This does not really open up for a new general approach. We might take a different view and ask ourselves how regulatory toxicology would be done if we had to design it from scratch. The vision of the NRC committee (Andersen and Krewski, 2009; National Research Council, 2007) is laying out such a new design, putting forward a new approach based on modern technologies and in a more integrated way. This includes likely the accumulated knowledge on pathways of toxicity, modern technologies such as (human) cell culture, omics technologies (genomics, proteomics, metabonomics), image analysis, high-throughput testing, in silico modeling including PBPK (physiology-based pharmaco-, here more toxico-, kinetic modeling) and QSAR (quantitative structure activity relationships). Federal agencies have already joined forces to attempt this (). For the purpose of this article let us assume the feasibility of this approach. It is unrealistic to assume that it will be a one-test approach, which does the whole job — likely it will be another test battery (tool-box). However, it is the first hypothesis put forward that we can only gain if we construct this new approach from scratch and not only replace or add new ''patches.'' We shall explore here which fundamental problems remain, and if such a battery of novel tests can be achieved, independent of the technologies to be applied. CHALLENGE 1: TESTING STRATEGIES INSTEAD OF INDIVIDUAL TESTS Today's approach to regulatory testing is rather simple: one problem, one test. Limitations of this approach have been discussed earlier (Hartung, 2008b), especially when considering the low prevalence of most hazards (Hoffmann and Hartung, 2005). It is important to note that in vitro tests do not have fewer limitations than the in vivo ones (Hartung, 2007b).
A toxicology based on pathways is one which is likely based on various tests, be it in a battery (i.e., where all tests are done to derive the results) or a test strategy (i.e., where tests are done based on interim decision points). Our experiences with the first approach are poor: combining typically three mutagenicity tests in a battery resulted in a disaster of accumulating false-positives (): only 3-20% of positive findings are real-positives — hardly an efficient strategy. We therefore need other ways to combine tests for the different pathways, but we have neither a terminology for test strategies nor tools to compose or validate them. the open access version of this article for noncommercial purposes provided that: the original authorship is properly and fully attributed; the Journal and Oxford University Press are attributed as the original place of publication with the correct citation details given; if an article is subsequently reproduced or disseminated not in its entirety but only in part or as a derivative work this must be clearly indicated. For commercial reuse, please contact [email protected]. CHALLENGE 2: STATISTICS AND MULTIPLE TESTING When testing for multiple pathways, we will need to correct our statistics for multiple testing. We have to lower significance levels accordingly or we will run increasingly into false-positive findings. The proponents of the new approach assume more than a hundred, and less than a thousand, such pathways. A lot of multiple testing... Assuming only 100 pathways, significance levels of p < 0.05 would have to be lowered to 0.006 using the most common Bonferroni correction. This — likely with sophisticated methods of high inherent variance — will result in an astronomic number of replicates necessary: for the example of p < 0.05 and stable noise/signal ratios, a 71-fold increase in sample size (e.g., number of animals or replicate cellular tests) is necessary to reach the same level of confidence.
CHALLENGE 3: THRESHOLD SETTING Where does a relevant effect start? Certainly not where we can measure a significant change. What is measurable depends only on our detection limits, and in the case of multiendpoint methods a lot on signal/noise relation and the inevitable number of false-positive results. If, for example, a toxicogenomics approach is taken, several thousand genes might be measured and, especially when low thresholds of foldinduction are used, false-positive events will occur. Even if real-positive, the questions arising are then, whether this is significant with the given number of replicates, or even more important, whether this is relevant (notably completely different questions). Although the former can be tested with replicate testing and statistics (see, however, problem of multiple testing), the relevance is more difficult to establish: The more remote we are in (sub-) cellular pathways, the more difficult to extrapolate to the overall organism. The NRC vision document is not really clear here, whether we talk of cells and their signal transduction pathways or the even more complicated physiological pathways of in dynamic systems with compensatory mechanisms. What does it mean if a pathway is triggered but if accompanied by some compensatory ones as well? We definitively have to overcome the mentality of ''we see an effect, this is an effect level.'' Any method, which assesses only a certain level of the organism (e.g., the transcriptome when using genomics), will be questioned whether these changes are translated to the higher integration levels (proteome, metabolism, physiology). This argues for systems biology approaches where such considerations are taken into account, but complexity of modeling increases dramatically, with impacts on standardization, costs, feasible number of replicates etc. 
The greater the distance from the primary measurement to the overall result in a model, the more difficult threshold setting will become because of error propagation. Setting of thresholds or other means of deriving a test result (data analysis procedure) is a most critical part of test development. It determines the sensitivity and specificity of the new test, that is, the proportion of false-positive and falsenegative results. Noteworthy, this needs to be done before validation, but we already need a substantial ''training set'' of substances to derive the data analysis procedure, that is, the algorithm to convert raw experimental results into a test result (positive/negative, highly toxic, moderate, mild...). This raises the question, where such reference results come from? CHALLENGE 4: WHAT TO VALIDATE AGAINST? The first problem is that the choice of the point of reference determines where we will arrive. If the new toxicology is based on animal tests as the reference, we can only approach this ''gold standard'' but will not be able to overcome its shortcomings. We have suggested () the concept of composite reference points, that is, a consensus process of identifying the reference result attributed to a reference test substance. This allows at least the investigator to review and revise individual results, but does not change the main problem that mostly animal data are only available. The second problem is that it is unlikely that we will be able to evaluate the entire pathway-based test strategy in one step. So the question becomes what to validate against, if we have only partial substitutes? If we have the perfect test for a pathway of toxicity, where are the data on substances to test against-we will typically not know which of the many pathways was triggered in vivo. As a way forward we have proposed a ''mechanistic'' validation, where it is shown that the prototypic agents affecting a pathway are picked up while others not expected to do so are not. 
There are various challenges to the validation process as it is right now (), which we have discussed elsewhere (;Hartung, 2007a). Especially for the complex omic technologies and other information-rich and demanding technologies, we are only starting to see the challenges for validation. We have coined the term ''postvalidation'' () to describe the cumbersome process of regulatory acceptance and implementation. It is increasingly recognized that it is neither the lack of new approaches nor their proven reliability by validation, but that translation into regulatory guidelines and use is now the bottle-neck of the process. Change requires giving up on something not to add to it. As long as most new approaches are considered ''valuable additional information,'' the incentive to drive new approaches through technical development, validation and acceptance is rather low, given 10-12 years of work of large teams and costs of several hundred thousand dollars. The process is so demanding MAPPING THE ROAD AHEAD FOR TOXICITY TESTING because regulatory requirements often mandate virtually absolute proof that a new method is equal to or better than traditional approaches. Most importantly, to let go from tradition requires seeing the limitations of what is done today. This discourse was too long dominated by animal welfare considerations. This has been convincing for parts of the general public, but the scientific and regulatory arena is much less impressed by this argument. Especially, if personal responsibility and liability come into play, traditional tests are rarely abandoned. Costs are not too much of an issue, because they are the same for competitors and become simply part of the costs of the products. In general, costs are less than 1%-often 0.1%-of the turnover of regulated products (Bottini and Hartung, 2009). Thus, the major driving force for change would be that we can do things better. 
However, limitations are not very visible in this field and the interest to expose them is low. In order to identify limitations, we would need, first of all, transparency of data and decisions, and establishment of reproducibility and relevance of our approaches. Neither of this is given: data are typically not published and/or are considered proprietary; repeat experiments are often even excluded by law and data on the human health effects are rare for comparison (Hartung, 2008a). This leaves us in a situation, where ''expertise,'' that is, the opinion of experts rules, whereas hard data (''evidence'') are short in supply. Interestingly, clinical medicine is in a similar situation, that is, facing the coexistence of traditional and novel scientific approaches. Here, however, information overload rather than lack of data is the problem. A remarkable process has taken place over the last two decades, which is called Evidence-based Medicine (EBM). The Cochrane Collaboration includes more than 16,000 physicians and has so far produced about 5000 systematic reviews compiling the available evidence for an explicit medical question in a transparent and rigorous process. Among others this has stimulated the development of metaanalysis tools in order to combine systemically information from various studies. We (Hoffmann and Hartung, 2006) and others () have put forward the idea to initiate a similar process for toxicology. Consequently, the first International Forum toward and Evidence-based Toxicology (EBT) was held in 2007 bringing together more than 170 stakeholders from four continents (www.ebtox.org, Griesinger et al., in press). 
Three main areas of interest emerged: a systematic review of methods (similar to the review of diagnostic methods in EBM); the development of tools to quantitatively combine results from different studies on the same or similar substances (analogous to meta-analyses); and the objective assessment of causation of health as well as environmental effects. This movement is still in its infancy. However, it promises to help with a key obstacle, that is, identifying the limitations of current approaches, and thus might be the door-opener for any novel approach. Due to its transparency and rigor in approach, judgments are likely more convincing than classical scientific studies and reviews. At the same time, the objective compilation of conclusions from existing evidence requires the development of tools, which will have broader impact, especially the meta-analysis type of methods or scoring tools for data quality. The latter has been furthered as a direct outcome of the EBT forum in a contract and expert consultation by the European Centre for the Validation of Alternative Methods (ECVAM) (Schneider et al., unpublished data). It aims to base the well-known ''Klimisch scores'' () for data quality on a systematic set of criteria, one set each for in vivo and for in vitro data. This might have enormous impact for the requested systematic use of existing data, for example, for the European REACH registration process of existing chemicals. It provides the means to include, reject or weigh existing information before decisions on additional test needs are taken, or for combined analysis. With regard to the novel toxicological approaches, however, most important will be that existing and new ways are assessed with the same scrutiny. Sound science is the best basis for the selection of tools.
Validating against methods believed to do a proper job is only betting and will always introduce uncertainty about the compromise made while forgetting about the compromise represented by the traditional method. The term of evidence-based toxicology must not be confused with weight-of-evidence approaches, which describe an often personal judgment of the different information available to come to an overall conclusion, for example, in genotoxicity. Such approaches have also been suggested in the validation process (), but they represent rather compromise solutions in the absence of final proof. In contrast, EBT aims to use all evidence reasonably available to come to a judgment in a transparent and objective manner. CHALLENGE 6: THE GLOBAL DIMENSION A central obstacle for the introduction of new approaches is globalization of markets. Globally acting companies want to use internationally harmonized approaches. This means that change to new approaches if not forced by legislation, will occur when the last major economic region has agreed on the new one. A teaching experience (Hartung, in press) was the Local Lymph Node Assay (LLNA) in mice (notably an in vivo alternative method) to replace guinea pig tests such as the Buehler and guinea pig maximization test. Since 1999, the LLNA is the preferred method in Europe reinforced by an animal welfare legislation, which requests such reduction and refinement methods to be used whenever possible, and since 2001, the test is OECD-accepted. Still, in 2008, we found that out of about 1450 new chemicals registered with skin sensitization data since 1999 in Europe, only about 50 had LLNA data, whereas the rest was still tested in the traditional tests. This illustrates the resistance to change even when ''only'' exchanging one animal test by another. We can imagine, how much more difficult this 20 will be for in vitro or in silico approaches, or the complex new approaches aimed for now. 
This means that efforts to create a novel approach need global buy-in. National solutions will quickly encounter nontechnical problems for implementation and acceptance. When lobbying for programs to identify pathways of toxicity (or more general of interaction of small molecules with cells) project, the aim should be for a global program from the start, for example, similar to the human genome project. We should, however, not be too negative about the impact of globalization. As discussed earlier (), this might as much constitute a driver for change as it is now an obstacle. International harmonization is an opportunity to export standards of safety assessments to trade partners (Bottini and Hartung, 2009). CHALLENGE 7: QUALITY ASSURANCE FOR THE NEW APPROACH For the global use of methods, it does not suffice to agree on how to test. If we want to accept approaches executed at other places, challengeable quality standards for performance and documentation of tests must exist, as they have been developed as OECD Good Laboratory Practice (GLP) or various ISO standards. GLP was, however, developed mainly for the dominating in vivo tests. Building on a workshop to identify the gaps for a GLP for in vitro approaches (), and the parallel development of Good Cell Culture Practices (;), now some OECD guidance for in vitro toxicology is available. However, for the complex methods envisaged for the new type of toxicity tests will require a much more demanding quality assurance. We must learn how to report properly the results from new methods like genomics or QSARs-we have to imagine, how difficult it will be to report in a standardized manner the whole process. A key problem will be the fluid nature of the new methodologies: standardization and validation requires freezing things in time, every change of method requires re-evaluation not possible for the complex methodologies. 
On the contrary, we see continuous amendments of in silico models or new technologies (e.g., gene chips). Shall we validate and implement a certain stage of development and close the door for further developments? This is exactly what is required for international agreements on methods-and it is difficult to imagine for complex methods still under development. Things would be easy if a new regulatory toxicology would become available at once-we might then compare old and new and decide to change. But we will continue to receive bits and pieces ( Fig. 1) as we have already experienced for a while. When should we make a major change and not just add and replace patches? What is not clear is, where the mastermind for change will come from. Which group or institution will lead us through the change? Given the substantial efforts for the development of each piece, we can not wait for their implementation until everything is ready. The first of two solutions is to implement the new methods in parallel to gain experience with the new without abandoning the old. Beside the costs, this will create the problem of what to do with discrepant information. Although we need this on the one hand (or we will not result in something new), we will not be able to neglect any indications of hazard from new tests (we have lost our innocence), even though they have not yet taken over. The second opportunity is to start with those areas where we have new problems and explore the new opportunities. This might include new health endpoints such as endocrine disruption, developmental neurotoxicity, respiratory sensitization or new products such as biologicals, genetically modified organisms, nanoparticles, or cell therapies. But in both cases we might fall into the trap of just adding new patches without substantial change. This means we have to somehow to organize a transition. 
Many people working at different angles of the whole will not create the ''new deal.'' CHALLENGE 9: HOW TO ORGANIZE TRANSITION? Beside the technological challenge, we have identified the need for systematic combination of approaches (integrated testing), and a program to assess objectively current approaches, to validate them and to implement them. This program requires out-of-the-box thinking, that is, intellectual steering (Fig. 2). As a first step, five professorships for alternative approaches have been created in recent years with the financial support of the Doerenkamp-Zbinden foundation (http://www.doerenkamp.ch/en/) (e.g., at the universities of Erlangen, Konstanz, Utrecht, Geneva, and most recently, Johns Hopkins in Baltimore). Moreover, a Transatlantic Think Tank of Toxicology (T4) was created between the toxicological chairs of these institutions, which aims to collaborate on dedicated studies and analyses, and workshops to support the paradigm shift in toxicology. Certainly this is only the first little step, but it might form a nucleus for further initiatives. Similarly, the Forum series in Toxicological Sciences, and discussions at the SOT meetings, furthers the shaping and sharpening of ideas. CHALLENGE 10: MAKING IT A WIN/WIN/WIN SITUATION Three major stakeholders will have to collaborate to create the new toxicology, that is, academia, regulators and the regulated communities in industry. This collaboration is still more an exception (for example the European Partnership for Alternative Approaches (EPAA), between 40 companies, 7 trade associations, and the European Commission) than the rule (Bottini and Hartung, 2009; Hartung, 2008c). Academia has not been involved, although research funding and emerging technologies may help to increase academic engagement.
The time for validation and acceptance of new methods of about one decade makes this area of only little attraction for academics, and most are not in a dialogue that would enable them to understand the needs of industry and regulators. The exchange between industry and regulators is also often poor, perhaps driven by concern that providing more information only gives more opportunity for further requests. In the United States, we lack both the public/private partnership and the research funding into alternative approaches. By basing the novel toxicology less on animal welfare and more on sound science considerations, this might change in the future. The sheer dimensions of the tasks ahead will require a trans-disciplinary, trans-national, trans-stakeholder, and trans-industrial-sector approach. Information hubs such as AltWeb (http://altweb.jhsph.edu/), AltTox (http://www.alttox.org/), EPAA (http://ec.europa.eu/enterprise/epaa/), EBTox (http://www.ebtox.org), ECVAM (http://ecvam.jrc.it/), and the Center for Alternatives to Animal Testing (http://caat.jhsph.edu/) have a key role here. There is gain for all players, including the following: the challenge of the development of new approaches; the better understanding of limitations of our assessments; the likely development of safer products with new test approaches; and the international harmonization prompted by a major joint effort. There is economic gain as well (Bottini and Hartung, 2009), but while we are talking broadly about science, ethics and politics, this has not been sufficiently addressed. The stairway to ''Regulatory Toxicology version 2.0'' is steep, but the goal merits the effort.
#pragma once #include "RenderWindow.h" #include "Keyboard/KeyboardClass.h" #include "Mouse/MouseClass.h" #include "Graphics/Graphics.h" class WindowContainer { public: WindowContainer(); LRESULT WindowProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam); protected: RenderWindow render_window; KeyboardClass keyboard; MouseClass mouse; Graphics gfx; private: };
The Advantages and Feasibility of Externally Actuating a High-speed Rotary On/Off Valve The effective displacement of a fixed-displacement pump/motor can be made virtually variable with the addition of a high-speed on/off valve, using concepts borrowed from power electronics. To achieve both high speed and high efficiency, a novel 2 degree-of-freedom (DOF) high-speed rotary on/off valve is being developed at the University of Minnesota. This paper investigates the effect of actuation strategies on valve efficiency when implementing the valve system on a hydraulic hybrid passenger vehicle. Optimization results show that when the input flow rate through the valve varies, rotating the valve with an external actuator produces higher efficiency than relying on self-spinning, i.e. designing the valve to function as a turbine. A 2 DOF external driving mechanism is proposed for implementing external actuation on a prototype valve, and experimental results are presented that validate the functionality of the design.
Delvey, whose real name is Anna Sorokin, will be deported at the end of her trial or sentence. Anna Sorokin, a German national who came to be known around high society in New York City as Anna Delvey, will be deported back to her home country for scamming banks and individuals, regardless of the outcome of her trial. The Daily Mail says that Anna Delvey came to the United States initially under the visa waiver program, but she has overstayed that program which allows people from 38 countries to visit the U.S. for up to 90 days without a visa. Rachael Yong Yow, a spokeswoman with U.S. Immigration and Customs Enforcement (ICE) says that the department of corrections has been instructed to hand over the German national as soon as the criminal proceedings are over, despite the outcome. Sorokin, who reinvented herself as Anna Delvey, an heiress daughter of a make-believe Russian billionaire, started an organization called the “Anna Delvey Foundation” which she used to skip out on bills at hotels and loan payments around the city, signing up for overdrafts with more than one bank based on fraudulent bank statements. In 2017, Anna Sorokin was arrested and charged with ten counts of larceny, notifying her that her alleged crimes are a deportable offense as her thefts amount to over $275,000. Sorokin’s appearance in court was a dressed down version of her former persona, complete with sneakers and her hair pulled back in a ponytail. The Gothamist says that Sorokin, who is being called a “social grifter” is being held at Rikers Island after turning down a plea deal in December which would have allowed her to be deported immediately back to Germany. Several hoteliers are among the people expected to testify against Sorokin, who is being accused of staging the con, according to The New York Post. 
Sorokin’s story has drawn the interest of several actors, writers, and directors who want to bring the tale to the big screen and/or Netflix, all based on features in The New Yorker from the time of the German national’s arrest.
Imaging diagnosis of mediastinal tumors and retroperitoneal space While working in various oncological hospitals in Kazakhstan, Ukraine and Russia took place to study diagnostic possibilities also go other x-ray method. If Kazakhstan (Kzyl-Orda regional Oncology Center) it was a normal x-ray and grafia, Ukraine (Kyiv n/and rentgenoradiologicheskij and the Cancer Institute, Kharkiv Research Institute of General and emergency surgery) already applied tomografo grafija, rentgeno kinematografija, double and triple contrast abdominal Neoplasms after the introduction of gas into the retroperitoneal space and mediastinum. In the Krasnodar region (krajonkodispanser, regional hospital, and emergency hospital) added to computed tomography. I must say that in all of these medical institutions concentrated a large number of patients with cancer of different localizations. This allowed us to make an objective assessment of all the listed beam diagnostic methods. Oppose them to each other, it would be a mistake and with economic and practical point of view. Should rationally use each of them by necessity.13 Gazokontrastnye methods are particularly important for the diagnosis of germination of cancer of the esophagus, pancreas, colon in the adjacent organs and tissues, as well as for identifying Mediastinum and retroperitoneal sarcomas space.25 Aggressiveness of esophageal cancer esophageal localization features explained in the mediastinum, close contact with zhiznennovazhnymi authorities, resulting in the rapid germination of cancer beyond its own tissues as well as metastasizing.17 Early detection saves patient tumors are sprouting not only from vain operations accelerates Cancer Metastasis, but allows timely resort to radiotherapy treatment and thereby prolong his life.4 Introduction While working in various oncological hospitals in Kazakhstan, Ukraine and Russia took place to study diagnostic possibilities also go other x-ray method. 
If Kazakhstan (Kzyl-Orda regional Oncology Center) it was a normal x-ray and grafia, Ukraine (Kyiv n/and rentgenoradiologicheskij and the Cancer Institute, Kharkiv Research Institute of General and emergency surgery) already applied tomografo grafija, rentgeno kinematografija, double and triple contrast abdominal Neoplasm's after the introduction of gas into the retroperitoneal space and mediastinum. In the Krasnodar region (krajonkodispanser, regional hospital, and emergency hospital) added to computed tomography. I must say that in all of these medical institutions concentrated a large number of patients with cancer of different localizations. This allowed us to make an objective assessment of all the listed beam diagnostic methods. Oppose them to each other, it would be a mistake and with economic and practical point of view. Should rationally use each of them by necessity. 1-3 Gazokontrastnye methods are particularly important for the diagnosis of germination of cancer of the esophagus, pancreas, colon in the adjacent organs and tissues, as well as for identifying Mediastinum and retroperitoneal sarcomas space. Aggressiveness of esophageal cancer esophageal localization features explained in the mediastinum, close contact with zhiznennovazhnymi authorities, resulting in the rapid germination of cancer beyond its own tissues as well as metastasizing. Early detection saves patient tumors are sprouting not only from vain operations accelerates Cancer Metastasis, but allows timely resort to radiotherapy treatment and thereby prolong his life. 4 Materials and methods The number of cancer patients treated in medical institutions referred to above, amounts to thousands, and in one article reflecting all nuances of radiation Diagnostics of all disease diseases is impossible. So for a detailed analysis of the effectiveness of radiation means selected only one disease is a cancer of the esophagus. 
The same cancer of the pancreas is considered only in the light of modern achievements of radio diagnostics. Of the 711 patients referred to a medical institution 18 of age to the age of 93. The youngest of them was a resident of Kazakhstan. Men were almost 2.5 times more than women. All of them were in a State of dramatic mental oppression, up to complete indifference to their fate. And, as a rule, it is easy to agree on any medical manipulations. During the initial screening and diagnosis of esophagus picture was installed at 591(83.1%) patient. But these techniques allowed catching only the brightest causes dysphagia is the nature and kind of narrowing of the authority inherent in one form or another, does not reflect the nature of the tumor growing in the adjacent organs and tissues. Before the advent of computer tomography, it couldn't be produced using gazokontrastnyh methods. The most common routes of gas in the mediastinum (usually carbon dioxide) are methods of Reeves and Kazan. When the first of them, gas in volume 1500-2000ml is injected into the retroperitoneal space using prikopchikovoj puncture. To do this, under local anesthesia and controlled by thumb, halfway between the tailbone and the anus is embedded in the needle behind prjamokishechnuju fiber 5-6 depth cm. On it slowly injected specified gas volume and patient forced to walk. Via 30-40 minutes mezhfascialnym gas spaces of retroperitoneal fiber penetrates the back mediastinum, shrouding the esophagus. The second gas (500-1000ml) injected into the anterior mediastinum puncture via jugular clipping. He then spreads throughout the sredosteniju. Beam study of esophagus, pancreas, and other organs and tissues are carried out via 30-40 minutes after this manipulation. The lower third of the esophagus cancer further resort to the introduction of gas (up to 1000ml) into the free abdomen, IE the laying of pneumoperitoneum, followed by a double or a triple contrasting his clearance. 
When double-patient took only barium suspension, while triple-he still fanned by those inside through a thin probe. These methods have dramatically intensified the contrast abdominal esophagus and Cardiac of the stomach, and in the application of computer tomography allow to clearly define and tumor invasion into adjacent tissues. These techniques were performed at 260 patients.. Of these, 184(70.7%) It was determined that the tumor operabelna and radical surgery was performed (Figure 1 & Figure 2). Currently, the leading role in the diagnosis of pancreatic cancer is owned by computed tomography. But, as previously speculated on the existence of the disease is based on observation of changes the contours of the stomach and duodenum when performing radiography of these bodies, i.e. their deformation by compression by the tumor. For this reason, tissue contrast enhancement-by introducing gas zabrjushinno and vnutribruchinno, is of great diagnostic value. However, without a Visual inspection of this gland and biopsy to resort to radical surgery is dangerous and with legal and moral side. Indeed, the increase in size of the body may be and in chronic psevdotumoroznom acute pancreatitis in which iron is not removed. Earlier, to avoid severe errors, sick even offered trial laparotomy. Currently replaced by laparoscopy. Computed tomography (CT), after the introduction of the gas in the specified space, was performed at 37 patients with suspected pancreatic cancer. That it has good information, verified by the example of operative treatment 13(35.1%) these patients-they have during the operation confirmed the tumor operabelnost, which was supposed to be following the execution of this type of radiation survey. The remaining 24(64.9%) patients experienced clear signs of cancer neoperabelnosti of that body. 
Results and discussion The results of the 260 patients, which was introduced by gas or in the mediastinum, or into the abdominal cavity, with the subsequent, or dual (introduction only barium masses in the lumen of the esophagus) or triple (introduction of barium and with inflation lumen body inside using probe) have been obtained the following results. At 27(13.8%) from 260 to exclude the germination of esophageal cancer in adjacent organs and tissues, and then perform the radical surgery. At 16(6.1%) patients with complaints bring on dysphagia, install benign nature of this symptom, including: 8-deviation of the esophagus, 3-like constriction, 4-diverticula. Thus, modern beam methods of diagnosis are an effective tool in identifying the causes of human frailty and to develop proper algorithm for his treatment. Conclusion Currently, determining operability cancerous tumors of the esophagus and pancreas is the most informative computer tomography, but it should be conducted after the introduction of gas into the mediastinum and zabrjushinnuju fiber. This increases the contrast of fabrics and allows you to define their mobility, which is an essential factor in the success of the surgery.
OAKLAND — A 14-time felon accused of using his senior citizens home as a base for selling narcotics is being prosecuted on four felony drug counts, according to court documents and authorities. Landry Daniels, 68, was arrested June 21 and charged June 23 with two counts of possession for sale of a controlled substance and two counts of transportation of a controlled substance for sale. He pleaded not guilty to the charges Wednesday and was told to appear Aug. 30 for a pre-trial hearing. Daniels is currently free on $60,000 bail. Daniels’ attorney did not return calls seeking comment. According to court documents, on the day Daniels was arrested, police seized about $20,000 worth of cocaine, black tar heroin and marijuana from his vehicle and apartment in the 66-unit Glen Brook Terrace Satellite Senior Homes 4030 Panama Court where he lived at the time. Police also seized $471 from Daniels they believe are proceeds from drug sales, according to court documents. Daniels is accused of selling drugs to non-residents in the parking area of the senior home, and he would also make deliveries, authorities said. He recently moved from the complex. A representative of the complex said in an email earlier this week that “Unfortunately we are unable to share any information at this time.” Authorities began their investigation several weeks ago after receiving anonymous tips about Daniels. According to the documents, undercover police officers saw him making suspected sales from his KIA SUV about 6:45 p.m. June 21 outside the apartment complex, which is near Piedmont Avenue. The buyers were arrested and Daniels was stopped by police in the 4000 block of Emery Street in Emeryville, according to court records. The SUV was searched, and in a leather bag in the center console, a plastic sandwich bag containing 14.1 grams of suspected cocaine and 5.6 grams of suspected black tar heroin was found, the documents state. The cash was found in one of Daniels’ pants pockets. 
A search warrant was served later at his apartment where 40 grams of suspected cocaine, 25.5 grams of suspected black tar heroin and 0.8 ounces of suspected marijuana were found, the documents say. According to court documents, Daniels’ prior convictions are between 1978 and 2012. They include convictions for drugs, burglary, petty theft with priors, and receiving stolen property. He received prison terms for nine of the convictions and was placed on probation for the others, including the most recent in August 2012 for burglary.
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created on Fri Aug 2 10:45am 2019 Generating averaged power spectra for W = 1 to W = 300 for not demodulated J1231-1411 data! """ from __future__ import division, print_function import numpy as np import time from tqdm import tqdm import glob import subprocess import Lv0_dirs import Lv2_average_ps_methods,Lv2_average_merge_ps_methods import Lv3_detection_level import matplotlib.pyplot as plt """ for W in range(1,301): print('W = ' + str(W)) pyfile = 'Lv3_average_ps_main.py' pyfile_contents = open(pyfile,'r') contents = pyfile_contents.read().split('\n') pyfile_contents.close() newstring_W = " W = " + str(W) + " #number of consecutive Fourier bins to average over" newstring_filename = " pngname = '" + Lv0_dirs.NICERSOFT_DATADIR + 'merged_events/merged000005/W_dir/W_' + str(W).zfill(3) + ".png'" pyfile_contents = open('Lv3_average_ps_main.py','w') for j in range(len(contents)): if j != 51 and j != 123: pyfile_contents.write(contents[j] + '\n') if j == 51: pyfile_contents.write(newstring_W + '\n') if j == 123: pyfile_contents.write(newstring_filename + '\n') pyfile_contents.close() execfile("Lv3_average_ps_main.py") """ from scipy.optimize import curve_fit def neg_sqrt(x,a,n): return a*x**(n) stds = np.array([0.17151563,0.13250076,0.11201556,0.09895773,0.08971697,0.08257810, 0.07702619,0.07231257,0.06853172,0.06528318,0.06225732,0.05970226, 0.05745892,0.05547180,0.05373177,0.05203332,0.05061853,0.04919654, 0.04793032,0.04674205,0.04573258,0.04471104,0.04380930,0.04283883, 0.04198711,0.04135160,0.04051614,0.03974083,0.03923577,0.03858371, 0.03797808,0.03738553,0.03672342,0.03609305,0.03566748,0.03519816, 0.03472995,0.03436365,0.03391587,0.03346646,0.03316299,0.03278463, 0.03240944,0.03212927,0.03173375,0.03134329,0.03099390,0.03064317, 0.03035390,0.02997892,0.02994112,0.02945002,0.02934270,0.02907256, 0.02881691,0.02857167,0.02828932,0.02804486,0.02786123,0.02764023, 
0.02733023,0.02713993,0.02689832,0.02676551,0.02647976,0.02632849, 0.02609446,0.02599551,0.02584631,0.02556697,0.02529887,0.02517167, 0.02501944,0.02495649,0.02467688,0.02468062,0.02440038,0.02431525, 0.02403045,0.02387878,0.02370795,0.02368280,0.02347985,0.02331529, 0.02332113,0.02317480,0.02305960,0.02309585,0.02284412,0.02265442, 0.02261264,0.02252684,0.02238650,0.02238772,0.02206224,0.02200986, 0.02188044,0.02183891,0.02172427,0.02149658]) N = np.arange(1,101) relation = stds[0]/np.sqrt(N) p,cov = curve_fit(neg_sqrt,N,stds,p0=[0.2,-0.5]) print(p[0],p[1],np.sqrt(cov)) plt.plot(N,stds,'rx-') plt.plot(N,relation,'bx-') plt.plot(N,neg_sqrt(N,p[0],p[1]),'kx-') plt.xlabel('N',fontsize=12) plt.ylabel('Standard Dev.',fontsize=12) plt.legend(('Actual','Expected','curvefit'),loc='best') plt.show()
// create a class
/**
 * Demonstrates checking whether an integer is a palindrome by reversing its
 * decimal digits and comparing the result against the original value.
 */
public class PalindromeInteger {

    /**
     * Returns true if {@code value} reads the same forwards and backwards in
     * base 10. Matches the original loop's behavior: 0 is a palindrome;
     * negative values are not (the digit loop never runs, leaving the
     * reversal at 0).
     *
     * @param value the integer to test
     * @return true if the decimal digits form a palindrome
     */
    public static boolean isPalindrome(int value) {
        int reversed = 0;
        int remaining = value;
        while (remaining > 0) {
            // pull the last digit off and append it to the reversal
            reversed = (reversed * 10) + remaining % 10;
            remaining = remaining / 10;
        }
        return value == reversed;
    }

    // create a method
    public static void main(String[] args) {
        // declare the sample input
        int inp = 4554;
        // compare using conditional statement for final result
        if (isPalindrome(inp))
            System.out.println(inp + " is a Palindrome");
        else
            // BUG FIX: original message lacked the space before "is"
            System.out.println(inp + " is not a Palindrome");
    }
}
1. Field of the Invention The present invention relates to an image forming method of correcting skew of a sheet picked up from a sheet retaining unit before forming an image on this sheet, and an image forming apparatus employing this method, and, more particularly, to an image forming method of surely accomplishing skew correction and an image forming apparatus employing this method. 2. Description of the Related Art Image forming apparatuses, such as a copying machine, a printer and a facsimile, employ a latent image forming type recording apparatus like an electrophotographing apparatus, due to a recent demand for image recording on normal sheets of paper. According to this image forming principle, after a photosensitive drum as a latent image carrier is precharged, the photosensitive drum is exposed to a light image to have an electrostatic latent image formed thereon. This electrostatic latent image is developed by a developing unit so that a toner image is formed on the photosensitive drum. This toner image is then transferred onto a sheet of paper. In this image forming apparatus, sheets on which an image is to be formed are retained in a sheet cassette, and should be picked up therefrom for a later imaging process. The sheets may be skewed when being picked up from the sheet cassette. When an image is formed on a skewed sheet, the image is also formed askew. To prevent it, the image forming apparatus is provided with a mechanism of correcting skew of the picked-up sheet. FIGS. 1A and 1B are explanatory diagram of prior art, the former showing the structure of the prior art and the latter presenting a time chart of the prior art. As shown in FIG. 1A, a sheet is picked up by a pickup roller 92 from a sheet cassette 91 retaining a plurality of sheets, and abuts against resist rollers 93. As the leading edge of the sheet abuts against the resist rollers 93, the sheet is bent and tends to return to the original form. 
This restoring force eliminates the skewing of the sheet. Thereafter, the resist rollers 93 are rotated to feed the sheet forward. A toner image formed on a photosensitive drum 90 of the image forming mechanism is transferred onto the sheet by a transfer roller 94. In this image forming apparatus, the image forming mechanism starts the image forming operation in accordance with the pickup of the sheet. The image forming apparatus therefore needs a mechanism for detecting the sheet pickup. Because foreign matter such as sheet powder is likely to be produced and a developer scatters in the apparatus which forms a toner image, an inexpensive and small photosensor cannot be used as the sheet detecting mechanism. Accordingly, a mechanical sheet sensor BS is used as the sheet detecting mechanism. According to the prior art, this sheet sensor BS is provided at the subsequent stage of the resist rollers 93 to detect a sheet feed through the resist rollers 93 so that the supply of the sheet to the image forming mechanism is checked. That is, as shown in FIG. 1B, when the pickup roller 92 is driven for a given time, a sheet abuts against the resist rollers 93 and skewing is thus considered as corrected. Then, the resist rollers 93 are driven to feed the sheet while the photosensitive drum 90 is rotated, and image writing by a laser beam starts, thus forming an image. At this time, if the sheet sensor BS does not detect any sheet within a given period of time after the driving of the resist rollers 93, the rotation of the resist rollers 93 and photosensitive drum 90, which have been driven, is stopped as indicated by the broken lines in FIG. 1B. But, the skew correction by the resist rollers 93 needs a constant and stable amount of bending of sheets (resist amount). Conventionally, this resist amount is determined by the expected amount of rotation of the pickup roller 92 on the premise that the picked-up sheet always abuts against the resist rollers 93. 
If the pickup roller 92 and a sheet slip on each other, or a similar incident occurs at the time of sheet pickup, the resist amount of the sheet becomes non-uniform or insufficient, so that the sheet may be fed forward without skew correction. Further, since non-arrival of the sheet at the resist rollers 93 due to a sheet pickup failure or the like is detected by the sheet sensor BS, such detection cannot be accomplished before the resist rollers 93 and the photosensitive drum 90 are driven. This necessitates wasteful driving of the photosensitive drum, which is one factor shortening the service life of the photosensitive drum.
<filename>samples/cortex/m3-iar/stm32f2xx/6-round-robin/src/pdev.h //****************************************************************************** //* //* FULLNAME: Single-Chip Microcontroller Real-Time Operating System //* //* NICKNAME: scmRTOS //* //* PROCESSOR: ARM Cortex-M3 //* //* TOOLKIT: EWARM (IAR Systems) //* //* PURPOSE: Peripheral Devices Header File //* //* Version: v5.2.0 //* //* //* Copyright (c) 2003-2021, scmRTOS Team //* //* Permission is hereby granted, free of charge, to any person //* obtaining a copy of this software and associated documentation //* files (the "Software"), to deal in the Software without restriction, //* including without limitation the rights to use, copy, modify, merge, //* publish, distribute, sublicense, and/or sell copies of the Software, //* and to permit persons to whom the Software is furnished to do so, //* subject to the following conditions: //* //* The above copyright notice and this permission notice shall be included //* in all copies or substantial portions of the Software. //* //* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, //* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF //* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. //* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY //* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, //* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH //* THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
//* //* ================================================================= //* Project sources: https://github.com/scmrtos/scmrtos //* Documentation: https://github.com/scmrtos/scmrtos/wiki/Documentation //* Wiki: https://github.com/scmrtos/scmrtos/wiki //* Sample projects: https://github.com/scmrtos/scmrtos-sample-projects //* ================================================================= //* //****************************************************************************** //* Cortex-M3/IAR sample by <NAME>, Copyright (c) 2015-2021 #ifndef PERIPHERAL_DEVICES_H #define PERIPHERAL_DEVICES_H #include <stdint.h> #include <scmRTOS.h> //------------------------------------------------------------------------------ // // Terminal device // struct TTerminalDevice { static const uint16_t TX_BUF_SIZE = 2048; static const uint16_t RX_BUF_SIZE = 128; static usr::ring_buffer<char, TX_BUF_SIZE, uint16_t> TxBuf; static char RxBuf[RX_BUF_SIZE]; static uint16_t RxIndex; static OS::TEventFlag NewLineIncoming; static void init(); INLINE static void send(const char c); static void send(const char *s); INLINE static void isr(); class TIsrLocker { public: INLINE TIsrLocker() { CR1 = USART2->CR1 & (USART_CR1_TXEIE | USART_CR1_RXNEIE); USART2->CR1 &= ~(USART_CR1_TXEIE | USART_CR1_RXNEIE); } INLINE ~TIsrLocker() { USART2->CR1 |= CR1; } private: uint32_t CR1; }; }; //------------------------------------------------------------------------------ #endif // PERIPHERAL_DEVICES_H
I knew it. Revisiting the 2016 gubernatorial election in North Carolina, those who have kept up with my writing on the topic know I was (and still am) incensed over not only the fact that North Carolina Republicans allowed a Democrat back into the governorship, but that Republican-led county election boards were dragging their feet about handling requested recounts. What’s more, the State Board of Elections was even worse, flatly refusing to investigate any of the multiple complaints of fraudulent activity with the ballots. We lost an amazing and effective governor in Pat McCrory, and in return, got this droopy mess named Roy Cooper, who, if not for the Republican majority in the General Assembly, would likely already be well on his way to proving himself the most corrupt, incompetent governor to ever inhabit the Governor’s Mansion in Raleigh (there’s still time). Now, there seems to be a bit of supporting evidence for former Governor Pat McCrory and those who wanted to see an investigation of what felt so strongly of a corrupted vote in the state. According to an audit released on Friday by the State Board of Elections, there were over 500 votes that they found which were cast illegally. From the Washington Examiner: Most of the 508 voters were active felons at the time of the election and only made up a small percentage of the total 4.8 million ballots cast. Another 24 ballots were believed to have been cast by individuals who voted multiple times. Several things are going on here that need to be noted. First of all, while 532 votes would not have turned the election, it is absolute proof that there was foul play in the 2016 election, just as many of us suspected. The Board of Elections dismissal of concerns of the citizens, especially in light of these new facts is nothing short of malpractice. An audit didn’t cover every county, so if they found over 500 in an audit, a more thorough investigation would likely reveal more. 
By “more,” I mean more felons voting, more people voting multiple times, more out of state voters… you name it. Which brings me to my second point. Weeks before the 2016 election, a liberal appeals court bent to the will of Democrats, who pushed their soft bigotry of low expectations on minorities in the Tar Heel state, and insisted that minorities had no clue how to get ID. To ask them to show ID at the voting booth was just oppressive and an attempt to stop the minority vote. Yeah, I don’t get that one, either. The state was even willing to help those who didn’t have an ID to get ID, but for Democrats, that wasn’t acceptable, and they fought against voter ID until it was overturned by the appeals court. If nothing else, this audit shows that there is a very real concern and that voter ID is needed to keep our elections fair and untainted. I will never just accept that Pat McCrory lost in 2016. There really were too many complaints of fraud that were ignored or swept aside, and now this audit is proving that those complaints may have had merit. Shame on our Board of Elections for not taking the integrity of North Carolina’s elections more seriously. Somewhere, I hope Pat McCrory feels a bit of vindication, even if it’s bittersweet.
<gh_stars>0
package eu.kalodiodev.controlmycar.services.jpa.security;

import eu.kalodiodev.controlmycar.domains.User;
import eu.kalodiodev.controlmycar.repositories.UserRepository;
import org.springframework.context.annotation.Primary;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.core.userdetails.UsernameNotFoundException;
import org.springframework.stereotype.Service;

import java.util.Optional;

/**
 * Spring Security {@link UserDetailsService} backed by the application's
 * {@link UserRepository}; users are identified by their e-mail address.
 */
@Primary
@Service
public class UserDetailsServiceImpl implements UserDetailsService {

    private final UserRepository userRepository;

    public UserDetailsServiceImpl(UserRepository userRepository) {
        this.userRepository = userRepository;
    }

    /**
     * Looks up the user whose e-mail matches the given username.
     *
     * @param email the e-mail address used as the login name
     * @return the matching user's details
     * @throws UsernameNotFoundException if no user has that e-mail
     */
    @Override
    public UserDetails loadUserByUsername(String email) throws UsernameNotFoundException {
        return userRepository.findByEmail(email)
                .orElseThrow(() -> new UsernameNotFoundException(email));
    }
}
from collections import Counter


def solve(n, s):
    """Rearrange ``s`` into ``n`` identical consecutive blocks.

    Each block contains every character of ``s`` (in sorted order) repeated
    count//n times, so the result is a permutation of ``s`` consisting of
    ``n`` equal blocks.

    Args:
        n: number of identical blocks required.
        s: input string.

    Returns:
        The rearranged string, or ``"-1"`` when impossible (some character's
        count is not divisible by ``n``).
    """
    counts = Counter(s)
    # Every character must split evenly across the n blocks.
    if any(c % n for c in counts.values()):
        return "-1"
    # One block: each character (sorted) repeated its per-block count.
    # NOTE: the original's extra min-count >= n check was redundant — any
    # positive count divisible by n is automatically >= n.
    unit = "".join(ch * (counts[ch] // n) for ch in sorted(counts))
    return unit * n


if __name__ == "__main__":
    n = int(input())
    s = input()
    print(solve(n, s))
<reponame>AvengerF12/cpp-programming-s2
#pragma once
#include <string>
#include <vector>
#include <iostream>

// NOTE(review): 'using namespace std' in a header leaks into every file that
// includes it; consider qualifying names with std:: instead.
using namespace std;

// A darts player: identity, throwing accuracies, current score, and running
// match statistics, plus the throw simulation routines (implemented in the
// .cpp, not visible here).
class Player
{
private:
	string playerName;     // display name
	int bullAccuracy;      // accuracy when aiming at the bull (presumably a percentage -- TODO confirm)
	int singleAccuracy;    // accuracy when aiming at single/double/treble segments
	int playerScore;       // current game score

public:
	// Match bookkeeping, public so game logic can update it directly
	int gamesWon;
	int setsWon;
	int matchesWon;
	int nTurns;            // turns taken

	//getters (return by reference, so callers can also mutate)
	string& getName();
	int& getScore();
	int& getBullAccuracy();
	int& getSingleAccuracy();

	//setters
	void setName(string& name);
	void setScore(int score);
	void setBullAccuracy(unsigned int bull_accuracy);
	void setSingleAccuracy(unsigned int single_accuracy);
	// presumably sets every accuracy field at once -- confirm in Player.cpp
	void setAllAccuracy(unsigned int all_accuracy);

	//throw functions: each simulates one dart against theDartboard and
	//returns the score obtained; bIter/target select the aimed segment.
	int bull(const vector<int>& theDartboard);
	int singleThrow(const vector<int>& theDartboard, vector<int>::const_iterator bIter, unsigned int target);
	int doubleThrow(const vector<int>& theDartboard, vector<int>::const_iterator bIter, unsigned int target);
	int trebleThrow(const vector<int>& theDartboard, vector<int>::const_iterator bIter, unsigned int target);

	Player();
	~Player();
};
/**
 * The unit test for EntityQueryManagerImpl.
 * @author John
 *
 */
public class EntityQueryManagerImplTest {

	@Mock
	NodeQueryDaoV2 mockDao;
	@Mock
	NodeQueryDaoFactory nodeQueryDaoFactory;
	@Mock
	AuthorizationManager mockAuthorizationManager;
	@Mock
	UserInfo mockUser;

	EntityQueryManagerImpl manager;
	// Sample result used by the translate() round-trip tests.
	EntityQueryResult result;
	NodeQueryResults sampleResutls;
	BasicQuery query;
	// Benefactor ids returned by the DAO for the query's scope.
	Set<Long> queryBenefactorIds;
	// Subset of queryBenefactorIds the (non-admin) user is authorized to see.
	Set<Long> authroizedBenefactorIds;
	// Single-row page returned by the mocked DAO query execution.
	List<Map<String, Object>> pageResult;
	long count;

	@Before
	public void before(){
		MockitoAnnotations.initMocks(this);
		manager = new EntityQueryManagerImpl();
		// Inject the mocked collaborators into the manager's private fields.
		ReflectionTestUtils.setField(manager, "nodeQueryDaoFactory", nodeQueryDaoFactory);
		ReflectionTestUtils.setField(manager, "authorizationManager", mockAuthorizationManager);
		when(nodeQueryDaoFactory.createConnection()).thenReturn(mockDao);
		// Sample
		result = new EntityQueryResult();
		result.setActivityId(null);
		result.setCreatedByPrincipalId(123L);
		result.setCreatedOn(new Date(1L));
		result.setModifiedByPrincipalId(456L);
		result.setModifiedOn(new Date(2));
		result.setEntityType(EntityType.table.name());
		result.setEtag("etag");
		result.setName("aName");
		result.setParentId("syn99");
		result.setVersionNumber(0L);
		result.setId("syn456");
		result.setProjectId(888L);
		result.setBenefactorId(111L);

		query = new BasicQuery();
		query.setFrom("project");

		// default to non-admin
		when(mockUser.isAdmin()).thenReturn(false);

		queryBenefactorIds = Sets.newHashSet(1L,2L,3L);
		when(mockDao.getDistinctBenefactors(any(QueryModel.class), anyLong())).thenReturn(queryBenefactorIds);
		authroizedBenefactorIds = Sets.newHashSet(2L,1L);
		when(mockAuthorizationManager.getAccessibleBenefactors(mockUser, queryBenefactorIds)).thenReturn(authroizedBenefactorIds);

		pageResult = new LinkedList<Map<String,Object>>();
		Map<String, Object> row = new HashMap<String, Object>();
		row.put(NodeField.NAME.getFieldName(), "name1");
		row.put(NodeField.ID.getFieldName(), 123L);
		row.put(NodeField.PARENT_ID.getFieldName(), 456L);
		pageResult.add(row);
		when(mockDao.executeQuery(any(QueryModel.class))).thenReturn(pageResult);
		count = 100;
		when(mockDao.executeCountQuery(any(QueryModel.class))).thenReturn(count);
	}

	@Test
	public void testTranslateEntityQueryResultRoundTrip(){
		// Map -> EntityQueryResult translation should reproduce the sample exactly.
		Map<String, Object> entityMap = toMap(result);
		EntityQueryResult clone = manager.translate(entityMap);
		assertEquals(result, clone);
	}

	@Test
	public void testTranslateValueString(){
		StringValue sv = new StringValue();
		String in = "a string";
		sv.setValue(in);
		String out = (String) manager.translateValue(sv);
		assertEquals(in, out);
	}

	@Test
	public void testTranslateValueDate(){
		// Dates are translated to their epoch-millisecond Long representation.
		DateValue value = new DateValue();
		Date in = new Date(99);
		value.setValue(in);
		Long out = (Long) manager.translateValue(value);
		assertEquals(new Long(in.getTime()), out);
	}

	@Test
	public void testTranslateValueInteger(){
		IntegerValue value = new IntegerValue();
		Long in = 99L;
		value.setValue(in);
		Long out = (Long) manager.translateValue(value);
		assertEquals(in, out);
	}

	@Test
	public void testTranslateListSizeOne(){
		// A single-element list is unwrapped to the element itself.
		List<Value> list = new ArrayList<Value>(1);
		StringValue sv = new StringValue();
		String in = "a string";
		sv.setValue(in);
		list.add(sv);
		String out = (String) manager.translateValue(list);
		assertEquals(in, out);
	}

	@Test
	public void testTranslateListMoreThanOne(){
		List<Value> list = new ArrayList<Value>(2);
		//1
		StringValue sv = new StringValue();
		String in1 = "one";
		sv.setValue(in1);
		list.add(sv);
		//2
		StringValue sv2 = new StringValue();
		String in2 = "two";
		sv2.setValue(in2);
		list.add(sv2);
		// Should be a list of strings
		List<String> out = (List<String>) manager.translateValue(list);
		assertEquals(Arrays.asList(in1, in2), out);
	}

	/**
	 * Helper to create map for a result.
	 * NOTE(review): this mixes the 'results' parameter with the 'result' field for
	 * name/createdOn; callers always pass the field, so both refer to the same
	 * object in practice -- confirm before reusing with a different argument.
	 * @param results
	 * @return
	 */
	private Map<String, Object> toMap(EntityQueryResult results){
		Map<String, Object> map = new HashMap<String, Object>();
		map.put(EntityFieldName.id.name(), results.getId());
		map.put(EntityFieldName.name.name(), result.getName());
		map.put(EntityFieldName.parentId.name(), results.getParentId());
		map.put(EntityFieldName.eTag.name(), results.getEtag());
		map.put(EntityFieldName.createdOn.name(), result.getCreatedOn().getTime());
		map.put(EntityFieldName.createdByPrincipalId.name(), results.getCreatedByPrincipalId());
		map.put(EntityFieldName.modifiedOn.name(), results.getModifiedOn().getTime());
		map.put(EntityFieldName.modifiedByPrincipalId.name(), results.getModifiedByPrincipalId());
		map.put(EntityFieldName.activityId.name(), results.getActivityId());
		map.put(EntityFieldName.versionNumber.name(), results.getVersionNumber());
		map.put(EntityFieldName.benefactorId.name(), results.getBenefactorId());
		map.put(EntityFieldName.projectId.name(), results.getProjectId());
		EntityType type = EntityType.valueOf(results.getEntityType());
		map.put("nodeType", type.name());
		return map;
	}

	@Test
	public void testExecuteQueryNonAdminSelectStar(){
		when(mockUser.isAdmin()).thenReturn(false);
		// call under test
		NodeQueryResults results = manager.executeQuery(query, mockUser);
		assertNotNull(results);
		assertEquals(count, results.getTotalNumberOfResults());
		assertNotNull(results.getResultIds());
		assertEquals(1, results.getResultIds().size());
		assertEquals("syn123", results.getResultIds().get(0));
		assertNotNull(results.getAllSelectedData());
		assertEquals(1, results.getAllSelectedData().size());
		Map<String, Object> row = results.getAllSelectedData().get(0);
		assertNotNull(row);
		assertEquals("name1",row.get(NodeField.NAME.getFieldName()));

		ArgumentCaptor<QueryModel> queryCapture = ArgumentCaptor.forClass(QueryModel.class);
		// call to lookup the benefactors for the query.
		verify(mockDao).getDistinctBenefactors(queryCapture.capture(), eq(EntityQueryManagerImpl.MAX_BENEFACTORS_PER_QUERY+1));
		QueryModel model = queryCapture.getValue();
		assertNotNull(model);
		// The from clause should have been changed to a condition.
		ExpressionList where = model.getWhereClause();
		assertNotNull(where);
		List<SqlExpression> expressions = where.getExpressions();
		assertNotNull(expressions);
		assertEquals(2, expressions.size());
		SqlExpression expression = expressions.get(0);
		assertEquals("E.TYPE",expression.getLeftHandSide().toSql());
		assertEquals("project",expression.getRightHandSide());
		// call to get the sub-set of benefactorIds that the user can see.
		verify(mockAuthorizationManager).getAccessibleBenefactors(mockUser, queryBenefactorIds);
		// Capture the final query
		queryCapture = ArgumentCaptor.forClass(QueryModel.class);
		verify(mockDao).executeQuery(queryCapture.capture());
		model = queryCapture.getValue();
		assertNotNull(model);
		// another expression should be added to the query
		where = model.getWhereClause();
		assertNotNull(where);
		expressions = where.getExpressions();
		assertNotNull(expressions);
		assertEquals(3, expressions.size());
		// the last expression should be a filter on the benefactors the user can see.
		expression = expressions.get(2);
		assertEquals("E.BENEFACTOR_ID",expression.getLeftHandSide().toSql());
		assertEquals(Comparator.IN, expression.getCompare());
		assertEquals(authroizedBenefactorIds, expression.getRightHandSide());
		// This is a select * query so the annotations should be added.
		verify(mockDao).addAnnotationsToResults(pageResult);
		verify(mockDao).executeCountQuery(model);
	}

	@Test
	public void testExecuteQueryAdmin(){
		// setup an admin
		when(mockUser.isAdmin()).thenReturn(true);
		// call under test
		NodeQueryResults results = manager.executeQuery(query, mockUser);
		assertNotNull(results);
		// benefactor lookup should not occur
		verify(mockDao, never()).getDistinctBenefactors(any(QueryModel.class), anyLong());
		// the accessible benefactors should not be called.
		verify(mockAuthorizationManager, never()).getAccessibleBenefactors(any(UserInfo.class), anySetOf(Long.class));
	}

	@Test
	public void testExecuteQueryNotSelectStar(){
		// not a select *
		query.setSelect(Lists.newArrayList("foo"));
		// call under test
		NodeQueryResults results = manager.executeQuery(query, mockUser);
		assertNotNull(results);
		// annotations should only be added for select *
		verify(mockDao, never()).addAnnotationsToResults(anyList());
	}

	@Test (expected=IllegalArgumentException.class)
	public void testExecuteQueryOverLimit(){
		query.setLimit(EntityQueryManagerImpl.MAX_LIMIT+1);
		// call under test
		manager.executeQuery(query, mockUser);
	}

	@Test
	public void testExecuteQueryScopeTooBroad(){
		//setup too many benefactor ids
		queryBenefactorIds = new HashSet<>();
		for(long id =0; id<EntityQueryManagerImpl.MAX_BENEFACTORS_PER_QUERY+1; id++){
			queryBenefactorIds.add(id);
		}
		when(mockDao.getDistinctBenefactors(any(QueryModel.class), anyLong())).thenReturn(queryBenefactorIds);
		// call under test
		try{
			manager.executeQuery(query, mockUser);
			fail();
		}catch(IllegalArgumentException e){
			assertEquals(EntityQueryManagerImpl.SCOPE_IS_TOO_BROAD, e.getMessage());
		}
	}

	@Test
	public void testExecuteQueryNoBenefactors(){
		// return an empty set: the user cannot see any benefactor in scope.
		when(mockAuthorizationManager.getAccessibleBenefactors(mockUser, queryBenefactorIds)).thenReturn(new HashSet<Long>());
		NodeQueryResults results = manager.executeQuery(query, mockUser);
		assertNotNull(results);
		assertNotNull(results.getAllSelectedData());
		assertTrue(results.getAllSelectedData().isEmpty());
		assertNotNull(results.getResultIds());
		assertTrue(results.getResultIds().isEmpty());
		assertEquals(0L, results.getTotalNumberOfResults());
		// the query should not be executed.
		verify(mockDao, never()).executeQuery(any(QueryModel.class));
		verify(mockDao, never()).executeCountQuery(any(QueryModel.class));
		verify(mockDao, never()).addAnnotationsToResults(anyList());
	}

	/**
	 * Test for PLFM-4367
	 */
	@Test
	public void testExecuteQueryNoResults(){
		// return no results
		when(mockDao.executeQuery(any(QueryModel.class))).thenReturn(new LinkedList<Map<String,Object>>());
		// call under test
		NodeQueryResults results = manager.executeQuery(query, mockUser);
		assertNotNull(results);
		// annotations should not be added, since the list is empty.
		verify(mockDao, never()).addAnnotationsToResults(anyList());
	}
}
"""Split a sprite sheet ('A4kr.png') into a grid of numbered tile images.

The source image is divided into ``w`` columns by ``h`` rows; each cell is
saved next to this script as ``images/001.png`` .. ``images/NNN.png`` in
row-major order.
"""

import os

try:
    from PIL import Image
except ImportError:
    # Pillow is missing -- install it on the fly, then retry the import.
    import pip._internal as pip

    # BUG FIX: the PyPI distribution that provides the ``PIL`` package is
    # named "Pillow"; installing "PIL" fails. Also fixed the retry import,
    # which previously imported the misspelled name ``Imagezz``.
    pip.main(['install', 'Pillow'])
    from PIL import Image

# Directory containing this script (and the source sprite sheet).
resources_dir = os.path.dirname(__file__)

image = Image.open(os.path.join(resources_dir, 'A4kr.png'))
width, height = image.size

w = 5  # number of columns
h = 4  # number of rows
# Tile numbers 1..w*h, consumed one per saved cell (generalized from the
# hard-coded range(1, 21) so changing w/h keeps the numbering correct).
names = list(range(1, w * h + 1))

os.makedirs(os.path.join(resources_dir, 'images'), exist_ok=True)

for j in range(h):
    for i in range(w):
        # Crop cell (column i, row j); PIL accepts float box coordinates.
        cropped = image.crop((i * width / w, j * height / h,
                              (i + 1) * width / w, (j + 1) * height / h))
        cropped.save(os.path.join(resources_dir, 'images',
                                  str(names.pop(0)).zfill(3) + '.png'))
// Barrel entry point: re-export the package root's `FirstAid` binding as
// this module's default export.
export { FirstAid as default } from "../../";
/*********************                                                        */
/*! \file ProjectedSteepestEdge.cpp
 ** \verbatim
 ** Top contributors (to current version):
 **   Guy Katz, Duligur Ibeling
 ** This file is part of the Marabou project.
 ** Copyright (c) 2017-2019 by the authors listed in the file AUTHORS
 ** in the top-level source directory) and their institutional affiliations.
 ** All rights reserved. See the file COPYING in the top-level source
 ** directory for licensing information.\endverbatim
 **
 ** Projected steepest-edge pivot selection rule for the simplex engine.
 ** Maintains, for every non-basic variable, a reference weight gamma that
 ** approximates the steepest-edge norm; gamma is updated incrementally on
 ** every pivot and the whole reference space is periodically reset.
 **/

#include "Debug.h"
#include "FloatUtils.h"
#include "ITableau.h"
#include "MStringf.h"
#include "ProjectedSteepestEdge.h"
#include "MarabouError.h"
#include "Statistics.h"
#include "TableauRow.h"

ProjectedSteepestEdgeRule::ProjectedSteepestEdgeRule()
    : _referenceSpace( NULL )
    , _gamma( NULL )
    , _work1( NULL )
    , _work2( NULL )
    , _AColumn( NULL )
    , _iterationsUntilReset( GlobalConfiguration::PSE_ITERATIONS_BEFORE_RESET )
    , _errorInGamma( 0.0 )
{
}

ProjectedSteepestEdgeRule::~ProjectedSteepestEdgeRule()
{
    freeIfNeeded();
}

// Release all owned buffers. _AColumn is intentionally not freed here: it is
// a borrowed pointer into the tableau's sparse column storage.
void ProjectedSteepestEdgeRule::freeIfNeeded()
{
    if ( _referenceSpace )
    {
        delete[] _referenceSpace;
        _referenceSpace = NULL;
    }

    if ( _gamma )
    {
        delete[] _gamma;
        _gamma = NULL;
    }

    if ( _work1 )
    {
        delete[] _work1;
        _work1 = NULL;
    }

    if ( _work2 )
    {
        delete[] _work2;
        _work2 = NULL;
    }
}

// (Re)allocate all buffers to match the tableau's current dimensions and
// reset the reference space. Called on construction-time setup and on resize.
void ProjectedSteepestEdgeRule::initialize( const ITableau &tableau )
{
    freeIfNeeded();

    _n = tableau.getN();
    _m = tableau.getM();

    _referenceSpace = new char[_n];
    if ( !_referenceSpace )
        throw MarabouError( MarabouError::ALLOCATION_FAILED,
                            "ProjectedSteepestEdgeRule::referenceSpace" );

    // One gamma weight per non-basic variable (n - m of them).
    _gamma = new double[_n - _m];
    if ( !_gamma )
        throw MarabouError( MarabouError::ALLOCATION_FAILED,
                            "ProjectedSteepestEdgeRule::gamma" );

    _work1 = new double[_m];
    if ( !_work1 )
        throw MarabouError( MarabouError::ALLOCATION_FAILED,
                            "ProjectedSteepestEdgeRule::work1" );

    _work2 = new double[_m];
    if ( !_work2 )
        throw MarabouError( MarabouError::ALLOCATION_FAILED,
                            "ProjectedSteepestEdgeRule::work2" );

    resetReferenceSpace( tableau );
}

// Make the current non-basic variables the reference space: all gammas become
// 1 and the error estimate and reset countdown are cleared.
void ProjectedSteepestEdgeRule::resetReferenceSpace( const ITableau &tableau )
{
    memset( _referenceSpace, 0, _n * sizeof(char) );

    for ( unsigned i = 0; i < _n - _m; ++i )
    {
        _gamma[i] = 1.0;
        _referenceSpace[tableau.nonBasicIndexToVariable( i )] = 1;
    }

    _iterationsUntilReset = GlobalConfiguration::PSE_ITERATIONS_BEFORE_RESET;
    _errorInGamma = 0.0;

    if ( _statistics )
        _statistics->pseIncNumResetReferenceSpace();
}

// Pick the entering variable among 'candidates' (minus 'excluded') that
// maximizes costFunction[i]^2 / gamma[i]. Returns false if no candidate
// remains after exclusion.
bool ProjectedSteepestEdgeRule::select( ITableau &tableau,
                                        const List<unsigned> &candidates,
                                        const Set<unsigned> &excluded )
{
    List<unsigned> remainingCandidates = candidates;

    List<unsigned>::iterator it = remainingCandidates.begin();
    while ( it != remainingCandidates.end() )
    {
        if ( excluded.exists( *it ) )
            it = remainingCandidates.erase( it );
        else
            ++it;
    }

    if ( remainingCandidates.empty() )
    {
        PSE_LOG( "No candidates, select returning false" );
        return false;
    }

    // Obtain the cost function
    const double *costFunction = tableau.getCostFunction();

    /*
      Apply the steepest edge rule: iterate over the candidates and
      pick xi for which the value of

                  costFunction[i]^2
                  -----------------
                       gamma[i]

      is maximal.
    */

    it = remainingCandidates.begin();
    unsigned bestCandidate = *it;
    double gammaValue = _gamma[*it];
    // Guard against division by a (near-)zero gamma.
    double bestValue =
        ( gammaValue < DBL_EPSILON ) ? 0 : ( costFunction[*it] * costFunction[*it] ) / gammaValue;

    ++it;

    while ( it != remainingCandidates.end() )
    {
        unsigned contender = *it;
        gammaValue = _gamma[*it];
        double contenderValue =
            ( gammaValue < DBL_EPSILON ) ? 0 : ( costFunction[*it] * costFunction[*it] ) / gammaValue;

        if ( contenderValue > bestValue )
        {
            bestCandidate = contender;
            bestValue = contenderValue;
        }

        ++it;
    }

    tableau.setEnteringVariableIndex( bestCandidate );

    if ( _statistics )
        _statistics->pseIncNumIterations();

    return true;
}

// Incrementally update the gamma weights for the pivot that is about to be
// performed (entering/leaving variables are already fixed in the tableau).
void ProjectedSteepestEdgeRule::prePivotHook( const ITableau &tableau, bool fakePivot )
{
    PSE_LOG( "PrePivotHook called" );

    // If the pivot is fake, gamma does not need to be updated
    if ( fakePivot )
    {
        PSE_LOG( "PrePivotHook done - fake pivot" );
        return;
    }

    // When this hook is called, the entering and leaving variables have
    // already been determined.
    unsigned entering = tableau.getEnteringVariable();
    unsigned enteringIndex = tableau.variableToIndex( entering );
    unsigned leaving = tableau.getLeavingVariable();
    unsigned leavingIndex = tableau.variableToIndex( leaving );

    ASSERT( entering != leaving );

    const double *changeColumn = tableau.getChangeColumn();
    const TableauRow &pivotRow = *tableau.getPivotRow();

    // Update gamma[entering] to the accurate value, taking the pivot into account
    double accurateGamma;
    _errorInGamma = computeAccurateGamma( accurateGamma, tableau );
    _gamma[enteringIndex] = accurateGamma / ( changeColumn[leavingIndex] * changeColumn[leavingIndex] );

    unsigned m = tableau.getM();
    unsigned n = tableau.getN();

    // Auxiliary variables
    double r, s, t1, t2;

    // Compute GLPK's u vector
    for ( unsigned i = 0; i < m; ++i )
    {
        unsigned basicVariable = tableau.basicIndexToVariable( i );
        if ( _referenceSpace[basicVariable] )
            _work1[i] = -changeColumn[i];
        else
            _work1[i] = 0.0;
    }
    tableau.backwardTransformation( _work1, _work2 );

    // Update gamma[i] for all i != enteringIndex
    for ( unsigned i = 0; i < n - m; ++i )
    {
        if ( i == enteringIndex )
            continue;

        // Skip entries of the pivot row that are effectively zero.
        if ( ( -GlobalConfiguration::PSE_GAMMA_UPDATE_TOLERANCE < pivotRow[i] ) &&
             ( pivotRow[i] < +GlobalConfiguration::PSE_GAMMA_UPDATE_TOLERANCE ) )
            continue;

        r = pivotRow[i] / -changeColumn[leavingIndex];

        /* compute inner product s[j] = N'[j] * u, where N[j] is the
           constraint-matrix column corresponding to xN[j] */
        unsigned nonBasic = tableau.nonBasicIndexToVariable( i );
        _AColumn = tableau.getSparseAColumn( nonBasic );
        s = 0.0;
        for ( const auto &entry : *_AColumn )
            s += entry._value * _work2[entry._index];

        /* compute new gamma[j] */
        t1 = _gamma[i] + r * ( r * accurateGamma + s + s );
        t2 = ( ( _referenceSpace[nonBasic] ? 1.0 : 0.0 ) +
               ( ( _referenceSpace[entering] ? 1.0 : 0.0 ) * r * r ) );
        // Keep gamma at least at its theoretical lower bound t2.
        _gamma[i] = ( t1 > t2 ? t1 : t2 );
    }

    PSE_LOG( "PrePivotHook done" );
}

// Recompute gamma for the entering variable from scratch, store it in
// 'accurateGamma', and return the relative discrepancy with the incrementally
// maintained value (used to decide whether the reference space must be reset).
double ProjectedSteepestEdgeRule::computeAccurateGamma( double &accurateGamma, const ITableau &tableau )
{
    unsigned entering = tableau.getEnteringVariable();
    unsigned enteringIndex = tableau.variableToIndex( entering );
    unsigned m = tableau.getM();
    const double *changeColumn = tableau.getChangeColumn();

    // Is the entering variable in the reference space?
    accurateGamma = _referenceSpace[entering] ? 1.0 : 0.0;

    for ( unsigned i = 0; i < m; ++i )
    {
        unsigned basic = tableau.basicIndexToVariable( i );

        if ( _referenceSpace[basic] )
            accurateGamma += ( changeColumn[i] * changeColumn[i] );
    }

    return FloatUtils::abs( accurateGamma - _gamma[enteringIndex] ) / ( 1.0 + accurateGamma );
}

// After a (real) pivot: reset the reference space if either the iteration
// budget is exhausted or the incremental gamma error grew too large.
void ProjectedSteepestEdgeRule::postPivotHook( const ITableau &tableau, bool fakePivot )
{
    PSE_LOG( "PostPivotHook called" );

    // If the pivot is fake, no need to reset the reference space.
    if ( fakePivot )
    {
        PSE_LOG( "PostPivotHook done - fake pivot" );
        return;
    }

    // If the iteration limit has been exhausted, reset the reference space
    --_iterationsUntilReset;
    if ( _iterationsUntilReset <= 0 )
    {
        PSE_LOG( "PostPivotHook reseting ref space (iterations)" );
        resetReferenceSpace( tableau );
        return;
    }

    // If the error is too great, reset the reference space.
    if ( _errorInGamma > GlobalConfiguration::PSE_GAMMA_ERROR_THRESHOLD )
    {
        PSE_LOG( Stringf( "PostPivotHook reseting ref space (degradation). Error = %.15lf", _errorInGamma ).ascii() );
        resetReferenceSpace( tableau );
        return;
    }

    PSE_LOG( "PostPivotHook done (ref space not reset)" );
}

// The tableau changed dimensions: rebuild all internal state from scratch.
void ProjectedSteepestEdgeRule::resizeHook( const ITableau &tableau )
{
    initialize( tableau );
}

double ProjectedSteepestEdgeRule::getGamma( unsigned index ) const
{
    return _gamma[index];
}

//
// Local Variables:
// compile-command: "make -C ../.. "
// tags-file-name: "../../TAGS"
// c-basic-offset: 4
// End:
//
package me.steven.indrev.mixin.client;

import net.minecraft.client.recipebook.ClientRecipeBook;
import net.minecraft.client.recipebook.RecipeBookGroup;
import net.minecraft.recipe.Recipe;
import org.spongepowered.asm.mixin.Mixin;
import org.spongepowered.asm.mixin.injection.At;
import org.spongepowered.asm.mixin.injection.Inject;
import org.spongepowered.asm.mixin.injection.callback.CallbackInfoReturnable;

/**
 * Client-side mixin into the vanilla {@link ClientRecipeBook} that
 * short-circuits recipe-book group resolution for this mod's recipes.
 */
@Mixin(ClientRecipeBook.class)
public class MixinClientRecipeBook {

    /**
     * Injected at the head of {@code getGroupForRecipe} (cancellable).
     * For any recipe whose id lives in the {@code indrev} namespace, returns
     * {@link RecipeBookGroup#UNKNOWN} immediately instead of letting vanilla
     * resolve the group — presumably to suppress vanilla's handling/logging of
     * unrecognized custom recipe types (as the method name suggests; confirm
     * against the targeted vanilla code).
     */
    @Inject(method = "getGroupForRecipe", at = @At("HEAD"), cancellable = true)
    private static void indrev_suppressUnknownRecipeType(Recipe<?> recipe,
            CallbackInfoReturnable<RecipeBookGroup> cir) {
        if (recipe.getId().getNamespace().equals("indrev"))
            cir.setReturnValue(RecipeBookGroup.UNKNOWN);
    }
}
Illinois collects more than $3,000 per capita in state and local taxes each year, one of the highest per capita tax revenues. Yet, the state's fiscal management system does not appear to be operating optimally, which is the main reason it ranks as the second worst-run state. For example, Illinois has one of the smallest rainy day funds compared to other states, at 1% of its general annual budget -- an indication the state may not be able to satisfy its short-term obligations. Illinois' debt is equal to more than three-fourths of its annual revenue, also one of the highest shares in the nation. Similarly, the state's pension fund is not financially healthy. The state only has assets on hand to meet 39% of its pension obligations, the lowest ratio of any state. Perhaps as a result of the state's finances, Illinois has the worst credit rating and outlook from S&P and Moody's of any state. The housing market in Illinois is also struggling. One in every 73 housing units is in some state of the foreclosure process, nearly the highest foreclosure rate in the country. As is often the case in states with particularly high foreclosure rates, home prices in Illinois have dropped by more than 10% from 2010 through last year. This decline was the worst in the country during that time.
Poly(3-Hydroxybutyrate) (PHB) Depolymerase PhaZa1 Is Involved in Mobilization of Accumulated PHB in Ralstonia eutropha H16 ABSTRACT The recently finished genome sequence of Ralstonia eutropha H16 harbors nine genes that are thought to encode functions for intracellular depolymerization (mobilization) of storage poly(3-hydroxybutyrate) (PHB). Based on amino acid similarities, the gene products belong to four classes (PhaZa1 to PhaZa5, PhaZb, PhaZc, and PhaZd1/PhaZd2). However, convincing direct evidence for the in vivo roles of the gene products is poor. In this study, we selected four candidate genes (phaZa1, phaZb, phaZc, and phaZd1) representing the four classes and investigated the physiological function of the gene products (i) with recombinant Escherichia coli strains and (ii) with R. eutropha null mutants. Evidence for weak but significant PHB depolymerase activity was obtained only for PhaZa1. The physiological roles of the other potential PHB depolymerases remain uncertain.
package com.whoyao.model;

/**
 * Transfer model carrying a response to a friend request: which friend user it
 * concerns, whether the request was accepted, and the id of the related
 * message content.
 */
public class FriendAgreeOrDisAgreeTModel {

    // Id of the friend user the agree/disagree decision refers to.
    private int frienduserid;
    // Agreement flag; an int rather than boolean — presumably 1 = agree,
    // 0 = disagree (confirm against the producer of this model).
    private int isagree;
    // Id of the message content associated with this friend request.
    private int messcontentid;

    public int getFrienduserid() {
        return frienduserid;
    }

    public void setFrienduserid(int frienduserid) {
        this.frienduserid = frienduserid;
    }

    public int getIsagree() {
        return isagree;
    }

    public void setIsagree(int isagree) {
        this.isagree = isagree;
    }

    public int getMesscontentid() {
        return messcontentid;
    }

    public void setMesscontentid(int messcontentid) {
        this.messcontentid = messcontentid;
    }
}
import torch
import torch.nn as nn

import tridepth_renderer.cuda.rasterize as rasterize_cuda

from . import vertices_to_faces, rasterize_image


def flip(x, dim):
    """Flip tensor ``x`` along dimension ``dim``.

    Builds a reversed index along ``dim`` and gathers with it. NOTE: the index
    is created with ``.cuda()``, so this helper only works on GPU tensors.
    """
    indices = [slice(None)] * x.dim()
    indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, dtype=torch.long).cuda()
    return x[tuple(indices)]


class Renderer(nn.Module):
    """Differentiable mesh renderer operating on "depthmap-format" vertices
    (u, v in [0, 1] plus depth), producing RGB / depth / silhouette images or
    a face-index map via the CUDA rasterizer.
    """

    def __init__(self, render_size=(228, 304)):
        super(Renderer, self).__init__()
        # Rendering size (output size), as (height, width).
        self.render_size = render_size

        # Other parameters
        self.anti_aliasing = True
        self.background_color = [0, 0, 0]
        self.fill_back = False
        self.dist_coeffs = torch.cuda.FloatTensor([[0., 0., 0., 0., 0.]])

        # light
        self.light_intensity_ambient = 0.5
        self.light_intensity_directional = 0.5
        self.light_color_ambient = [1, 1, 1]
        self.light_color_directional = [1, 1, 1]
        self.light_direction = [0, 1, 0]

        # rasterization
        self.rasterizer_eps = 1e-3

    def project_cam_to_depthmap(self, verts_3d, intrinsics, orig_size, eps=1e-7):
        '''Convert verts from 3D-pointcloud-format to 2.5D-depthmap-format
        (pinhole projection); output u, v are clamped to range [0, 1].

        Args:
            verts_3d: [B,N,3] (3D pointcloud format, camera coordinates)
            intrinsics: [B,3,3]
            orig_size: (height, width)
            eps: small constant added to z to avoid division by zero
        '''
        x, y, z = verts_3d[:, :, 0], verts_3d[:, :, 1], verts_3d[:, :, 2]
        # Perspective divide.
        x_ = x / (z + eps)
        y_ = y / (z + eps)

        verts_depth = torch.stack([x_, y_, torch.ones_like(z)], dim=-1)
        verts_depth = torch.matmul(verts_depth, intrinsics.transpose(1, 2))
        u, v = verts_depth[:, :, 0], verts_depth[:, :, 1]

        # map u,v from [0, img_size] to [0, 1] to use by the renderer
        u = (u / orig_size[1]).clamp(min=0, max=1)
        v = (v / orig_size[0]).clamp(min=0, max=1)
        vertices = torch.stack([u, v, z], dim=-1)
        return vertices

    def convert_depthmap_to_cam(self, verts_3d, intrinsics, orig_size):
        """Converts from depthmap-format to 3D-pointcloud-format (camera coords),
        i.e. back-projects (u, v, depth) through the inverse intrinsics.

        Args:
            verts_3d: [B,N,3] (depthmap format, u/v in [0,1])
            intrinsics: [B,3,3]
            orig_size: (height, width)
        """
        # Change scale [0,1] -> [0, img_size]
        verts_depth = verts_3d[:, :, 2:].transpose(2, 1)  # [B,1,N]
        verts_3d_cam = torch.cat((verts_3d[:, :, :1] * orig_size[1],
                                  verts_3d[:, :, 1:2] * orig_size[0],
                                  torch.ones_like(verts_3d[:, :, 2:])), 2)
        verts_3d_cam = verts_3d_cam.transpose(2, 1)  # [B,3,N]

        # Convert to camera coords (scale homogeneous rays by depth).
        verts_3d_cam = (intrinsics.inverse() @ verts_3d_cam) * verts_depth  # [B,3,N]
        return verts_3d_cam.transpose(2, 1)

    def _transform_depthmap(self, verts_3d, intrinsics, R_mat, t_mat, orig_size):
        """Reproject depthmap-format verts based on the camera transform (R/t):
        back-project to camera coords, apply [R|t], re-project to depthmap format.

        Args:
            verts_3d: [B,N,3] (depthmap format, range=[0,1])
            intrinsics: [B,3,3]
            R_mat: [B,3,3]
            t_mat: [B,3,1]
            orig_size: (height, width)

        Note:
            Currently image -> texture converting is not implemented yet.
            So, when you render an rgb img (for 2-view-sfm), it would be better
            to use torch.grid_sample().
            (Recommended for depthmap/silhouette rendering)
        """
        # Convert from depthmap to camera coords
        verts_3d_cam = self.convert_depthmap_to_cam(verts_3d, intrinsics, orig_size)
        verts_3d_cam = verts_3d_cam.transpose(2, 1)  # [B,3,N]

        # Camera transform (R_mat & t_mat), applied in homogeneous coordinates.
        pose_mat = torch.cat([R_mat, t_mat], dim=2)  # [B,3,4]
        verts_3d_ones = torch.ones_like(verts_3d_cam[:, :1, :])  # [B,1,N]
        verts_3d_cam_hom = torch.cat((verts_3d_cam, verts_3d_ones), 1)  # [B,4,N]
        verts_3d_cam2 = (pose_mat @ verts_3d_cam_hom).transpose(2, 1)  # [B,N,3]

        # Project into pixel-coords as depthmap format
        verts_3d_depth2 = self.project_cam_to_depthmap(verts_3d_cam2, intrinsics, orig_size)
        return verts_3d_depth2

    def forward(self, verts, faces, textures=None, intrinsics=None,
                R_mat=None, t_mat=None, mode=["rgb", "depth", "silhouette"],
                render_size=None):
        """Dispatch to the requested rendering mode.

        ``mode`` selects either face-index rendering (no K/R/t allowed) or
        image rendering ("rgb"/"depth"/"silhouette", optionally reprojected
        with K/R/t).
        NOTE(review): the mutable default ``mode`` list is shared across calls;
        it is only read here, but callers should not mutate it.
        """
        # Check batchsize
        assert verts.shape[0] == faces.shape[0], \
            "batchsize is not same between verts and faces"

        if "face_index" in mode:
            assert (intrinsics is None) and (R_mat is None) and (t_mat is None), \
                "K/R/t is not necessary in face_index-rendering-mode"
            return self._render_face_index_map(verts, faces, render_size)
        elif ("rgb" in mode) or ("depth" in mode) or ("silhouette" in mode):
            return self._render(verts, faces, textures, render_size, mode,
                                intrinsics=intrinsics, R_mat=R_mat, t_mat=t_mat)
        else:
            raise ValueError(
                "Choose mode from [None, 'silhouettes', 'depth', 'face_index']")

    def _render(self, verts, faces, textures=None, render_size=None,
                mode=["rgb", "depth", "silhouette"],
                intrinsics=None, R_mat=None, t_mat=None):
        """Rendering images from a 3d mesh given in depthmap format.

        Args:
            verts: [B,N,3(uvd)] (depthmap format)
                You need to concat verts_depths with verts_2d.
                u/v range should be [0,1].
            faces: [B,M,3] (index triples per face)
            textures: optional textures passed through to the rasterizer
            render_size: output size as (height, width); defaults to
                self.render_size
            mode (list of str): any of ['rgb','depth','silhouette']
            intrinsics: [B,3,3] (optional; with R_mat/t_mat triggers reprojection)
            R_mat: [B,3,3]
            t_mat: [B,3,1]
        Returns:
            dict mapping each requested mode to its rendered tensor
            (e.g. depth: [B,1,H,W])
        """
        assert verts.shape[2] == 3, "This function can deal with only 3d mesh.(depthmap format)"

        # Fill back: duplicate each face with reversed winding so back faces
        # are rasterized too.
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1).detach()

        if render_size is None:
            render_size = self.render_size

        # Prepare elements (rasterizer works on a square of the larger side).
        img_max_size = max(render_size)
        height, width = render_size

        # If intrinsics/R_mat/t_mat are specified, reproject vertices on the other viewpoint.
        if (intrinsics is not None) and (R_mat is not None) and (t_mat is not None):
            verts = self._transform_depthmap(verts, intrinsics, R_mat, t_mat, render_size)

        # Resize verts [0,1] -> [-1,1] (You need to pay attention to scale!)
        verts = torch.cat(((verts[:, :, :1] * width / img_max_size) * 2.0 - 1.0,
                           (verts[:, :, 1:2] * height / img_max_size) * 2.0 - 1.0,
                           verts[:, :, 2:]), 2)
        faces = vertices_to_faces(verts, faces)

        # Rasterization
        render_dic = rasterize_image(faces, textures, img_max_size,
                                     self.anti_aliasing, mode=mode)

        # Final adjustment: rasterizer output is vertically flipped and padded
        # to a square; flip back and crop to (height, width).
        if "rgb" in mode:
            render_rgbs = flip(render_dic["rgb"], 2)[:, :, :height, :width]
            render_dic["rgb"] = render_rgbs.contiguous()
        if "depth" in mode:
            render_depths = flip(render_dic["depth"], 1)[:, :height, :width].unsqueeze(1)
            render_dic["depth"] = render_depths.contiguous()
        if "silhouette" in mode:
            render_silhouettes = flip(render_dic["silhouette"], 1)[:, :height, :width].unsqueeze(1)
            render_dic["silhouette"] = render_silhouettes.contiguous()

        return render_dic

    def _render_face_index_map(self, verts, faces, render_size=None):
        """Rendering face_index_map from a 2d mesh for creating face silhouettes.

        Args:
            verts: [B,N,2(uv)] 2D mesh extracted from scene image
                (a constant z=1 is appended if missing).
            faces: [B,M,3] (index triples per face)
            render_size: output size as (height, width); defaults to
                self.render_size
        Returns:
            face_index_map: [B,H,W] (pixel value is the face idx in [1,M_max];
            0 means background after the +1 shift below)
        """
        # Fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1).detach()

        if render_size is None:
            render_size = self.render_size

        # Add z-axis to verts ([B,N_max,2]->[B,N_max,3])
        if verts.shape[2] == 2:
            z_verts = torch.ones_like(verts[:, :, :1])
            verts = torch.cat((verts, z_verts), 2)  # [B,N_max,3]

        # Prepare elements for rasterization
        batch_size = faces.shape[0]
        img_max_size = max(render_size)
        height, width = render_size

        # Resize verts [0,1] -> [-1,1] (You need to pay attention to scale!)
        verts = torch.cat(((verts[:, :, :1] * width / img_max_size) * 2.0 - 1.0,
                           (verts[:, :, 1:2] * height / img_max_size) * 2.0 - 1.0,
                           verts[:, :, 2:]), 2)
        faces = vertices_to_faces(verts, faces)

        # Prepare other buffers expected by the CUDA kernel.
        face_index_map = torch.cuda.IntTensor(batch_size, img_max_size, img_max_size).fill_(-1)
        weight_map = torch.cuda.FloatTensor(batch_size, img_max_size, img_max_size, 3).fill_(0.0)
        depth_map = torch.cuda.FloatTensor(batch_size, img_max_size, img_max_size).fill_(100.0)  # self.far
        face_inv_map = torch.cuda.FloatTensor(1).fill_(0)
        faces_inv = torch.zeros_like(faces)

        # Face index rasterization
        face_index_map, _, _, _ = rasterize_cuda.forward_face_index_map(faces, face_index_map,
                                                                        weight_map, depth_map,
                                                                        face_inv_map, faces_inv,
                                                                        img_max_size,
                                                                        False, False, False)
        # Change pixel value in background area (-1 -> 0)
        face_index_map = face_index_map + 1

        return face_index_map[:, :height, :width].contiguous()
Does Feedback Control Always Reduce Entropy/Communication Requirement in Smart Grid? In cyber physical systems (CPSs) such as smart grids, feedback control confines the system state around the desired one. To monitor the system state, communications are needed to convey the system observations. Entropy is used to bridge the control and communications, since the control action results in a low entropy system. Meanwhile communications arise from positive entropy (i.e., uncertainty), and a lower entropy information source requires less communications for description. However, it is not clear whether the control action always reduces the entropy (thus communications). Hence, two types of entropy reductions, namely reduction in time and reduction when compared with open loop control, are studied. Sufficient conditions for entropy reduction and increase are derived, respectively. Numerical results show that the feedback control reduces the entropy, thus the communication requirement, in typical setups of smart grids. However, there also exist situations in which the feedback control increases entropy, thus demanding more communications.
package citrixadc import ( "github.com/citrix/adc-nitro-go/resource/config/vpn" "github.com/hashicorp/terraform/helper/schema" "fmt" "log" ) func resourceCitrixAdcVpnpcoipprofile() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, Create: createVpnpcoipprofileFunc, Read: readVpnpcoipprofileFunc, Update: updateVpnpcoipprofileFunc, Delete: deleteVpnpcoipprofileFunc, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, Required: true, Computed: false, ForceNew: true, }, "conserverurl": &schema.Schema{ Type: schema.TypeString, Required: true, Computed: false, }, "icvverification": &schema.Schema{ Type: schema.TypeString, Optional: true, Computed: true, }, "sessionidletimeout": &schema.Schema{ Type: schema.TypeInt, Optional: true, Computed: true, }, }, } } func createVpnpcoipprofileFunc(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] citrixadc-provider: In createVpnpcoipprofileFunc") client := meta.(*NetScalerNitroClient).client vpnpcoipprofileName := d.Get("name").(string) vpnpcoipprofile := vpn.Vpnpcoipprofile{ Conserverurl: d.Get("conserverurl").(string), Icvverification: d.Get("icvverification").(string), Name: d.Get("name").(string), Sessionidletimeout: d.Get("sessionidletimeout").(int), } _, err := client.AddResource("vpnpcoipprofile", vpnpcoipprofileName, &vpnpcoipprofile) if err != nil { return err } d.SetId(vpnpcoipprofileName) err = readVpnpcoipprofileFunc(d, meta) if err != nil { log.Printf("[ERROR] netscaler-provider: ?? we just created this vpnpcoipprofile but we can't read it ?? 
%s", vpnpcoipprofileName) return nil } return nil } func readVpnpcoipprofileFunc(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] citrixadc-provider: In readVpnpcoipprofileFunc") client := meta.(*NetScalerNitroClient).client vpnpcoipprofileName := d.Id() log.Printf("[DEBUG] citrixadc-provider: Reading vpnpcoipprofile state %s", vpnpcoipprofileName) data, err := client.FindResource("vpnpcoipprofile", vpnpcoipprofileName) if err != nil { log.Printf("[WARN] citrixadc-provider: Clearing vpnpcoipprofile state %s", vpnpcoipprofileName) d.SetId("") return nil } d.Set("name", data["name"]) d.Set("conserverurl", data["conserverurl"]) d.Set("icvverification", data["icvverification"]) d.Set("name", data["name"]) d.Set("sessionidletimeout", data["sessionidletimeout"]) return nil } func updateVpnpcoipprofileFunc(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] citrixadc-provider: In updateVpnpcoipprofileFunc") client := meta.(*NetScalerNitroClient).client vpnpcoipprofileName := d.Get("name").(string) vpnpcoipprofile := vpn.Vpnpcoipprofile{ Name: d.Get("name").(string), } hasChange := false if d.HasChange("conserverurl") { log.Printf("[DEBUG] citrixadc-provider: Conserverurl has changed for vpnpcoipprofile %s, starting update", vpnpcoipprofileName) vpnpcoipprofile.Conserverurl = d.Get("conserverurl").(string) hasChange = true } if d.HasChange("icvverification") { log.Printf("[DEBUG] citrixadc-provider: Icvverification has changed for vpnpcoipprofile %s, starting update", vpnpcoipprofileName) vpnpcoipprofile.Icvverification = d.Get("icvverification").(string) hasChange = true } if d.HasChange("sessionidletimeout") { log.Printf("[DEBUG] citrixadc-provider: Sessionidletimeout has changed for vpnpcoipprofile %s, starting update", vpnpcoipprofileName) vpnpcoipprofile.Sessionidletimeout = d.Get("sessionidletimeout").(int) hasChange = true } if hasChange { _, err := client.UpdateResource("vpnpcoipprofile", vpnpcoipprofileName, &vpnpcoipprofile) 
if err != nil { return fmt.Errorf("Error updating vpnpcoipprofile %s", vpnpcoipprofileName) } } return readVpnpcoipprofileFunc(d, meta) } func deleteVpnpcoipprofileFunc(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] citrixadc-provider: In deleteVpnpcoipprofileFunc") client := meta.(*NetScalerNitroClient).client vpnpcoipprofileName := d.Id() err := client.DeleteResource("vpnpcoipprofile", vpnpcoipprofileName) if err != nil { return err } d.SetId("") return nil }
1. Field of the Invention This invention relates generally to devices which trap and/or pump waste gasses from industrial processes, and more particularly to cryogenic traps or pumps (cryotraps or cryopumps). 2. Description of the Prior Art Cryogenics deals with the production of low temperatures and the utilization of low temperature phenomenon. The cryogenic temperatures are generally considered to range from 123.degree. K. to 0.degree. K. Gases used in cryogenic engineering are cooled to their boiling (or liquefying) points by three basic methods, namely liquid expansion, Joule-Thomson expansion, and expansion in an engine (refrigeration). After production, cryogenic liquids generally are stored in specially designed tanks using superinsulation or in Dewar vessels (double walled flasks having an evacuated space between them). Liquid air, oxygen, nitrogen, and even hydrogen can be kept for several hours in such vessels without further thermal protection. Liquid helium, however, has such a low heat of vaporization that it can be kept for any length of time only if the Dewar vessel is in turn surrounded by a similar, larger flask containing liquid nitrogen or liquid air. For several industrial and research purposes, cryogenic pumps (cryopumps) are used to attain hard vacuums beyond the reach of mechanical pumps. Gasses will condense on a surface if the temperature is low enough, much as water vapor will condense a cold windowpane. In one system an absorbent (such as silica gel) is bonded to the surface of a cryopanel. The pumping speeds of cryogenically cooled absorbents at very low pressure are sensitive to the amount absorbed, but independent of the depth of the absorbing material. The capacity of the material to absorb increases rapidly with decreasing temperatures. By cooling the absorbent to 77.degree. K., all gasses except hydrogen, helium, and neon can be effectively trapped. 
Cryotraps, which are closely related to cryopumps, are often used to trap gasses formed as a by-product of industrial processes. A typical cryotrap of the prior art includes a liquid nitrogen refrigeration system which chills a condensation surface to cryogenic temperatures. The waste industrial gasses condense on the condensation surface, and are periodically removed therefrom in a flushing process. A problem with the old liquid nitrogen cryotrap technology is that it consumes large quantities of liquid nitrogen, requiring frequent deliveries of that substance. Furthermore, the plumbing and facilities required to house the nitrogen are bulky and expensive. The relatively new technology of helium cryopump refrigeration solves some of the problems of the old nitrogen cryotrap systems, but presents a few new ones of its own. A helium cryo refrigeration system includes a cold head through which high pressure gaseous helium is circulated. Typically, a compressor provides gaseous helium to the cold head at approximately 250 PSI, and recycles the effluent. The helium cryo refrigeration system is advantageous over the old nitrogen systems in that the helium is constantly regenerated, eliminating the need for large storage vessels and frequent deliveries. Disadvantages of the helium systems include that they require many hours to reach cryogenic temperatures, and thus are not well suited for use in cryotraps which have to be periodically shut down for regeneration. Also, the helium system is not compatible with highly reactive gasses such as chlorine due to the characteristics of the materials used in the construction of the cold head and the associated condensation surfaces. A solution to these problems would be to provide a thermal switch which insulates and protects the helium cold head from the condensation surface. One such thermal switch is disclosed in U.S. Pat. No.
3,525,229 of Denhoy which includes an inner vessel filled with liquid helium, and an outer vessel which may be selectively filled with a liquefied gas or evacuated with a vacuum pump. When the outer vessel is filled with the gas, the heat is conducted from the condensation surface via the liquefied gas to the liquid helium. When the outer vessel is evacuated the condensation surface is effectively insulated from the liquid helium. In U.S. Pat. No. 4,432,208 of Onuki et al., a cold trap for liquid sodium is disclosed which has a double walled structure providing a volume 16 which may be filled with a heat insulating gas. U.S. Pat. No. 4,354,356 of Milner, teaches a temperature cycled cold trap provided with temperature sensors in a feed back mechanism. While the above identified patents teach useful cryogenic subassemblies, the prior art does not disclose a complete helium cryotrap system.
"""Minimal Tkinter entry point for the hotel program window."""
import tkinter as tk

# Build the main window, set its title and icon, then enter the event loop.
window = tk.Tk()
window.title("Otel Programı")
window.iconbitmap("ico/otel.ico")
window.mainloop()
Performance analysis of NTRU algorithm with non-post-quantum algorithms Abstract In opportunistic networks, the nodes connect to each other wirelessly and use the store-carry-forward technique to transmit the data from one node to another node. The nodes in opportunistic networks are heterogeneous, having high mobility, limited power, low density, short radio range, and numerous security threats to unauthorized nodes. The fundamental challenge in an opportunistic network is to secure and protect the information during communication in networks to achieve the users confidence. This issue is technically resolved by incorporating the cryptography algorithms that make both the virtual and modern world in a safer position. Asymmetric Cryptography makes information unintelligible to an unauthorized user and provides confidentiality to genuine users. Encryption and decryption technology are solutions to protect data from unauthorized users. There are many opportunistic network algorithms in the existing literature that provide optimal performance. However, in this research work, we propose the NTRU post-quantum algorithm because of its high performance, low cost, and fast execution during encryption and decryption of the data over the network. We also implemented and analyzed the performance of the proposed NTRU algorithm and compared its results with the Elliptic Curve Cryptography and ElGamal algorithm. After the result analysis, we conclude that our proposed technique is highly effective and secure.
<filename>core/src/main/java/io/zero88/jooqx/datatype/basic/BytesConverter.java<gh_stars>10-100 package io.zero88.jooqx.datatype.basic; import org.jetbrains.annotations.NotNull; import io.vertx.core.buffer.Buffer; import io.zero88.jooqx.datatype.JooqxConverter; public final class BytesConverter implements JooqxConverter<Buffer, byte[]> { @Override public byte[] from(Buffer vertxObject) { return vertxObject == null ? null : vertxObject.getBytes(); } @Override public Buffer to(byte[] jooqObject) { return jooqObject == null ? null : Buffer.buffer(jooqObject); } @Override public @NotNull Class<Buffer> fromType() { return Buffer.class; } @Override public @NotNull Class<byte[]> toType() { return byte[].class; } }
Field of the Invention The present invention relates in general to the field of computers and similar technologies, and in particular to software utilized in this field. Still more particularly, it relates to a method, system and computer-usable medium for efficient searching of a semantic model of resources and resource relationships. Description of the Related Art An entity-relationship (ER) model is a data model that provides a systematic way of describing the data or information aspects of a business domain or its process requirements. The business domain, or its process requirements, is modeled as entities (i.e., things or resources) that are characterized by various attributes (i.e., characteristics or properties). These entities are linked to one another by relationships that express the dependencies and requirements between them. Diagrams created to graphically represent these entities, attributes, and relationships are referred to as entity-relationship diagrams. A common need in information systems is to search through a collection of these entities in a model for those entities with predetermined attributes or relationships. Such information models are often used in software applications to capture relevant information about entities. The modeled entities, therefore, can be diverse and include physical assets, operations, organizations, metadata, or other related items. Relationships between those entities are equally diverse as they can represent a very broad set of associations. As an example, a city water infrastructure system may be modeled to show entities such as pipes, valves, flow meters, switches, regulators, etc. and their relationship to one another. These relationships may include physical connections, device types or manufacturer, purchase and installation dates, supplier information, and installation teams or contractors. 
Other relationships may include geospatial mapping within other city infrastructures, such as buildings, roads, sidewalks, subways, failure incidents, maintenance records, and so forth. It is also typical to represent information models as graphs, where nodes represent the modeled entities and edges between nodes represent a predetermined relationship. With this representation of a domain, the model can be navigated in un-prescribed ways to discover new or extended relationships between entities and entity associations. Such newly-discovered information can often assist in converting data into meaningful insights that can lead to optimization of business operations or processes. As a result, it may be advantageous to create informational models that are very complete and capture as many entities and relationships as possible in order to provide the basis for discovering previously unrealized associations. However, achieving completeness in an information model can also lead to increased complexity, which in turn can create challenges in navigating the model when attempting to discover the paths that are most likely to provide meaningful relationships. Generally, this refinement or filtering is achieved by adding a series of “where” clauses, or functional equivalents, to model queries implemented model navigation. However, this approach complicates model navigation as the user or application needs to know which relationships to include or exclude. As an alternative, filtering may be done at the data analysis layer such that identified relationships are not prioritized at the time of graph interrogation, but instead, only after the data is retrieved. This approach is likewise not optimal, as more data needs to be moved from the model to the interrogating application and the logic of that refinement is moved as an application responsibility, or to the user if unqualified data is represented for their consideration.
Oh my gosh! 2.7 Acres and The perfect water front property you have been looking for. Close to everything but tucked away in pretty North Haven Subdivision. 356' view of water. Easy access to Back Bay & Gulf of Mexico. Over an acre. Gorgeous buildable land at the end of a cul-de-sac...If you want waterfront, this is it!
Whither clinical validation? Clinical validation is the post-analytical addition of remarks onto a laboratory report aimed at helping the requesting clinician fully interpret the findings of a test. As such, it is one part of the clinical liaison process with users of the service. This task is usually performed by a suitably qualified member of laboratory staff who is in the unique position of understanding the potential for analytical and pre-analytical errors, and can correlate the results to the patient's clinical status. Until the 1990s, clinical validation of most clinical biochemistry tests was largely unavoidable since the relative unreliability of methods meant this task was an essential step in maintaining overall quality assurance (QA). Computer systems were also not yet at the stage where they could help identify clinically abnormal or analytically incorrect results from a sea of unremarkable reports. In addition, the workload of requests received was at a level where it was feasible for such a labour-intensive system to be used. Analytical issues with assays still exist but, as workloads have increased, methods have generally become more reliable, and laboratory computers have been put to better use, the role of clinical validation has shifted towards predominantly providing interpretative comments on a minority of computer-selected reports. There is certainly no shortage of anecdotal and more formal evidence that users of the laboratory service generally value the inclusion of these comments. It has also been suggested that such feedback is one of the reasons that countries with these systems in place have reduced demand for testing compared with similarly developed health-care nations who do not. With an increasing number of non-medical staff directly involved in patient decisions and a reduction in specific clinical biochemistry courses in UK medical schools, it seems demand for clinical comments is liable to continue.
However, unlike our radiology colleagues — who provide a similar service with little thought of doing otherwise — being scientists, our profession has naturally questioned the degree to which clinical validation helps in patient care, and whether it is a worthwhile (and/or cost-effective) use of highly skilled staff. There is also a lack of consensus about the breadth of tests which should be expected to be included in the clinical validation process, and, more worryingly, concerns raised about the quality of advice given. Unfortunately, these questions of benefit (or risk) associated with providing comments are not easily answered. Quantifying the positive opinions of users is relatively simple, but determining whether this translates into tangible improvements in patient care has proved far more difficult. There are, of course, additional or alternative ways of helping clinicians with the interpretation of tests. One is having a readily available telephone advice service for users of the laboratory to contact when they feel necessary. However, demonstrating the benefit to patients of solely using this approach (or harm when advice has not been sought) is likely to be just as difficult to demonstrate as it is with clinical validation. So what is the future for clinical validation? Clearly the phrase "further research needs to be done" is applicable here. Considering the resources already likely to be invested in clinical validation, basic data about which tests and in which laboratories this function applies remains largely unknown, so pursuit of this information needs to be a priority.
Given the way in which clinical validation is embedded within UK laboratories, it means that any randomized trials attempting to address its effectiveness would need to examine the effects of removing, rather than introducing, such an interpretative service. This means that further research may either need to be performed outside the UK or be limited to head-to-head comparisons between, say, clinical validation with ad hoc telephone advice against a more readily accessible advice service without interpretative comments. In the meantime, it seems likely that clinical validation will evolve further with enhancements to national information technology (IT), so that service users will be able to link abnormal results to locally or nationally agreed care pathways through best practice guidelines and map of medicine initiatives. Some of these links could undoubtedly be inserted automatically, but it remains to be seen whether the same health-care staff who complain of information overload would prefer a collection of Web addresses to a succinct clinical comment. There will be no future for clinical validation if the comments provided are potentially harmful to patients.
"""High-level APIs to Keccak1600 algorithm. """ from __future__ import division, absolute_import, print_function from spongeshaker.util import tohex __all__ = ['SpongeHash', 'SpongeHashInvalidState'] class SpongeHashInvalidState(Exception): """Extracting has started, cannot .update()/.digest().""" class SpongeHash(object): """Generic :mod:`hashlib` compatible hash function API. """ __slots__ = ('name', 'block_size', 'digest_size', '_padding', '_sponge', '_extracting') def __init__(self, capacity_bits, output_bits, data=None, name=None, sponge_class=None, padding=None, _sponge=None, _extracting=False): """Initialize sponge instance with specified parameters. Parameters: capacity_bits number of bits for capacity. digest_bits number of bits for digest output. data initial data to hash. name User-visible name for hash+parameters. sponge_class Sponge implementation class that implements the :class:`spongeshaker.sponge.Sponge` interface. padding Start bytes for padding bytes to use, final bit is always added. """ self._sponge = _sponge or sponge_class(capacity_bits) self._padding = padding self.name = name or _sponge.name self.block_size, rem1 = divmod(self._sponge.rate, 8) self.digest_size, rem2 = divmod(output_bits, 8) self._extracting = _extracting if rem1 or rem2: raise ValueError("capacity_bits and output_bits must be multiple of 8") if data is not None: self.update(data) def copy(self): """Create copy of current state. """ clone = self._sponge.copy() return SpongeHash(clone.capacity, self.digest_size * 8, None, self.name, None, self._padding, clone, self._extracting) def update(self, data): """Update state with data. Cannot be used after :meth:`extract` is called. """ if self._extracting: raise SpongeHashInvalidState() self._sponge.absorb(data) def digest(self): """Return final hash digest. This follows the :mod:`hashlib` convention that state is not changed so :meth:`update` can be called again to add more data to state. 
""" if self._extracting: raise SpongeHashInvalidState() tmp = self._sponge.copy() tmp.pad(self._padding) return tmp.squeeze(self.digest_size) def hexdigest(self): """Return :meth:`digest` value as hexadecimal string. """ return tohex(self.digest()) def extract(self, count): """Extract data from hash state. This function can be continued to be called to extract unlimited amount of bytes from state. It *does* change the state, so :meth:`update`, :meth:`digest` and :meth:`hexdigest` will throw error after :meth:`extract` has been called. """ if not self._extracting: self._sponge.pad(self._padding) self._extracting = True return self._sponge.squeeze(count)
Pseudo-Spectral Modeling in Geodynamo Many stars and planets have magnetic fields. The heat flux causes 3D convection of plasma or metal, which can generate a large-scale magnetic field like that observed. The small-scale behavior, demonstrating self-similarity in a wide range of the spatial and temporal scales, is a field of active research using modeling, as it is usually not observed. Rapid rotation gives a geostrophic system, where convection degenerates in the direction of the axis of rotation and all variation along this axis is weak. Such a system is somewhere in between the full 3D and 2D systems. Its special properties show up in the physical and the spectral space simultaneously. Pseudo-spectral modeling solves the PDE in spectral space for easy calculations of integrals and derivatives. The nonlinear terms are calculated in physical space, requiring many direct and inverse FFTs per time step. We apply this technique to the thermal convection problem with heating from below in a Cartesian box. Above a threshold of the kinetic energy the system generates the magnetic field. The most time-consuming part of our MPI code is the FFT transforms. For efficiency, we selected an FFT library which makes use of the symmetry of the fields. The optimal number of processors is ∼ half the number of grid planes, with superlinear speedup. The single-node performance is poor, each processor delivering only ∼ 5% of its peak rate. We see cyclonic convection with a cyclone density of ∼ E^{-1/3} (E is the Ekman number, ∼ 10^{-15} for the Earth). This causes a high anisotropy of the convection even for high Reynolds numbers. Our simulations demonstrate the generation of the large-scale hydrodynamic helicity. Helicity is an integral of the Navier-Stokes equation, and it has a close relation to the α-effect, which generates the large-scale magnetic field via the small-scale turbulence. This process has three stages. At first, the magnetic field grows exponentially from a small seed.
When the magnetic and kinetic energies are comparable the growth slows down, and finally equilibrium is reached. The magnetic field again quenches the helicity, damping primarily the toroidal part of the velocity field. It slows down the rotation of the cyclones (anti-cyclones). The helicity causes a divergence (convergence) of the cyclones near the upper (lower) boundaries (z = 0, 1). It is generated at the boundaries and transported to the center of the box. It changes sign at the middle of the box. Convection and dynamo systems are dissipative, so the equilibrium of the system in the sense of statistical mechanics is not reached. The kinetic energy is injected into the system at the medium scale of the cyclones; one sink of energy is at the small viscous scale, another at the large (magnetic field) scale. For some (small) scales the cascade of the energy is direct (as it is in Kolmogorov-like turbulence), for others (larger than the cyclones) it is inverse, as is observed in 2D turbulence. At the small scales there is a constant energy flux, as is plausible both from theory and from semi-empirical models.
use serde_derive::Deserialize; use anyhow::{Context, Result}; use std::fs::read_to_string; use std::path::PathBuf; use crate::rmq::StatType; #[derive(Deserialize, Debug)] pub struct Config { pub rabbitmq: RabbitMqConfig, pub settings: MonitorSettings, pub slack: SlackConfig, pub triggers: Vec<Trigger>, } #[derive(Deserialize, Debug, Clone)] pub struct RabbitMqConfig { #[serde(default = "default_protocol")] pub protocol: String, pub host: String, pub username: String, pub password: String, pub port: String, pub vhost: String, } fn default_protocol() -> String { "https".into() } #[derive(Deserialize, Debug)] pub struct MonitorSettings { pub poll_seconds: u64, #[serde(default = "default_expiration")] pub msg_expiration_seconds: u64, } fn default_expiration() -> u64 { 600 } #[derive(Deserialize, Debug)] pub struct SlackConfig { pub webhook_url: String, pub channel: String, pub screen_name: String, pub icon_url: Option<String>, pub icon_emoji: Option<String>, } #[derive(Deserialize, Debug)] #[serde(tag = "type", rename_all = "snake_case")] pub enum Trigger { ConsumersTotal(TriggerData), MemoryTotal(TriggerData), MessagesTotal(TriggerData), MessagesReady(TriggerData), MessagesUnacknowledged(TriggerData), MessagesTotalRate(TriggerData), MessagesReadyRate(TriggerData), MessagesUnacknowledgedRate(TriggerData), MessagesPublishRate(TriggerData), MessagesDeliveryRate(TriggerData), MessagesRedelivered(TriggerData), MessagesRedeliverRate(TriggerData), } impl Trigger { pub fn data(&self) -> &TriggerData { match self { Trigger::ConsumersTotal(data) => data, Trigger::MemoryTotal(data) => data, Trigger::MessagesTotal(data) => data, Trigger::MessagesReady(data) => data, Trigger::MessagesUnacknowledged(data) => data, Trigger::MessagesTotalRate(data) => data, Trigger::MessagesReadyRate(data) => data, Trigger::MessagesUnacknowledgedRate(data) => data, Trigger::MessagesPublishRate(data) => data, Trigger::MessagesDeliveryRate(data) => data, Trigger::MessagesRedelivered(data) => data, 
Trigger::MessagesRedeliverRate(data) => data, } } pub fn stat_type(&self) -> StatType { match *self { Trigger::ConsumersTotal(_) => StatType::ConsumersTotal, Trigger::MemoryTotal(_) => StatType::MemoryTotal, Trigger::MessagesTotal(_) => StatType::MessagesTotal, Trigger::MessagesReady(_) => StatType::MessagesReady, Trigger::MessagesUnacknowledged(_) => StatType::MessagesUnacknowledged, Trigger::MessagesTotalRate(_) => StatType::MessagesTotalRate, Trigger::MessagesReadyRate(_) => StatType::MessagesReadyRate, Trigger::MessagesUnacknowledgedRate(_) => StatType::MessagesUnacknowledgedRate, Trigger::MessagesPublishRate(_) => StatType::MessagesPublishRate, Trigger::MessagesDeliveryRate(_) => StatType::MessagesDeliveryRate, Trigger::MessagesRedelivered(_) => StatType::MessagesRedelivered, Trigger::MessagesRedeliverRate(_) => StatType::MessagesRedeliverRate, } } pub fn name(&self) -> &'static str { match *self { Trigger::ConsumersTotal(_) => "total number of consumers", Trigger::MemoryTotal(_) => "memory consumption", Trigger::MessagesTotal(_) => "total number of messages", Trigger::MessagesReady(_) => "ready messages", Trigger::MessagesUnacknowledged(_) => "unacknowledged messages", Trigger::MessagesTotalRate(_) => "total messages per second", Trigger::MessagesReadyRate(_) => "ready messages per second", Trigger::MessagesUnacknowledgedRate(_) => "unacknowledged messages per second", Trigger::MessagesPublishRate(_) => "published messages per second", Trigger::MessagesDeliveryRate(_) => "delivered messages per second", Trigger::MessagesRedelivered(_) => "redelivered messages", Trigger::MessagesRedeliverRate(_) => "redelivered messages per second", } } } #[derive(Deserialize, Debug)] pub struct TriggerData { pub threshold: f64, #[serde(default = "default_trigger_when")] pub trigger_when: TriggerWhen, pub queue: Option<String>, } #[derive(Deserialize, Debug)] #[serde(rename_all = "snake_case")] pub enum TriggerWhen { Above, Below, } fn default_trigger_when() -> TriggerWhen { 
TriggerWhen::Above } pub fn read_config(path: &PathBuf) -> Result<Config> { let config_contents: String = read_to_string(path).with_context(|| { format!( "Could not read config {}", path.as_path().display().to_string() ) })?; let config: Config = toml::from_str(&config_contents).context("Could not parse TOML config")?; Ok(config) }
/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2015 Software Radio Systems Limited
 *
 * \section LICENSE
 *
 * This file is part of the srsUE library.
 *
 * srsUE is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of
 * the License, or (at your option) any later version.
 *
 * srsUE is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * A copy of the GNU Affero General Public License can be found in
 * the LICENSE file in the top-level directory of this distribution
 * and at http://www.gnu.org/licenses/.
 *
 */

#include <iostream>
#include <srslte/srslte.h>
#include "srslte/common/log_filter.h"
#include "srslte/asn1/liblte_rrc.h"
#include "srslte/asn1/liblte_mme.h"

// Decode a captured NAS PDU: parse the header, dispatch on the message type,
// and for an Attach Accept also unpack the piggy-backed
// "Activate Default EPS Bearer Context Request" ESM message.
void nas_test() {
  srslte::log_filter log1("NAS");
  log1.set_level(srslte::LOG_LEVEL_DEBUG);
  log1.set_hex_limit(-1);

  // Raw NAS message captured off the air; 73 bytes used of the 128-byte buffer.
  uint32_t nas_message_len = 73;
  uint8_t nas_message[128] = {0x27, 0x4f, 0xab, 0xef, 0x59, 0x01, 0x07, 0x42,
                              0x01, 0x49, 0x06, 0x40, 0x00, 0xf1, 0x10, 0x31,
                              0x32, 0x00, 0x22, 0x52, 0x01, 0xc1, 0x05, 0x07,
                              0xff, 0xff, 0xff, 0xff, 0x0c, 0x0b, 0x76, 0x7a,
                              0x77, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65,
                              0x74, 0x05, 0x01, 0x0e, 0x0e, 0x0e, 0x01, 0x5e,
                              0x04, 0xfe, 0xfe, 0x81, 0x4e, 0x50, 0x0b, 0xf6,
                              0x00, 0xf1, 0x10, 0x00, 0x02, 0x01, 0x01, 0x00,
                              0x00, 0x62, 0x17, 0x2c, 0x59, 0x49, 0x64, 0x01,
                              0x03};

  uint8 pd;        // protocol discriminator extracted from the header
  uint8 msg_type;  // NAS message type extracted from the header
  LIBLTE_BYTE_MSG_STRUCT buf;
  LIBLTE_MME_ATTACH_ACCEPT_MSG_STRUCT attach_accept;
  bzero(&attach_accept, sizeof(LIBLTE_MME_ATTACH_ACCEPT_MSG_STRUCT));
  LIBLTE_MME_ACTIVATE_DEFAULT_EPS_BEARER_CONTEXT_REQUEST_MSG_STRUCT act_def_eps_bearer_context_req;
  bzero(&act_def_eps_bearer_context_req, sizeof(LIBLTE_MME_ACTIVATE_DEFAULT_EPS_BEARER_CONTEXT_REQUEST_MSG_STRUCT));

  // Copy the captured bytes into the parser's message buffer.
  bzero(&buf, sizeof(LIBLTE_BYTE_MSG_STRUCT));
  memcpy(buf.msg, nas_message, nas_message_len);
  buf.N_bytes = nas_message_len;

  liblte_mme_parse_msg_header(&buf, &pd, &msg_type);
  // Only Attach Accept is fully decoded here; the remaining message types are
  // recognized but intentionally left unhandled in this test.
  switch(msg_type)
  {
  case LIBLTE_MME_MSG_TYPE_ATTACH_ACCEPT:
      liblte_mme_unpack_attach_accept_msg(&buf, &attach_accept);
      // The Attach Accept carries an embedded ESM container; unpack it too.
      liblte_mme_unpack_activate_default_eps_bearer_context_request_msg(&attach_accept.esm_msg, &act_def_eps_bearer_context_req);
      break;
  case LIBLTE_MME_MSG_TYPE_ATTACH_REJECT:
      break;
  case LIBLTE_MME_MSG_TYPE_AUTHENTICATION_REQUEST:
      break;
  case LIBLTE_MME_MSG_TYPE_AUTHENTICATION_REJECT:
      break;
  case LIBLTE_MME_MSG_TYPE_IDENTITY_REQUEST:
      break;
  case LIBLTE_MME_MSG_TYPE_SECURITY_MODE_COMMAND:
      break;
  case LIBLTE_MME_MSG_TYPE_SERVICE_REJECT:
      break;
  case LIBLTE_MME_MSG_TYPE_ESM_INFORMATION_REQUEST:
      break;
  case LIBLTE_MME_MSG_TYPE_EMM_INFORMATION:
      break;
  default:
      break;
  }
}

// Unpack a captured RRC DL-DCCH message (an RRCConnectionReconfiguration):
// the bytes are expanded to a bit buffer and handed to the liblte_rrc decoder.
void basic_test() {
  srslte::log_filter log1("RRC");
  log1.set_level(srslte::LOG_LEVEL_DEBUG);
  log1.set_hex_limit(-1);

  LIBLTE_BIT_MSG_STRUCT bit_buf;
  LIBLTE_RRC_DL_DCCH_MSG_STRUCT dl_dcch_msg;

  // Raw RRC message captured off the air; 147 bytes used of the 256-byte buffer.
  uint32_t rrc_message_len = 147;
  uint8_t rrc_message[256] = {0x22, 0x16, 0x95, 0xa0, 0x18, 0x00, 0x05, 0xaa,
                              0x50, 0x36, 0x00, 0x61, 0x08, 0x9c, 0xe3, 0x40,
                              0xb0, 0x84, 0x4e, 0x71, 0xc0, 0x30, 0x84, 0x6e,
                              0x71, 0xe0, 0x70, 0x84, 0x6e, 0x70, 0x6c, 0x63,
                              0x1a, 0xc6, 0xb9, 0x8e, 0x7b, 0x1e, 0x84, 0xc0,
                              0x01, 0x24, 0x9d, 0x3e, 0xaf, 0xbd, 0x64, 0x04,
                              0x1d, 0x08, 0x05, 0x24, 0x19, 0x00, 0x03, 0xc4,
                              0x40, 0xc4, 0xc8, 0x00, 0x89, 0x48, 0x07, 0x04,
                              0x14, 0x1f, 0xff, 0xff, 0xff, 0xfc, 0x30, 0x2d,
                              0xd9, 0xe9, 0xdd, 0xa5, 0xb9, 0xd1, 0x95, 0xc9,
                              0xb9, 0x95, 0xd0, 0x14, 0x04, 0x38, 0x38, 0x38,
                              0x05, 0x78, 0x13, 0xfb, 0xfa, 0x05, 0x39, 0x40,
                              0x2f, 0xd8, 0x03, 0xc4, 0x40, 0x00, 0x08, 0x04,
                              0x04, 0x00, 0x01, 0x88, 0x5c, 0xb1, 0x65, 0x25,
                              0x90, 0x04, 0x0d, 0xa9, 0xc0, 0x2a, 0x9a, 0x01,
                              0x99, 0x3b, 0x01, 0xf5, 0x12, 0xf0, 0x85, 0x0d,
                              0x85, 0xef, 0xc0, 0x01, 0xf2, 0x20, 0x60, 0x18,
                              0x07, 0x97, 0x09, 0x1f, 0xc3, 0x06, 0x00, 0x81,
                              0x00, 0x00, 0x11};

  // liblte_rrc expects one bit per byte in the bit buffer, so unpack first.
  srslte_bit_unpack_vector(rrc_message, bit_buf.msg, rrc_message_len*8);
  bit_buf.N_bits = rrc_message_len*8;
  liblte_rrc_unpack_dl_dcch_msg((LIBLTE_BIT_MSG_STRUCT*)&bit_buf, &dl_dcch_msg);

  printf("done\n");
}

int main(int argc, char **argv) {
  basic_test();
  nas_test();
}
/// Add another route to the router. /// /// # Example /// /// ```rust /// use axum::prelude::*; /// /// async fn first_handler() { /* ... */ } /// /// async fn second_handler() { /* ... */ } /// /// async fn third_handler() { /* ... */ } /// /// // `GET /` goes to `first_handler`, `POST /` goes to `second_handler`, /// // and `GET /foo` goes to third_handler. /// let app = route("/", get(first_handler).post(second_handler)) /// .route("/foo", get(third_handler)); /// # async { /// # hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); /// # }; /// ``` fn route<T, B>(self, description: &str, svc: T) -> Route<T, Self> where T: Service<Request<B>> + Clone, { Route { pattern: PathPattern::new(description), svc, fallback: self, } }
def fill_with_random_letters(self):
    """Fill every empty (None) cell of the grid with a random uppercase letter.

    Cells that already hold a letter are left untouched; ``self.letters``
    is modified in place.
    """
    for row_index in range(self.height):
        current_row = self.letters[row_index]
        for col_index in range(self.width):
            if current_row[col_index] is None:
                current_row[col_index] = random.choice(string.ascii_uppercase)
# Repo: Txbias/Chinese-VocabtrainerV2
# (Fixed: the scraper marker "<reponame>..." was a bare token at module level,
# which is invalid Python syntax; it is now a comment.)
from flask import Blueprint, redirect, url_for

# Blueprint that serves the site root ("/").
root_blueprint = Blueprint('root_blueprint', __name__, template_folder='templates')


@root_blueprint.route('/')
def root():
    """Redirect the bare root URL to the login page."""
    # TODO: Look for session cookies so an already-authenticated user can
    # skip the login page.
    return redirect(url_for('login_blueprint.login'))
Motel 6 announced it will stop sending customer lists to Immigration and Customs Enforcement (ICE) on Wednesday after its corporate office was made aware of the practice at some locations. The company's announcement came just hours after the Phoenix New Times reported that at least two Arizona Motel 6 locations were sending guest lists to ICE agents every morning, prompting ICE sting operations at the businesses. At least 20 undocumented immigrants have been arrested under this practice, the New Times reported. “This was implemented at the local level without the knowledge of senior management,” Motel 6 said in a statement. "When we became aware of it last week, it was discontinued." Statement Regarding Recent Media Reports on Phoenix-area Location pic.twitter.com/MPxaspNA6b — Motel 6 (@motel6) September 14, 2017 The New Times reported that ICE agents performed "knock and talks" at the locations after receiving the guest lists, which means officers show up at the hotel without a warrant and knock on doors asking permission to enter. If they are refused, they come back with a warrant. ADVERTISEMENT "We send a report every morning to ICE — all the names of everybody that comes in," one front-desk clerk told the New Times. "Every morning at about 5 o'clock, we do the audit and we push a button and it sends it to ICE." A Phoenix-area immigration attorney also wrote the New Times in an email and reported that some of her clients "have heard (no telling how valid the info is) that ICE is paying $200 per person for the front-desk clerk to report." In a statement to the newspaper, ICE refused to confirm the reports or how the agency gathers tips for its investigations. “I wouldn’t be able to confirm how we are getting our information. Those are investigative techniques that we wouldn’t be able to talk about,” ICE spokeswoman Yasmeen Pitts O'Keefe said. “If hypothetically we were somewhere — if we did administratively arrest some folks — that happens all the time. 
We conduct targeted enforcement operations every day.” Motel 6 issued a more detailed statement on Thursday, saying that the company had instructed all locations that sending guest lists to ICE is prohibited. "Moving forward, to help ensure that this does not occur again, we will be issuing a directive to every one of our more than 1,400 locations nationwide, making clear that they are prohibited from voluntarily providing daily guest lists to ICE," wrote a spokesperson for Motel 6's corporate office. "Protecting the privacy and security of our guests are core values of our company. Motel 6 apologizes for this incident and will continue to work to earn the trust and patronage of our millions of loyal guests," the statement added.
. We made quantitative and qualitative analyses of dusts in lung specimens using proton-induced X-ray emission (PIXE) and analytical electron microscopy (AEM). IIP group consisted of 23 patients. Control group, that had no apparent history for dust inhalation, consisted of 21 patients. Control group was matched with IIP group with respects to sex, age, smoking index and life style. For PIXE analysis, the elements Al, Si, P, S, Cl, K, Ca, Ti, Cr, Fe, Co, Ni, Cu, W and Au were examined for this study. Tissue specimen preparation for AEM study was based upon carbon extraction method. The amounts of Al and Si were significantly larger in IIP group than in the control group (Al: IIP 24.40 +/- 19.08 ng/60.8 mm2 and control 11.90 +/- 8.66, P less than 0.01; Si: IIP 54.43 +/- 45.18 and control 28.72 +/- 15.56, P less than 0.05). The elements Co, W and Au were not detected. The amounts of Si correlated inversely with PaO2 (r = -0.454, P less than 0.05). AEM study demonstrated larger amounts of free silica and silicate in IIP group than in the control group (free silica: IIP 0.7% and control 0.2%, P less than 0.01; silicate: IIP 2.6% and control 0.8%, P less than 0.01). These results suggest that inhalation of Si, especially of free silica, may have etiologic significance in IIP.
WE ALL know Serena Williams is a wonder woman but she is so remarkably different to the person she was 10 months ago. She is proof a rule that limits so many of us is dumb. After giving birth to daughter Olympia in September, the tennis superstar was treated for blood clots, including a pulmonary embolism, and didn’t know if she’d live to see her baby grow up let alone win another tennis match. But traumatic memories of her complicated health struggles can be put to the side on Saturday night (AEST) when Williams plays Angelique Kerber in the Wimbledon final for a chance to win her 24th grand slam and equal Australian Margaret Court’s record for most major singles titles. It’ll be her 10th appearance in a final at the All England Club but by far her most unique. “This is not inevitable for me,” Williams said after her 6-2 6-4 semi-final win over Julia Goerges on Friday morning. “I had a really tough delivery, I had to have multiple surgeries and almost didn’t make it, to be honest. “I remember I couldn’t even walk to my mailbox, so it’s definitely not ‘normal’ for me to be in a Wimbledon final. Nobody can accuse Williams of being “normal” on the court. She’s always come across more superhero than mere mortal with a racquet in hand — and never has that been more evident than at Wimbledon this year. In just her fourth tournament back since becoming a mother, the American has only dropped one set — in her quarter-final against Camilia Giorgi — en route to reaching the final. After surviving that scare against the Italian, husband Alexis Ohanian expressed just how incredible her comeback has been. Williams hasn’t just fought against physical ailments and the mental demons that come from being out of the game for so long. She’s also had to deal with entering the year’s third grand slam amid a storm that threatened her seeding as one of the top 32 players in the draw. 
Ranked 181st in the world following her lengthy lay-off — which was exacerbated by her retirement from the French Open with a pectoral injury — there were doubts about whether Wimbledon organisers would give Williams a seeding. Players who are seeded are afforded extra protection in the early rounds of a major by facing lower-ranked opponents. By entering Wimbledon unseeded, tennis officials faced the embarrassing prospect of the sport’s biggest drawcard facing a fellow big name, resulting in a genuine championship contender getting knocked out prematurely. The Wimbledon rule-book states: “The seeding order follows the WTA ranking list, except where, in the opinion of the committee, a change is necessary to produce a balanced draw.” Changes are often made to account for injury, so a player who’s ranked outside the top 32 may be seeded if they’ve only lost rankings points because they’ve been hurt. But given Williams’ pregnancy was “totally different from an injury”, Wimbledon chairman Philip Brook said, there was uncertainty about whether she would be afforded such treatment. Fortunately sanity prevailed, and the 36-year-old came into the tournament as the 25th seed. But she’s made a mockery of that number, steamrolling opponents while top seeds fell like flies in week one in London. Nine of the top 10 seeds suffered early exits but all the while Williams hasn’t missed a beat, much to her rivals’ misfortune. Serena Williams leads Julia Goerges 6-2, 5-2. One game from the final. Still not sure how the #Wimbledon seeding committee sat down and decided that 24 women were more likely to win this tournament than Serena. The victory over Goerges extended Williams’ winning streak at Wimbledon to 20 matches, dating back to the start of the 2015 edition. She’s also won her past 15 grand slam matches since the start of the 2017 Australian Open, which she won while pregnant. 
An eighth Wimbledon crown would surely rank as Williams’ most cherished, given what she’s endured to just get back on the court let alone be among the final two competitors standing. “It’s been a crazy 10 months,” Williams said. “I was still pregnant at this time last year. That’s something I have to keep reminding myself. But Williams isn’t obsessing over what it would mean to equal Court’s all-time record of 24 major singles titles. After all, she’s had other things to worry about. “To be perfectly honest, I haven’t thought about that this tournament,” Williams said. “Not even once actually. In fact, I’ve probably forgotten about it. “I think that’s a good thing because, you know, I put so much pressure on myself when I was trying to get to 18 (grand slam titles), then the rest, it was so much. “But as I said in the past couple years, I don’t want to limit myself. I think that’s what I was doing in the past, I was limiting myself.
Interesting question. I have never heard anyone complain that their particular race is not included/represented in the interracial dating scene. However, I do notice that East Asian men don't have as much luck with dating across racial lines as others. I remember one Caucasian guy with a preference for Black women telling me, "I'm not into Asian women." I was a little surprised, since most guys my age seemed to have a thing for Asian women, especially the ones who grew up on anime. "Why not?" I asked him, expecting some BS answer. He said, "They're cute, but I have to think of my kids. I've heard girls say, 'I'm really into White guys' or 'I love my chocolate' - but never heard any girl say, 'Asian guys really turn me on'. Not one time. Not ever. What if I have a son?" But from personal experience, I don't think people can really control what they are sexually attracted to, even if they can control whether or not they act on it. Interracial dating isn't a club or a cult we join. It's individual people making individual decisions about who they want to be with. Sometimes we have these decisions in common with other people, and sometimes we don't. That being said, I don't believe there's any mutual decision among us all not to date East Asian men. I (personally) don't find East Asian men sexually attractive, though I kept quite a few of them as friends in college and got along great with them. That's just a part of my sexual preference. Your best bet is to just find a girl who's into East Asian men - and contrary to what that guy said, I've known quite a few who are.
#include<bits/stdc++.h>
using namespace std;

const int MAXN = 1e5 + 10;

// Sweep-line event over Euler-tour positions.
//   x        : tour position at which the event fires
//   down, up : interval [down, up] of tour positions it affects
//   t        : kind -- 0 = add interval, 1 = answer query at x, 2 = remove interval
struct event{
    int x, down, up, t;
    event(){x = 0, down = 0, up = 0, t = 0;}
    event(int _x, int _down, int _up, int _t){x = _x, down = _down, up = _up, t = _t;}
    bool operator < (event b) const{
        // Same position: process adds (0) before queries (1) before removals (2).
        if(x == b.x) return t < b.t;
        return x < b.x;
    }
};

// Segment-tree node: minimum cover count over the range, how many positions
// attain that minimum (qnt), and a pending lazy increment (lz).
struct node{
    int min, qnt, lz;
    node(){min = 0, qnt = 1, lz = 0;}
    node(int _min, int _qnt, int _lz){min = _min, qnt = _qnt, lz = _lz;}
}seg[4 * MAXN];

vector<event> line;                                // all sweep events
int n, m;                                          // number of vertices, number of query pairs
vector<int> ar[MAXN];                              // adjacency lists of the tree
int marc[MAXN];                                    // DFS visited marks
int temp;                                          // Euler-tour clock (equals n after dfs(1) on a connected tree)
int treeLin[MAXN], treeIni[MAXN], treeEnd[MAXN];   // position -> vertex; subtree entry/exit times
int ans[MAXN];                                     // per-vertex answer, indexed by vertex

void dfs(int cur);
void merge(int pos, int e, int d);
void build(int pos, int ini, int fim);
void refresh(int pos, int ini, int fim);
void update(int pos, int ini, int fim, int p, int q, int val);

int main(){
    scanf("%d %d", &n, &m);
    build(1, 1, n);

    // Read the n-1 tree edges (undirected).
    for(int i = 1; i < n; i++){
        int u, v;
        scanf("%d %d", &u, &v);
        ar[u].push_back(v);
        ar[v].push_back(u);
    }

    // Euler tour rooted at vertex 1: fills treeLin/treeIni/treeEnd, sets temp.
    dfs(1);

    // For each pair (a, b): while the sweep position is inside either subtree,
    // both subtree intervals are active (cross product of the two ranges).
    for(int i = 0; i < m; i++){
        int a, b;
        scanf("%d %d", &a, &b);
        line.push_back(event(treeIni[a], treeIni[a], treeEnd[a], 0));
        line.push_back(event(treeEnd[a], treeIni[a], treeEnd[a], 2));
        line.push_back(event(treeIni[a], treeIni[b], treeEnd[b], 0));
        line.push_back(event(treeEnd[a], treeIni[b], treeEnd[b], 2));
        line.push_back(event(treeIni[b], treeIni[b], treeEnd[b], 0));
        line.push_back(event(treeEnd[b], treeIni[b], treeEnd[b], 2));
        line.push_back(event(treeIni[b], treeIni[a], treeEnd[a], 0));
        line.push_back(event(treeEnd[b], treeIni[a], treeEnd[a], 2));
    }

    // Each tour position i covers itself and is queried exactly at x = i.
    for(int i = 1; i <= temp; i++){
        line.push_back(event(i, i, i, 0));
        line.push_back(event(i, i, i, 1));
        line.push_back(event(i, i, i, 2));
    }

    sort(line.begin(), line.end());

    for(int i = 0; i < line.size(); i++){
        int t = line[i].t;
        // printf("LINE: %d %d %d %d\n", line[i].x, line[i].down, line[i].up, line[i].t);
        if(t == 0){
            int down, up;
            down = line[i].down;
            up = line[i].up;
            update(1, 1, temp, down, up, 1);
        }
        if(t == 1){
            // seg[1] summarizes [1, temp]: if the global minimum cover is non-zero
            // every position is covered; otherwise seg[1].qnt counts uncovered ones.
            int x = line[i].x;
            if(seg[1].min != 0)
                ans[treeLin[x]] = temp;
            else
                ans[treeLin[x]] = temp - seg[1].qnt;
            // printf("(%d) %d - %d = %d\n", treeLin[x], temp, seg[1].qnt, temp - seg[1].qnt);
        }
        if(t == 2){
            int down, up;
            down = line[i].down;
            up = line[i].up;
            update(1, 1, temp, down, up, -1);
        }
    }

    // Printed answers are shifted down by one (the position's self-cover is excluded).
    for(int i = 1; i <= n; i++){
        printf("%d ", ans[i] - 1);
    }
    printf("\n");
}

// Euler tour: record the vertex at each tour position (treeLin) and each
// vertex's subtree interval [treeIni, treeEnd] in tour coordinates.
void dfs(int cur){
    marc[cur] = 1;
    treeLin[++temp] = cur;
    treeIni[cur] = temp;
    for(int i = 0; i < ar[cur].size(); i++){
        int viz = ar[cur][i];
        if(marc[viz]) continue;
        dfs(viz);
    }
    treeEnd[cur] = temp;
}

// Combine children e and d into pos: keep the smaller minimum; on a tie,
// the minima counts add up. Lazy of the parent is consumed by refresh().
void merge(int pos, int e, int d){
    if(seg[e].min == seg[d].min) seg[pos] = seg[e], seg[pos].qnt += seg[d].qnt;
    if(seg[e].min < seg[d].min) seg[pos] = seg[e];
    if(seg[e].min > seg[d].min) seg[pos] = seg[d];
    seg[pos].lz = 0;
    return;
}

// Build the tree over [ini, fim]: leaves start at min = 0, qnt = 1.
void build(int pos, int ini, int fim){
    if(ini == fim){
        seg[pos] = node();
        return;
    }
    int mid = (ini + fim) >> 1, e = pos << 1, d = e | 1;
    build(e, ini, mid);
    build(d, mid + 1, fim);
    merge(pos, e, d);
    return;
}

// Apply the pending lazy increment at pos and push it down to the children.
void refresh(int pos, int ini, int fim){
    if(seg[pos].lz == 0) return;
    seg[pos].min += seg[pos].lz;
    if(ini != fim){
        int e = pos << 1, d = e | 1;
        seg[e].lz += seg[pos].lz;
        seg[d].lz += seg[pos].lz;
    }
    seg[pos].lz = 0;
}

// Range add of val over [p, q] with lazy propagation.
void update(int pos, int ini, int fim, int p, int q, int val){
    refresh(pos, ini, fim);
    if(ini > q || fim < p) return;
    if(ini >= p && fim <= q){
        seg[pos].lz += val;
        refresh(pos, ini, fim);
        return;
    }
    int mid = (ini + fim) >> 1, e = pos << 1, d = e | 1;
    update(e, ini, mid, p, q, val);
    update(d, mid + 1, fim, p, q, val);
    merge(pos, e, d);
    return;
}
// NewEventTimeoutWatcher returns a new watcher func NewEventTimeoutWatcher(parentLogger logger.Logger, timeout time.Duration, processor Processor) (*EventTimeoutWatcher, error) { watcher := &EventTimeoutWatcher{ logger: parentLogger.GetChild("timeout"), timeout: timeout, processor: processor, } go watcher.watch() return watcher, nil }
The bipartisan Surveillance State Repeal Act, if passed, would repeal dragnet surveillance of Americans’ personal communications, overhaul the federal domestic surveillance program, and provide protections for whistleblowers. House lawmakers Mark Pocan (D-Wis.) and Thomas Massie (R-Ky.) are co-sponsoring bill H.R.1466, which was introduced on Tuesday and would repeal the 2001 Patriot Act, limit powers of the FISA Amendments Act, and prohibit retaliation against federal national security whistleblowers, according to The Hill. “The Patriot Act contains many provisions that violate the Fourth Amendment and have led to a dramatic expansion of our domestic surveillance state,” said Rep. Massie in a statement. "Our Founding Fathers fought and died to stop the kind of warrantless spying and searches that the Patriot Act and the FISA Amendments Act authorize. It is long past time to repeal the Patriot Act and reassert the constitutional rights of all Americans.” As it looks right now, this is HUGE!!!! If this is what it appears to be, we have to fight like animals for it! - http://t.co/nHoXtd0MGB — Wade Biery (@WadeBiery) March 24, 2015 Specifically, the bill would revoke all the powers of the Patriot Act, and instruct the Director of National Intelligence and the Attorney General to destroy any information collected under the FISA Amendments Act concerning any US person not under investigation. It would repeal provisions of the FISA Amendments Act to ensure surveillance of email data only occurs with a valid warrant based on probable cause. The bill would also prohibit the government from mandating that manufacturers build mechanisms allowing the government to bypass encryption in order to conduct surveillance. READ MORE:‘You are surveillance target’ – Snowden to IT specialists Additionally, the bill would protect a federal whistleblower’s efforts to expose mismanagement, waste, fraud, abuse, or criminal behavior. 
It would also make retaliation against anyone interfering with those efforts – such as threatening them with punishment or termination – illegal. “Really, what we need are new whistleblower protections so that the next Edward Snowden doesn’t have to go to Russia or Hong Kong or whatever the case may be just for disclosing this,” Massie said. There have been previous attempts to limit dragnet surveillance under the Patriot Act since former National Security Agency analyst Edward Snowden leaked information regarding the programs in 2013, but the Senate bill introduced in 2013 never reached the floor for a vote. “The warrantless collection of millions of personal communications from innocent Americans is a direct violation of our constitutional right to privacy,” said Rep. Pocan in a statement. READ MORE:DARPA launches new-tech program to protect online privacy “Revelations about the NSA’s programs reveal the extraordinary extent to which the program has invaded Americans’ privacy. I reject the notion that we must sacrifice liberty for security – we can live in a secure nation which also upholds a strong commitment to civil liberties. This legislation ends the NSA’s dragnet surveillance practices, while putting provisions in place to protect the privacy of American citizens through real and lasting change.” Portions of the Patriot Act are due for renewal on June 1.
INHALATION OF LOW CONCENTRATIONS OF ASBESTOS

[…if patients were to] enter hospital coronary care units on the average five years later than they do now, we would have accomplished more than by creating a world of survivors with artificial hearts, or of family units where (as Friedberg has remarked) every affluent household owns a portable defibrillator. To date the coronary care unit has proved a valuable means for the prophylaxis and treatment of electrical catastrophes. With the limitations on myocardial function set by the underlying coronary artery disease there is an obvious limit to the benefits which can be expected. With the consequent law of diminishing returns, hoped-for benefits must inevitably be weighed against the expected cost in relation to other national health priorities. With the high standard of our existing ambulance services, the modest improvement which can be expected from coronary ambulances in the outlook from "sudden death" can probably best be attained at a bearable cost to the community through greater use of paramedical personnel resources.
#include <xtensor/xtensor.hpp>
#include <xtensor/xrandom.hpp>
#include <highfive/H5Easy.hpp>
#include <XDMFWrite_HighFive.hpp>

namespace xh = XDMFWrite_HighFive;

// Build a small structured grid, store its geometry and a per-node "radius"
// field in an HDF5 file, then emit an XDMF wrapper describing the data.
int main()
{
    // Six grid points on a 2 x 3 lattice.
    xt::xtensor<double,2> points = {
        {0, 0}, {0, 1}, {0, 2},
        {1, 0}, {1, 1}, {1, 2}};

    // For a structured grid the connectivity is simply the node index sequence.
    xt::xtensor<size_t,1> connectivity = xt::arange<size_t>(points.shape(0));

    // One random scalar value per node.
    xt::xtensor<double,1> node_radius = xt::random::rand<double>({points.shape(0)});

    H5Easy::File file("grid_structured.h5", H5Easy::File::Overwrite);
    H5Easy::dump(file, "/coor", points);
    H5Easy::dump(file, "/conn", connectivity);
    H5Easy::dump(file, "/radius", node_radius);

    // Describe the geometry and the node-centered attribute, then write XDMF.
    auto geometry = xh::Structured(file, "/coor", "/conn");
    auto field = xh::Attribute(file, "/radius", xh::AttributeCenter::Node);
    xh::write("grid_structured.xdmf", xh::Grid({geometry, field}));

    return 0;
}
Instance-based web services composition and verification

Web services composition and verification are very important techniques of Service-Oriented Architecture (SOA). Composite web services are often created at design time, based on functional and non-functional (Quality of Service — QoS) properties. However, the values of QoS properties also depend on each specific instance of a web service at runtime (instance-based). Consequently, when we execute the composite web service in a real system, it may no longer satisfy the QoS constraints. Another type of constraint that affects web services composition is the constraint on the execution order of the component web services within a composite web service; this is the so-called temporal relation constraint. In this work, we propose a novel web services composition and verification method that is instance-based. In addition, this study proposes a new technique which combines all kinds of constraints: hard constraints (functional constraints), soft constraints (QoS constraints) and temporal relation constraints. Our approach has been evaluated on real-world case studies and has shown promising results.
In general, a portable power tool, such as a planer, jigsaw, circular saw, drill driver, and the like, includes a trigger that is movable by an operator of the tool between an ON position and an OFF position to control power to the tool. Some power tools are also provided with a trigger release mechanism which is capable of keeping the trigger in the OFF position. The trigger release mechanism typically comprises a button or switch that can be moved to a position at which movement of the trigger from the OFF position to the ON position is blocked or otherwise prevented. The trigger release mechanism must be moved to a non-blocking position to allow the trigger to be moved to the ON position so the tool can be operated. Some other power tools are provided with a trigger lock-on mechanism which is capable of keeping the trigger in the ON position. The trigger lock-on mechanism typically comprises a button or switch that can be moved to a position at which movement of the trigger from the ON position to the OFF position is blocked or otherwise prevented. To allow the trigger to be moved to the OFF position so the tool can be turned off, the trigger must be pressed so that the lock-on mechanism can be moved to a non-blocking position. Until now, there have been no known power tools that have both a trigger release mechanism and a trigger lock-on mechanism.
The Phnom Penh Municipal Court has summonsed ex-unionist Chea Mony for the second time in an “incitement” case against him after he had failed to show up for the first questioning. Mony did not show up for his first summons on Tuesday, and the second summons was issued the same day. The former Free Trade Union leader is being sued by 120 pro-government union representatives after allegedly calling on the United States and European Union to consider economic sanctions in response to the government's recent crackdown on the opposition. The remarks were made during a Radio Free Asia interview. Mony could not be reached today and is reportedly abroad. Incitement to commit a felony carries a prison sentence of six months to two years, whereas incitement to discriminate carries a jail term of up to three years. Neither the complaint nor the summons letter specifies which incitement Mony is alleged to have committed.
November 25, 2014 • The University of Texas, Austin nabbed the rights to preserve and present the late Nobel winner's collected writings. Also: HBO takes on Scientology, and Aretha Franklin decries her new biography. November 19, 2014 • With 20 nominees across five categories, it's little surprise that the Costa Book Award shortlists prove eclectic. Also: A remembrance of the late novelist and transgender activist Leslie Feinberg.