Botnet Detection Using DNS and HTTP Traffic Analysis To perform a large-scale attack on a victim, a cyber attacker usually prepares thousands, if not millions, of infected computers to accomplish the goal. Once the infected computers, collectively called a botnet, are ready, they communicate with the Command and Control (C&C) server to obtain instructions for performing their acts. Botnets try to disguise their communication as regular traffic by using commonly used protocols such as HTTP so that their conversation with the C&C server is not blocked by the firewall. This research explores botnets' footprints in both HTTP and DNS protocols and analyzes their behaviors to select the most appropriate HTTP and DNS features for use in our classification model. The developed model has been shown to provide 86% accuracy in distinguishing botnet from benign traffic on an enterprise network. |
A Pennsylvania man is arrested after Harrison police find more than 3 lbs. of marijuana and 248 THC cartridges in the car he was driving.
HARRISON — A Pennsylvania man stopped on a traffic violation was arrested Saturday after police found more than three pounds of marijuana and 248 colorfully-wrapped THC cartridges in the car he was driving.
Kaleigh Beers, 19, of Emmaus, Pennsylvania, was charged with second-degree possession of marijuana, a felony.
Police said Officer Sokol Biberaj stopped a car at about 2:30 p.m. on eastbound Westchester Avenue near Kenilworth Road on an unspecified traffic violation. As he spoke to Beers and passengers in the car, Biberaj smelled marijuana, police said.
A search uncovered 3.16 lbs. of marijuana and 248 1-gram cartridges of THC in the car's passenger compartment.
The THC cartridges were individually wrapped in brightly colored packaging, which police said was used in an apparent attempt to sell them to children and teens.
"The THC cartridges we recovered were deliberately designed to look like a candy product in order to appeal to young people. I comment our officer for his keen observations and diligent investigation that enabled us to take these products and this drug dealer off the streets," Harrison Police Chief Joseph Yasinski said in a statement.
Also seized were scales, plastic bags and other paraphernalia commonly used in the sale of marijuana and other drugs.
Beers was taken to the Westchester County Jail in Valhalla, where he was being held without bail. Court information was not available. |
import { Component, Event, EventEmitter, Prop, h } from '@stencil/core';
@Component({
tag: 'kup-modal',
styleUrl: 'kup-modal.scss',
shadow: true,
})
export class KupModal {
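// When `visible` is true, the wrapper below gets the 'visible' CSS class and the modal is shown; the prop is mutable and reflected to the host attribute.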
@Prop({
mutable: true,
reflectToAttr: true,
})
public visible: boolean;
@Prop()
public header: string;
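// Emitted when the cancel (X) element in the top bar is clicked; listeners would typically hide the modal by toggling `visible`.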
@Event({
eventName: 'kupModalCancel',
composed: true,
cancelable: false,
bubbles: true,
})
kupModalCancel: EventEmitter;
private handleCancelClick = () => {
this.kupModalCancel.emit();
};
public render() {
const { visible, header, handleCancelClick } = this;
return (
<div class={visible ? 'modal-wrapper visible' : 'modal-wrapper'}>
<div class="modal">
<div class="modal-top">
<span class="modal-cancel" onClick={handleCancelClick}>
X
</span>
</div>
<div class="modal-header">
<span>{header}</span>
</div>
<div class="modal-content">
<slot />
</div>
<div class="modal-bottom"></div>
</div>
</div>
);
}
}
|
package com.wistron.controller;
import com.wistron.model.Process;
import com.wistron.repository.ProcessRepository;
import com.wistron.utils.ResponseEnvelope;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
//@CrossOrigin(origins = "http://localhost:8081")
@RestController
//@RequestMapping("/api")
public class ProcessController {
@Autowired
ProcessRepository processRepository;
@GetMapping("/processes")
public ResponseEntity<ResponseEnvelope<List<Process> >> getAllProcesses(
@RequestParam(required = false) Long containerId,
@RequestParam(value = "start", required = false) Long start,
@RequestParam(value = "limit", required = false) Long limit,
@RequestParam(value = "startDateTime", required = false) String startDateTime,
@RequestParam(value = "endDateTime", required = false) String endDateTime) {
try {
List<Process> processes = new ArrayList<Process>();
if (containerId == null) {
if (startDateTime == null && endDateTime == null)
processRepository.findAll().forEach(processes::add);
else
processRepository.findAll(startDateTime, endDateTime).forEach(processes::add);
}
else
if (startDateTime == null && endDateTime == null)
processRepository.findProcessesByContainerId(containerId).forEach(processes::add);
else
processRepository.findProcessesByContainerId(containerId, startDateTime, endDateTime).forEach(processes::add);
/*if (processes.isEmpty()) {
return new ResponseEntity<>(HttpStatus.NO_CONTENT);
}*/
return new ResponseEntity<>(new ResponseEnvelope<List<Process> >(200, "success.", processes), HttpStatus.OK);
} catch (Exception e) {
return new ResponseEntity<>(null, HttpStatus.INTERNAL_SERVER_ERROR);
}
}
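// POST variant exposing the same filters and filtering logic as the GET handler above.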
@PostMapping("/processes")
public ResponseEntity<ResponseEnvelope<List<Process> >> postAllProcesses(
@RequestParam(required = false) Long containerId,
@RequestParam(value = "start", required = false) Long start,
@RequestParam(value = "limit", required = false) Long limit,
@RequestParam(value = "startDateTime", required = false) String startDateTime,
@RequestParam(value = "endDateTime", required = false) String endDateTime) {
try {
List<Process> processes = new ArrayList<Process>();
if (containerId == null) {
if (startDateTime == null && endDateTime == null)
processRepository.findAll().forEach(processes::add);
else
processRepository.findAll(startDateTime, endDateTime).forEach(processes::add);
}
else
if (startDateTime == null && endDateTime == null)
processRepository.findProcessesByContainerId(containerId).forEach(processes::add);
else
processRepository.findProcessesByContainerId(containerId, startDateTime, endDateTime).forEach(processes::add);
/*if (processes.isEmpty()) {
return new ResponseEntity<>(HttpStatus.NO_CONTENT);
}*/
return new ResponseEntity<>(new ResponseEnvelope<List<Process> >(200, "success.", processes), HttpStatus.OK);
} catch (Exception e) {
return new ResponseEntity<>(null, HttpStatus.INTERNAL_SERVER_ERROR);
}
}
@GetMapping("/process/{id}")
public ResponseEntity<Process> getProcessById(@PathVariable("id") long id) {
Optional<Process> processData = processRepository.findById(id);
if (processData.isPresent()) {
return new ResponseEntity<>(processData.get(), HttpStatus.OK);
} else {
return new ResponseEntity<>(HttpStatus.NOT_FOUND);
}
}
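// The finder methods used above, findAll(startDateTime, endDateTime),
// findProcessesByContainerId(containerId), and the three-argument
// findProcessesByContainerId, are custom methods that must be declared on
// ProcessRepository. A minimal sketch of how such a Spring Data JPA repository
// could look follows; the JPQL, the startTime property name, and the @Param
// bindings are illustrative assumptions, not this project's actual declarations:
//
// public interface ProcessRepository extends JpaRepository<Process, Long> {
//     @Query("SELECT p FROM Process p WHERE p.startTime BETWEEN :start AND :end")
//     List<Process> findAll(@Param("start") String start, @Param("end") String end);
//
//     List<Process> findProcessesByContainerId(Long containerId);
//
//     @Query("SELECT p FROM Process p WHERE p.containerId = :cid AND p.startTime BETWEEN :start AND :end")
//     List<Process> findProcessesByContainerId(@Param("cid") Long containerId,
//             @Param("start") String start, @Param("end") String end);
// }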
} |
/**
* Copyright (c) 2017-2020 The Semux Developers
* <p>
* Distributed under the MIT software license, see the accompanying file
* LICENSE or https://opensource.org/licenses/mit-license.php
*/
package org.semux.sdk.util;
import java.io.UnsupportedEncodingException;
import java.security.SecureRandom;
import java.util.Arrays;
import java.util.List;
public class Bytes {
private static final SecureRandom secureRandom = new SecureRandom();
/**
* Default charset.
*/
public static final String CHARSET = "UTF-8";
/**
* Empty byte array.
*/
public static final byte[] EMPTY_BYTES = new byte[0];
/**
* Empty address.
*/
public static final byte[] EMPTY_ADDRESS = new byte[20];
/**
* Empty 256-bit hash.
* <p>
* Note: this is not the hash of empty byte array.
*/
public static final byte[] EMPTY_HASH = new byte[32];
private Bytes() {
}
/**
* Generate a random byte array of required length.
*
* @param n
* @return
*/
public static byte[] random(int n) {
byte[] bytes = new byte[n];
secureRandom.nextBytes(bytes);
return bytes;
}
/**
* Merge two byte arrays into one.
*
* @param b1
* @param b2
* @return
*/
public static byte[] merge(byte[] b1, byte[] b2) {
byte[] res = new byte[b1.length + b2.length];
System.arraycopy(b1, 0, res, 0, b1.length);
System.arraycopy(b2, 0, res, b1.length, b2.length);
return res;
}
/**
* Merge byte array and byte
*
* @param b1
* @param b2
* @return
*/
public static byte[] merge(byte[] b1, byte b2) {
byte[] res = new byte[b1.length + 1];
System.arraycopy(b1, 0, res, 0, b1.length);
res[b1.length] = b2;
return res;
}
/**
* Merge byte and byte array.
*
* @param b1
* @param b2
* @return
*/
public static byte[] merge(byte b1, byte[] b2) {
byte[] res = new byte[1 + b2.length];
res[0] = b1;
System.arraycopy(b2, 0, res, 1, b2.length);
return res;
}
/**
* Merge byte arrays into one.
*
* @param bytes byte arrays
* @return
*/
public static byte[] merge(byte[]... bytes) {
return merge(Arrays.asList(bytes));
}
/**
* Merge byte arrays into one.
*
* @param bytes byte arrays
* @return
*/
public static byte[] merge(List<byte[]> bytes) {
int length = 0;
for (byte[] b : bytes) {
length += b.length;
}
byte[] res = new byte[length];
int i = 0;
for (byte[] b : bytes) {
System.arraycopy(b, 0, res, i, b.length);
i += b.length;
}
return res;
}
/**
* Convert a string into a byte array.
*
* @param str
* @return
*/
public static byte[] of(String str) {
try {
return str.getBytes(CHARSET);
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
}
/**
* Convert a byte into a byte array.
*
* @param b
* @return
*/
public static byte[] of(byte b) {
return new byte[] { b };
}
/**
* Convert a short integer into a byte array.
*
* @param s
* @return
*/
public static byte[] of(short s) {
byte[] bytes = new byte[2];
bytes[0] = (byte) ((s >> 8) & 0xff);
bytes[1] = (byte) (s & 0xff);
return bytes;
}
/**
* Convert an integer into a byte array.
*
* @param i
* @return
*/
public static byte[] of(int i) {
byte[] bytes = new byte[4];
bytes[0] = (byte) ((i >> 24) & 0xff);
bytes[1] = (byte) ((i >> 16) & 0xff);
bytes[2] = (byte) ((i >> 8) & 0xff);
bytes[3] = (byte) (i & 0xff);
return bytes;
}
/**
* Convert a long integer into a byte array.
*
* @param i
* @return
*/
public static byte[] of(long i) {
byte[] bytes = new byte[8];
bytes[0] = (byte) ((i >> 56) & 0xff);
bytes[1] = (byte) ((i >> 48) & 0xff);
bytes[2] = (byte) ((i >> 40) & 0xff);
bytes[3] = (byte) ((i >> 32) & 0xff);
bytes[4] = (byte) ((i >> 24) & 0xff);
bytes[5] = (byte) ((i >> 16) & 0xff);
bytes[6] = (byte) ((i >> 8) & 0xff);
bytes[7] = (byte) (i & 0xff);
return bytes;
}
/**
* Convert byte array into string.
*
* @param bytes
* @return
*/
public static String toString(byte[] bytes) {
try {
return new String(bytes, CHARSET);
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
}
/**
* Convert a byte array into a byte.
*
* @param bytes
* @return
*/
public static byte toByte(byte[] bytes) {
return bytes[0];
}
/**
* Convert a byte array into a short integer.
*
* @param bytes
* @return
*/
public static short toShort(byte[] bytes) {
return (short) (((bytes[0] & 0xff) << 8) | (bytes[1] & 0xff));
}
/**
* Convert a byte array into an integer.
*
* @param bytes
* @return
*/
public static int toInt(byte[] bytes) {
return ((bytes[0] & 0xff) << 24)
| ((bytes[1] & 0xff) << 16)
| ((bytes[2] & 0xff) << 8)
| (bytes[3] & 0xff);
}
/**
* Convert a byte array into a long integer.
*
* @param bytes
* @return
*/
public static long toLong(byte[] bytes) {
return ((bytes[0] & 0xffL) << 56)
| ((bytes[1] & 0xffL) << 48)
| ((bytes[2] & 0xffL) << 40)
| ((bytes[3] & 0xffL) << 32)
| ((bytes[4] & 0xffL) << 24)
| ((bytes[5] & 0xffL) << 16)
| ((bytes[6] & 0xffL) << 8)
| (bytes[7] & 0xff);
}
}
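// A minimal usage sketch (not part of the library) showing round trips through
// the helpers above; the class name BytesExample is hypothetical and assumes it
// lives in the same package as Bytes.
class BytesExample {
    public static void main(String[] args) {
        // int -> byte[4] -> int round trip (big-endian byte order)
        byte[] encoded = Bytes.of(0x12345678);
        System.out.println(Bytes.toInt(encoded) == 0x12345678); // true

        // merge a UTF-8 string with a single appended byte
        byte[] merged = Bytes.merge(Bytes.of("semux"), Bytes.of((byte) 0x01));
        System.out.println(merged.length); // 6: five UTF-8 bytes plus one appended byte
    }
}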
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package views;
import controllers.NewController;
import controllers.PokemonController;
import javax.swing.JFrame;
import javax.swing.JTextField;
/**
*
* @author nadook
*/
public class NewPanelForm extends javax.swing.JPanel {
private final NewController controller;
private final PokemonController controllerPokemon;
private final JFrame main;
/** Creates new form NewPanelForm
* @param pokePanel
* @param main */
public NewPanelForm(PokemonPanelForm pokePanel, JFrame main) {
initComponents();
controller = new NewController(this);
controllerPokemon = new PokemonController(pokePanel);
this.main = main;
}
public JTextField getTxtName() {
return jtName;
}
public JTextField getTxtHp() {
return jtLife;
}
public JTextField getTxtLevel() {
return jtLevel;
}
public JTextField getTxtAgility() {
return jtAgility;
}
public JTextField getTxtAttack() {
return jtAttack;
}
public JTextField getTxtDefense() {
return jtDefense;
}
public JTextField getTxtSpecialAttack() {
return jtSpecialAttack;
}
public JTextField getTxtSpecialDefense() {
return jtSpecialDefense;
}
/** This method is called from within the constructor to
* initialize the form.
* WARNING: Do NOT modify this code. The content of this method is
* always regenerated by the Form Editor.
*/
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
jLabel2 = new javax.swing.JLabel();
jPanel2 = new javax.swing.JPanel();
jLabel1 = new javax.swing.JLabel();
jtName = new javax.swing.JTextField();
jLabel3 = new javax.swing.JLabel();
jtLife = new javax.swing.JTextField();
jLabel4 = new javax.swing.JLabel();
jtLevel = new javax.swing.JTextField();
jLabel5 = new javax.swing.JLabel();
jtAgility = new javax.swing.JTextField();
jLabel6 = new javax.swing.JLabel();
jtAttack = new javax.swing.JTextField();
jLabel7 = new javax.swing.JLabel();
jtDefense = new javax.swing.JTextField();
jLabel8 = new javax.swing.JLabel();
jtSpecialAttack = new javax.swing.JTextField();
jLabel9 = new javax.swing.JLabel();
jtSpecialDefense = new javax.swing.JTextField();
jbConfirm = new javax.swing.JButton();
jbCancel = new javax.swing.JButton();
jLabel2.setText("Novo Pokemon");
jPanel2.setBorder(javax.swing.BorderFactory.createBevelBorder(javax.swing.border.BevelBorder.RAISED));
javax.swing.GroupLayout jPanel2Layout = new javax.swing.GroupLayout(jPanel2);
jPanel2.setLayout(jPanel2Layout);
jPanel2Layout.setHorizontalGroup(
jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGap(0, 336, Short.MAX_VALUE)
);
jPanel2Layout.setVerticalGroup(
jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGap(0, 0, Short.MAX_VALUE)
);
jLabel1.setText("Nome:");
jLabel3.setText("Vida:");
jLabel4.setText("Level:");
jtLevel.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jtLevelActionPerformed(evt);
}
});
jLabel5.setText("Agilidade:");
jLabel6.setText("Ataque:");
jLabel7.setText("Defesa:");
jLabel8.setText("Ataque Especial:");
jLabel9.setText("Defesa Especial:");
jbConfirm.setText("Cadastrar");
jbConfirm.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jbConfirmActionPerformed(evt);
}
});
jbCancel.setText("Cancelar");
jbCancel.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jbCancelActionPerformed(evt);
}
});
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(this);
this.setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING, false)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addGroup(layout.createSequentialGroup()
.addGap(16, 16, 16)
.addComponent(jLabel2, javax.swing.GroupLayout.PREFERRED_SIZE, 106, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGroup(layout.createSequentialGroup()
.addGap(75, 75, 75)
.addComponent(jLabel1)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jtName, javax.swing.GroupLayout.PREFERRED_SIZE, 152, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addComponent(jPanel2, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGroup(layout.createSequentialGroup()
.addGap(13, 13, 13)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addComponent(jLabel8)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jtSpecialAttack, javax.swing.GroupLayout.PREFERRED_SIZE, 152, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGap(40, 40, 40)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(jLabel6)
.addComponent(jLabel7)
.addComponent(jLabel5))
.addGap(6, 6, 6))
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(jLabel4)
.addComponent(jLabel3))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)))
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(jtLevel, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.DEFAULT_SIZE, 152, Short.MAX_VALUE)
.addComponent(jtAgility, javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(jtAttack, javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(jtDefense, javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(jtLife)))
.addGroup(layout.createSequentialGroup()
.addComponent(jLabel9)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jtSpecialDefense, javax.swing.GroupLayout.PREFERRED_SIZE, 152, javax.swing.GroupLayout.PREFERRED_SIZE)))))
.addGroup(layout.createSequentialGroup()
.addGap(60, 60, 60)
.addComponent(jbConfirm)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jbCancel)
.addGap(49, 49, 49)))
.addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGap(15, 15, 15)
.addComponent(jLabel2, javax.swing.GroupLayout.PREFERRED_SIZE, 36, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jPanel2, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel1)
.addComponent(jtName, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel3)
.addComponent(jtLife, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel4)
.addComponent(jtLevel, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel5)
.addComponent(jtAgility, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel6)
.addComponent(jtAttack, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel7)
.addComponent(jtDefense, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel8)
.addComponent(jtSpecialAttack, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(10, 10, 10)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel9)
.addComponent(jtSpecialDefense, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(29, 29, 29)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jbConfirm)
.addComponent(jbCancel))
.addContainerGap(28, Short.MAX_VALUE))
);
}// </editor-fold>//GEN-END:initComponents
private void jtLevelActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jtLevelActionPerformed
// TODO add your handling code here:
}//GEN-LAST:event_jtLevelActionPerformed
private void jbConfirmActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jbConfirmActionPerformed
controller.insert();
controllerPokemon.setTable();
main.dispose();
}//GEN-LAST:event_jbConfirmActionPerformed
private void jbCancelActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jbCancelActionPerformed
main.dispose();
}//GEN-LAST:event_jbCancelActionPerformed
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JLabel jLabel1;
private javax.swing.JLabel jLabel2;
private javax.swing.JLabel jLabel3;
private javax.swing.JLabel jLabel4;
private javax.swing.JLabel jLabel5;
private javax.swing.JLabel jLabel6;
private javax.swing.JLabel jLabel7;
private javax.swing.JLabel jLabel8;
private javax.swing.JLabel jLabel9;
private javax.swing.JPanel jPanel2;
private javax.swing.JButton jbCancel;
private javax.swing.JButton jbConfirm;
private javax.swing.JTextField jtAgility;
private javax.swing.JTextField jtAttack;
private javax.swing.JTextField jtDefense;
private javax.swing.JTextField jtLevel;
private javax.swing.JTextField jtLife;
private javax.swing.JTextField jtName;
private javax.swing.JTextField jtSpecialAttack;
private javax.swing.JTextField jtSpecialDefense;
// End of variables declaration//GEN-END:variables
}
|
“No Plans” to Stream Thursday’s PlayStation ‘Slim’ Event, “Don’t Anticipate Any Software Announcements”
This Thursday, PlayStation will be holding an event to show us exactly what they mean by “following the biggest launch in PlayStation history, join us for an introduction to the slimmest.”
While speculation is running wild that we’ll see the PlayStation Vita 2000 come West, you unfortunately won’t be able to watch the PlayStation UK event live through a stream, and you shouldn’t expect any new game announcements, as SCEE Blog Manager Fred Dutton said:
No, we’ve no plans [to stream the media event on Thursday]. It’s a PlayStation UK event. In the interest of managing expectations, please don’t anticipate any software announcements.
Whatever happens to be unveiled this Thursday, we’ll be sure to let you know all about it.
Are you expecting the PS Vita 2000 to be given a Western release date this Thursday, or do you think that ‘slim’ announcement will be something else? Let us know in the comments below. |
/**
* Checks if the page's content is truncated.
* @param url
* @param page
* @return If the page is truncated <code>true</code>. When it is not,
* or when it could not be determined, <code>false</code>.
*/
public static boolean isTruncated(String url, WebPage page) {
ByteBuffer content = page.getContent();
if (content == null) {
return false;
}
Utf8 lengthUtf8 = page.getFromHeaders(new Utf8(HttpHeaders.CONTENT_LENGTH));
if (lengthUtf8 == null) {
return false;
}
String lengthStr = lengthUtf8.toString().trim();
if (StringUtil.isEmpty(lengthStr)) {
return false;
}
int inHeaderSize;
try {
inHeaderSize = Integer.parseInt(lengthStr);
} catch (NumberFormatException e) {
LOG.warn("Wrong contentlength format for " + url, e);
return false;
}
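// A page counts as truncated when fewer bytes were actually fetched than the Content-Length header advertised.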
int actualSize = content.limit();
if (inHeaderSize > actualSize) {
LOG.warn(url + " skipped. Content of size " + inHeaderSize
+ " was truncated to " + actualSize);
return true;
}
if (LOG.isDebugEnabled()) {
LOG.debug(url + " actualSize=" + actualSize + " inHeaderSize=" + inHeaderSize);
}
return false;
} |
/*
* RTC based high-frequency timer
*
* Copyright (C) 2000 Takashi Iwai
* based on rtctimer.c by Steve Ratcliffe
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/log2.h>
#include <sound/core.h>
#include <sound/timer.h>
#if defined(CONFIG_RTC) || defined(CONFIG_RTC_MODULE)
#include <linux/mc146818rtc.h>
#define RTC_FREQ 1024 /* default frequency */
#define NANO_SEC 1000000000L /* 10^9 in sec */
/*
* prototypes
*/
static int rtctimer_open(struct snd_timer *t);
static int rtctimer_close(struct snd_timer *t);
static int rtctimer_start(struct snd_timer *t);
static int rtctimer_stop(struct snd_timer *t);
/*
* The hardware dependent description for this timer.
*/
static struct snd_timer_hardware rtc_hw = {
.flags = SNDRV_TIMER_HW_AUTO |
SNDRV_TIMER_HW_FIRST |
SNDRV_TIMER_HW_TASKLET,
.ticks = 100000000L, /* FIXME: XXX */
.open = rtctimer_open,
.close = rtctimer_close,
.start = rtctimer_start,
.stop = rtctimer_stop,
};
static int rtctimer_freq = RTC_FREQ; /* frequency */
static struct snd_timer *rtctimer;
static struct tasklet_struct rtc_tasklet;
static rtc_task_t rtc_task;
static int
rtctimer_open(struct snd_timer *t)
{
int err;
err = rtc_register(&rtc_task);
if (err < 0)
return err;
t->private_data = &rtc_task;
return 0;
}
static int
rtctimer_close(struct snd_timer *t)
{
rtc_task_t *rtc = t->private_data;
if (rtc) {
rtc_unregister(rtc);
tasklet_kill(&rtc_tasklet);
t->private_data = NULL;
}
return 0;
}
static int
rtctimer_start(struct snd_timer *timer)
{
rtc_task_t *rtc = timer->private_data;
if (snd_BUG_ON(!rtc))
return -EINVAL;
rtc_control(rtc, RTC_IRQP_SET, rtctimer_freq);
rtc_control(rtc, RTC_PIE_ON, 0);
return 0;
}
static int
rtctimer_stop(struct snd_timer *timer)
{
rtc_task_t *rtc = timer->private_data;
if (snd_BUG_ON(!rtc))
return -EINVAL;
rtc_control(rtc, RTC_PIE_OFF, 0);
return 0;
}
static void rtctimer_tasklet(unsigned long data)
{
snd_timer_interrupt((struct snd_timer *)data, 1);
}
/*
* interrupt
*/
static void rtctimer_interrupt(void *private_data)
{
tasklet_schedule(private_data);
}
/*
* ENTRY functions
*/
static int __init rtctimer_init(void)
{
int err;
struct snd_timer *timer;
if (rtctimer_freq < 2 || rtctimer_freq > 8192 ||
!is_power_of_2(rtctimer_freq)) {
snd_printk(KERN_ERR "rtctimer: invalid frequency %d\n",
rtctimer_freq);
return -EINVAL;
}
/* Create a new timer and set up the fields */
err = snd_timer_global_new("rtc", SNDRV_TIMER_GLOBAL_RTC, &timer);
if (err < 0)
return err;
timer->module = THIS_MODULE;
strcpy(timer->name, "RTC timer");
timer->hw = rtc_hw;
timer->hw.resolution = NANO_SEC / rtctimer_freq;
tasklet_init(&rtc_tasklet, rtctimer_tasklet, (unsigned long)timer);
/* set up RTC callback */
rtc_task.func = rtctimer_interrupt;
rtc_task.private_data = &rtc_tasklet;
err = snd_timer_global_register(timer);
if (err < 0) {
snd_timer_global_free(timer);
return err;
}
rtctimer = timer; /* remember this */
return 0;
}
static void __exit rtctimer_exit(void)
{
if (rtctimer) {
snd_timer_global_free(rtctimer);
rtctimer = NULL;
}
}
/*
* exported stuff
*/
module_init(rtctimer_init)
module_exit(rtctimer_exit)
module_param(rtctimer_freq, int, 0444);
MODULE_PARM_DESC(rtctimer_freq, "timer frequency in Hz");
MODULE_LICENSE("GPL");
MODULE_ALIAS("snd-timer-" __stringify(SNDRV_TIMER_GLOBAL_RTC));
#endif /* CONFIG_RTC || CONFIG_RTC_MODULE */
|
Fuchsia Dunlop is one of our go-to guides for Chinese cooking. When we can't go shopping with her in person, we constantly return to her excellent cookbooks—Land of Plenty, which features Sichuan cooking, Revolutionary Chinese Cookbook, her Hunan-focused collection, and Every Grain of Rice, which celebrates the simpler side of making authentic Chinese food at home. Her memoir, Shark's Fin and Sichuan Pepper, is a must-read.
I asked Dunlop about her Chinese cooking idols, her favorite cookbooks, and the regional Chinese cuisines we should all know more about.
How did you come to learn Chinese cooking? I've always loved cooking, but became involved in China through a sub-editing job at the BBC, which led me to visit the country and start learning Mandarin. I first visited the Sichuanese capital Chengdu in 1993, and a local acquaintance and his wife took me under their wing and introduced me to Sichuanese street food and dishes such as fish-fragrant eggplant and fire-exploded kidney flowers: as we sat drinking tea together on the riverbank on my last day, I vowed to return to the city.
I won a scholarship to study at Sichuan University the following year, and was immediately seduced by the myriad flavors of the local food. It was so exciting, and so different from any Chinese food I'd encountered in restaurants in England. I started learning to cook informally, in the kitchens of Chinese friends and small restaurants around the university, and later enrolled as a full-time student at the Sichuan Institute of Higher Cuisine.
Who are your Chinese cooking heroes? My first foray into the world of Chinese cooking was through the books of the Hong Kong-born writer Yan-kit So (see below): I remember making her Sichuanese fish in chilli bean sauce before I'd ever been to China. She eventually became a friend and mentor, and I was always impressed by both her fabulous cooking and her scholarly approach to her work.
In Chengdu, the chef Yu Bo and his wife and business partner Dai Shuang are extraordinary in their depth of knowledge and commitment to Chinese culinary traditions, and I've learned so much from them.
In recent years, I've been particularly inspired by the Hangzhou restaurateur Dai Jianjun (otherwise known as A Dai), who is trying to preserve and revive the lore and skills of local agriculture and gastronomy. In general, the Chinese friends and acquaintances and total strangers who have taught, encouraged and inspired me are too numerous to mention!
What books on Chinese cooking do you love and why? Yan-kit So's Classic Chinese Cookbook and Classic Food of China. The former is an excellent guide to the basics of Chinese cooking, and the latter has an introduction which gives a superb overview of Chinese culinary history and culture. A Tradition of Soup: Flavors from China's Pearl River Delta by Teresa M. Chen is a gem of a book about Cantonese tonic soups.
My dog-eared old textbooks from my time at cooking school in Chengdu are very dear to me, not just for the recipes but because they remind me of that period of my life. I also love Yuan Mei's Suiyuan Shidan, a fascinating and amusing cookbook from the late eighteenth century. In general, I tend to use Chinese cookery books published in China, which are almost all in Chinese. One of my favorites is a lavishly illustrated series on Chinese regional cuisines that was published in the 1980s—I can spend hours just looking through the photographs, [my] mouth watering.
What lesser-known Chinese cuisines or dishes do you think English-speaking readers need to know more about? I'm not sure where to begin! The mere idea of 'Chinese food' tends to obscure the vast regional differences in Chinese culinary styles. Have dinner, say, in northern Xi'an, and then fly to southern Hangzhou the next day and you might as well be in another country, foodwise. Even within provinces and regions, many cities, towns and smaller areas have not only their own speciality dishes, but distinct culinary styles. Some of my personal favorites: the "Chiuchow" cooking of a pocket of northeastern Guangdong province; the delicate cooking of the Southern Yangtze region, including Shanghai and the Zhejiang and Jiangsu provinces; the disparate styles of southwestern Yunnan... There are also culinary schools associated with Buddhist monasteries (vegetarian food without pungent vegetables such as onions and garlic), Hui Muslims (lots of lamb and beef) and different minority groups, such as the Uyghur people of Xinjiang, whose food is a fascinating bridge between the cuisines of China and Central Asia. You could eat a different 'Chinese' dish every day of your life and probably never have a repeat.
There is still a shortage of good information in English about Chinese regional cuisines, but this is beginning to change. The Cleaver Quarterly is a newish magazine specializing in the food of China that is delving more deeply than mainstream media into Chinese foodways. One or two new regional Chinese cookbooks have either been published or are on the horizon. And of course there are growing numbers of interesting regional Chinese restaurants in big cities like London and New York that offer tantalizing glimpses of the diversity of Chinese cooking.
What do you cook when you're not cooking Chinese food? These days my life tends to revolve around Chinese food, but I used to cook a lot of Turkish (I spent a summer living with a Turkish family in my 20s, and learned how to make köfte, stuffed vegetables and other dishes). I also cook a bit of Italian, French, and English. And whenever I travel anywhere, I end up bringing back recipes and ideas that influence my own cooking.
What was the first cookbook you really loved? Prue Leith and Caroline Waldegrave's Leith's Cookery Course, a hefty hardback introduction to classic English/French cookery. I was given it by an Italian lodger when I was 11, and cooked from it throughout my teenage years. It taught me how to make choux pastry and patisserie cream, how to poach a salmon and pluck a pheasant—all kinds of useful recipes and skills. I still have it in my kitchen.
What is your favorite cookbook for dinner party inspiration? I love Marcella Hazan's and Simon Hopkinson's recipes, and I grew up cooking from Claudia Roden. I'm a great fan of Fergus Henderson's cooking and writing—his Complete Nose to Tail: A Kind of British Cooking is one of my favorites. Chez Panisse Desserts by Lindsey Remolif Shere is fabulous—in particular, the recipe for blood orange tart, which is one of the most ethereally delicious things I've ever made from a book.
|
I recently did an interview for CSDN.net, which stands for “China Software Developer Network”, or more colloquially, “Programmer Magazine”, about open source hardware, and what it means to be a hacker.
The interview itself is in Chinese (print version), but I thought I’d post the English translation here for non-Chinese readers.
[biographical details and photo omitted from this translation. Caption to the bio photo reads: “My PhD advisor in college, Dr. Tom Knight, had a profound impact upon me. I often ask myself “what would Tom do?” when I am stuck or lost, and the answer I come back with is usually the right one.”]
About Open-source Hardware and the Maker movement
CSDN: The Maker and open hardware movements have attracted a lot of attention in recent years; Chris Anderson wrote the book Makers, and Paul Graham called it The Hardware Renaissance. From your perspective and observation, how do you think this movement will affect ordinary people, developers, and our IT industry?
bunnie: This movement, as it may be, is more of a symptom than a cause, in my opinion. Let’s review how we got here today.
In 1960, for all practical purposes there was only hardware, and it was all open. When you bought a transistor radio, it had its schematic printed in the back. If it broke, you had to fix it yourself. It was popular to buy kits to make your own radios.
Around 1980-1990, the personal computer revolution began. Computers started to become powerful enough to run software that was interesting and enabling.
From 1990-2005, Moore’s Law drove computers to be twice as fast and have twice as much memory every 1.5-2 years. All that mattered in this regime was the software; unless you could afford to fab a chip in the latest technology, it wasn’t worth it to make hardware, because by the time you got the components together a new chip was out that made your design look slow. It also wasn’t worth it to optimize software. By the time you had optimized your software, you could have bought a faster computer and run the old software even faster. What mattered was features, convenience, creativity. “Making” fell out of fashion: you had to ship code or die, there was no time to make.
From 2005-2010, computers didn’t get much faster in terms of MHz, but they got smaller. Smartphones were born. Everything became an app, and everything is becoming connected.
From about 2010-now, we find that Moore’s Law is slowing down. This slowdown is rippling through the innovation chain. PCs are not getting faster, better, or cheaper in a meaningful way; basically we buy a PC just to replace a broken one, not because it’s so much better. It’s a little bit too early to tell, but smartphones may also be solidifying as a platform: the iPhone5 is quite similar to the iPhone4; the Samsung phones are also looking pretty similar across revisions.
How to innovate? How to create market differentiation? With Moore’s Law slowing down, it’s now possible to innovate in hardware and not have your innovation look slow because a new chip came out. You have steady platforms (PCs, smartphones, tablets) which you can target your hardware ideas toward. You don’t have to necessarily be fabbing chips just to have an advantage. We are now sifting through our past, looking for niches that were overlooked during the initial fast-paced growth of technology. Even an outdated smartphone motherboard can look amazing when you put it in quadcopters, satellites, HVAC systems, automobiles, energy monitoring systems, health monitoring systems, etc.
Furthermore, as humans we fundamentally feel differently toward “real” things as opposed to “virtual” things. As wonderful as apps are, a human home consists of more than a smartphone, a food tray, a bed and a toilet. We still surround ourselves with knick-knacks, photos of friends, physical gifts we give each other on special occasions. I don’t think we will ever get to the point where getting a “virtual” teddy bear app will be as valued as getting a “real” teddy bear.
As a result, there will always be a place for people to make hardware that fills this need for tangible goods. This hardware will merge more technology and run more software, but in the end, there is a space for Makers and hardware startups, and that space is just getting bigger now that hardware technology is stabilizing.
CSDN: Compared with the past, the appearance of Arduino and Raspberry Pi seems to reduce the threshold for doing hardware design. What effect do you think this will have on the hardware product industry? Do you think these platforms will bring real leaps and bounds in the industry? Or, to make really innovative hardware products, what do we need?
bunnie: Arduino and RPi serve specific market niches.
Arduino’s key contribution is the reduction of computation to a easy-to-use physical form. It was made first and foremost by designers and artists, and less so by technologists. This unique perspective on technology turned out to be very powerful: it turns out people who aren’t programmers or hardware designers also want to access hardware technology.
Some very moving and deep interactive art pieces have been made using the Arduino, allowing hardware to transcend menial control applications into something that changes your mood or makes you think about life differently. I think Arduino is just the first step toward taking the “tech” out of technology and letting every day people not just use technology, but create with it. There will be other platforms, for sure.
Raspberry Pi is a very inexpensive embedded hardware reference module; it is cheap enough that for many applications, it’s just fine to buy the RPi as-is and you don’t have to design your own hardware. I think there will also be other followers in their footsteps. The nice thing about RPi for hardware professionals is that instead of buying a reference design and then having to spin your own board, you can just buy the RPi and ship it in the product. For people who have relatively low-volume products, this makes sense.
As an ongoing trend, I see product design becoming more feasible at low volumes. There will still be the million-unit blockbusters for things like smartphones and coffee makers, but there will also be a new market for 1k-10k of something, but with a much higher margin. These small run products will be developed and sold by teams of just one or two people, so that the profit on such a small run of product is still a good living for the individuals. The key to the success of these products is that they are highly customized and help solve the exact problem a small group of users have, and because of this the users are willing to pay more for it.
CSDN: When new concepts or technologies first appear, they always cause a lot of optimistic discussion, but most of them only really affect our lives after a long period of development. When we talk about the Maker or open hardware movement, are we too optimistic? For the average person, are there common misunderstandings about this field?
bunnie: Yes, it does take a long time for technology to really change our lives.
The Maker movement, I think, is less about developing products, and more about developing people. It’s about helping people realize that technology is something man-made, and because of this, every person has the power to control it: it just takes some knowledge. There is no magic in technology. Another way to look at it is, we can all be magicians with a little training.
Open Hardware is more of a philosophy. The success or failure of a product is largely disconnected with whether the hardware is open or closed. Closing hardware doesn’t stop people from cloning or copying, and opening hardware doesn’t mean that bad ideas will be copied simply because they are open. Unlike software, hardware requires a supply chain, distribution, and a network of relationships to build it at a low cost. Because of this, hardware being open or closed is only a small part of the equation for hardware, and mostly it’s a question of how much you want to involve end users or third parties to modify or interoperate with your product.
CSDN: When we look at the future of open source hardware, can we draw an analogy with the open source software industry that we have already seen (many commercial companies support open source software, and many software companies make a living providing support for open source software)? What are the differences between them?
bunnie: I don’t think the analogy is quite the same. In software, the cost to copy, modify, and distribute is basically zero. I can download a copy of linux, and run “make” and have the same high-quality kernel running on my desktop as runs on top-end servers and supercomputers.
In hardware, there is a real cost to copy hardware. And the cost of the parts, the factories and skilled workers used to build them, the quality control procedures, and the process to build are all important factors in how the final hardware costs, looks, feels, and performs. Simply giving someone a copy of my schematics and drawings doesn’t mean they can make exactly my product. Even injection molding has art to it: if I give the same CAD drawing to two tooling makers, the outcome can be very different depending on where the mold maker decides to place the gates, the ejector pins, the cooling for the mold, the mold cycle time, temperature, etc.
And then there is the distribution channel, reverse logistics, financing, etc.; even as the world becomes more efficient at logistics, you will never be able to buy a TV as easily as you can download the movies that you watch on the same TV.
CSDN: What kind of business model do you think is ideal for open source hardware company? Could you give an example?
bunnie: One of my key theories behind open source hardware is that hardware, at least at the level of schematics and PCB layout, is “essentially open”. This is because for a relatively small amount of money you can pay services to extract the detail required to copy a PCB design. Therefore an asymptotic assumption is that once you have shipped hardware, it can be copied.
If you can accept this assumption, then not releasing schematics and PCB layouts will not stop people from copying your goods. It will be copied if someone wants to copy it. So it makes no difference to copying whether or not you have shared your design files.
However, if you do share your design files, it does make a difference to a different and important group of people. There are other businesses and individual innovators who could use your design files to design accessories, upgrades, or third party enhancements that rely upon your product.
Thus, in a “limiting case”, sharing your design files doesn’t change the situation on copying, but does improve your opportunity for new business relationships. Therefore, the practical suggestion is to just share the design files, using the guidelines of the open source hardware licenses to help reserve a few basic rights and protections.
Clearly, there are some hardware strategies that are not compatible with the idea of open hardware. If your sole value to the consumer is your ability to make stand-alone hardware, and you have no strategic advantage in terms of cost, then you would like to keep your plans secret to try to delay the low-cost copiers for as long as you can.
However, we are finding today that the most innovative products are not just a piece of hardware, but they also involve software and services. Open hardware business models work better when they are in such hybrid products. In many cases, consumers are willing to pay annuity revenue in some form (e.g. subscriptions, advertising, upsells, accessories, royalties and upgrades) for many types of products. In fact, it is most profitable to just collect these fees and not involve yourself in the hardware manufacturing portion; and it is much easier to control access to an ongoing service than it is to the plans of a piece of hardware.
Thus, if you are running an on-line service that is coupled to your hardware, open hardware makes a lot of sense: if your on-line service is profitable, letting other people copy the hardware, sell it, and then add more users to your on-line service simply means you get more revenue without more risk in the production of hardware.
CSDN: We know you often come to China and know a lot about this country. China's software technology is not advanced; do you think its current position as the world's factory center will help it improve its overall level of technology? How can this country become a place focused on design, research, and development, rather than just a manufacturing center? What is China missing?
bunnie: I wouldn’t say I know much about China. I know a little bit about one small corner of China in one specific area — hardware manufacturing. If there is one thing I do know about China, it’s that it is a very big country, with many different kind of people, and a long history that I am only beginning to understand.
However, the history of high technology is almost entirely contained within my life span, so I can comment on the relationship between high technology and people, from which we can derive some perspective about China.
The first observation is that every technology power-house today started with manufacturing. The US started as simple colonies of Britain, mining ores, trapping furs and farming cotton and tobacco. Over time, the US had steel mills and linen production. It wasn’t until the early 1900’s before the US really started to rise in developing original technology, and not until the mid 1900’s before things really took off.
The same goes for Japan. They started in manufacturing, copying many of the US made goods. In fact the first cars and radios they made were, if you believe the historical accounts, not so good. It took the US and Japan decades to go from a manufacturing-based economy to a service-based economy.
If you compare that to China, the electronics manufacturing industry started there maybe only 20 years ago, at most, and China is just turning the corner from being a manufacturing-oriented economy to one that can do more design and software technology. I believe this is a natural series of events: some portion of entry-level workers will eventually become technicians, then some technicians become designers, then some designers become successful entrepreneurs.
In terms of concrete numbers, if you have 10 million factories workers, maybe 1% of them will learn enough from their job such that after a few years they can become technicians. This gives you 100,000 technicians. After a few years of technician work, maybe 1% will gain enough skill to become original designers. This gives you 1,000 designers. These experienced, grass-roots designers become the core of your entrepreneurial economy, and from there the economy begins to transform.
From a thousand companies, you will eventually winnow down to just a handful of global brand companies. This whole process takes a decade or two, and I believe we are currently witnessing China going through the final phase of the transformation, where we now have a lot of people in Shenzhen who have the experience of manufacturing, the wisdom to do design, and now are just starting to apply their talent to innovation and original product design. The next decade will be an exciting one for China’s technology industry, if the current policies on economic and intellectual development stay roughly on course.
This pattern applies primarily to hardware or hardware-dominated products. Software products have a similar pattern, but I believe there are unique cultural aspects that can give the West an advantage in software design. In hardware, if a process is not efficient or producing low yield, one can easily identify the root cause and produce direct physical evidence of the problem. Hardware problems, in essence, are indisputable.
In software, if code is not efficient or poorly written, it’s very hard to identify the exact problem that causes it. One can see the evidence of programs crashing or running slowly, but there is no broken wire or missing screw you can simply hold up and show everyone to show why the software is broken. Instead, you need to sit down with your collaborators and review a complex design, consider many opinions, and ultimately you need to identify a problem which is ultimately due to a bad decision made by an individual, and nothing more than that. All software APIs are simply constructs of human opinions: nothing more.
Asian cultures have a strong focus on guanxi, reputation, and respect for the elders. The West tends to be more rebellious and willing to accept outsiders as champions, and they have less respect for the advice of elders. As a result, I think it’s very culturally difficult in an Asian context to discuss code quality and architectural decisions. The field of software itself is only 30 years old, and older, more experienced engineers are also the most out of date in terms of methodology and knowledge. In fact, the young engineers often have the best ideas. However, if it’s culturally difficult for the young engineers to challenge the decisions of the elder engineers, you end up with poorly architected code, and you have no hope to be competitive.
It’s not hopeless to overcome these obstacles, but it requires a very strong management philosophy to enforce the correct incentives and culture. The workers should be rewarded fairly for making correct decisions, and there can be no favorites based upon friendship, relationship, or seniority. Senior engineers and managers must see a real financial reward for accepting their mistakes, instead of saving face by forcing junior engineers to code patches around their bad high level decisions. Usually, this alignment is achieved in US contexts by sharing equity in a company among the engineers, so that the big payout only comes if the company as a whole survives, regardless of the ego of the individual.
CSDN: What do you think the role (or relationship) of individual makers and commercial companies will be in the future? And as individual makers may compete not only with commercial companies but also with other makers in the future, what are the factors critical to the success of a product?
bunnie: As a general trend, MOQs are falling and innovation is getting closer to the edge. So I think commercial companies will see more competition from Makers, especially as the logistics industry transforms itself into an API that can plug directly into websites.
At the end of the day, the most critical factors to success will still be how much value the consumer will perceive from the product. A part of this is related to superior features and good product quality, but also an important part of this is in the presentation to the consumer and how clearly the benefits are explained. As a result, it’s important to make sure the product is visually appealing, easy to use, and to create marketing material that clearly explains the benefit of the product to the customer. This is often a challenge for individual makers, because their talent is normally in the making of the product’s technical value, but less so on the marketing and sales value. Makers who can master both will have an edge over makers who focus specifically on offering just a technical value.
About Hardware Hackers
CSDN: You have participated in the development process of many products; what is your personal goal? Over this long period, what has been the greatest pleasure?
bunnie: I would like to make people happy by building things that improve their life in some way. The greatest pleasure is to see someone enjoying something you have made because you have improved their life in some small way. Sometimes your product is solving a big problem for them. Other times it is more whimsical and happiness comes from fun or beauty. But either way, you are helping another person.
That is what is important to me. One thing I have learned in the past few years is that money beyond a certain level doesn’t make me any happier. This makes me difficult to work with, because it’s hard for people to just hire my service by offering me a lot of money. Instead, they need to convince me that the activity will somehow also make people happy.
Another important goal for me is to just understand how the world works. I have a natural curiosity and I want to learn and understand all kinds of things. The universe has a lot of patterns to it, and sometimes you will find seemingly unrelated pieces fitting together just like magic. Discovering these links and seeing the world fit together like a big jigsaw puzzle is very profound and satisfying.
CSDN: Which projects are you focusing on recently? Why did you choose them?
bunnie: I haven’t really been focused, actually. Recently, I have intentionally been *not* focused. If you focus on one idea for too long, many good innovative ideas just pass you by.
One of the hardest parts of being an entrepreneur or innovator is to have the patience to review many ideas, know when to say no to bad ideas, and then cultivate a few good ideas at the same time, and to accept many changes to your concept along the way. This process can take months, if not years.
I have a project to build my own laptop, but it’s not meant to be a business. It’s a subject meant to encourage personal growth, to challenge my abilities and learn things that I don’t know about the entire computer design process.
I have a project to reverse engineer the firmware in SD cards. It’s also not a business, it’s meant to satisfy my curiosity.
I have a project to learn how companies in Shenzhen build such amazing phones for such a low price. If I can learn some of their techniques, then hopefully I can apply this knowledge to create new and compelling products that make people happy.
I have a project to build flexible circuit stickers. It will hopefully help introduce more people into using technology in their everyday life. It will also challenge my understanding of manufacturing processes, as I must develop a few new processes that don’t exist today to make the stickers robust and inexpensive enough for every day use.
I have a project to help advise new entrepreneurs and innovators, and help get them started on their adventures. I may not have the best ideas, or all the talent needed to make a business, but if I can help teach others to fly, then maybe that can make up for my inability to find success in business.
So, I have many projects, and no focus. Maybe someday I will find one to focus on — sometimes it is also important to focus — but as of today, I haven’t found what I’m looking for.
CSDN: Failure tends to give people more experience. Could you talk about the not-so-successful projects you have participated in, or about failed projects by others that have inspired you?
bunnie: My life is a story of failures. The only thing I have done repeatedly and reliably is fail. However, I have two rules when handling failure: (1) don’t give up and (2) don’t make the same mistake twice. If you follow these rules, eventually, you will find a success after many failures.
Actually, I have an interview that focuses on one of my recent failures. You can read it here.
CSDN: Your book Hacking the Xbox was published ten years ago. For people who want to learn reverse engineering or become hardware hackers today, do those experiences and skills still apply?
bunnie: I’d like to think that the core principles covered in the book are still relevant today. The Xbox is simply an example of how to do things, but the approach described in the book and the techniques are applicable to a broad range of problems.
For the Chinese audience, I have found the mobile phone repair manuals to be quite interesting to read. I have bought a couple of them, and even though I can’t read Chinese well, I still find them pretty interesting. Sometimes their descriptions on the theory of electronics are not completely accurate, but practically speaking they are good enough, and it’s a quick way to get started while learning immediately useful skills in repairing phones.
There’s also a Chinese magazine, 无线电, which I have found to be quite good. If you get started building the projects in there, I think you will learn very quickly.
CSDN: For users, the new Xbox One has more stringent restrictions. What do you think about this? Are you interested in exploring this black box and updating your book?
bunnie: I haven’t done much work on video game consoles in a while — there is a whole new generation of console hackers who are excited to explore them, and I’m happy for that. As for the Xbox One security, I’m sure it is one of the most secure systems built. They did a very good job on the Xbox360, and I know some of the Xbox One security team members and they have a very solid understanding of the principles needed to build secure hardware. It should be very hard to crack.
That being said, I'm glad I have no desire to buy or use one. I think I would become very frustrated with its use policies and restrictions very quickly.
CSDN: There is a lot of controversy about whether electronic devices should have a lock to prevent user rooting. What do you think about this? Is there a contradiction between ensuring the safety of users and giving users complete control of their devices?
bunnie: I believe users should “own” their hardware, and “owning” means having the right to modify, change, etc. including root access rights. If the company has a concern about users being unsafe, then it’s easy enough to include an “opt-out” where users can simply select an electronic waiver form, and give up their support and warranty right to gain access to their own machine. Most people who care to root their machine are already smarter than the phone support they would be calling inside the company, so anyways it’s not a problem.
The laws have changed to make it illegal to do some rooting activities, even to hardware that you bought and own. I think this reduction in our natural rights of ownership is dangerous and can lead to consumers being put in an unfair situation, and it also discourages consumers to explore and learn more about the technology they have become so dependent upon.
CSDN: Hardware integration is increasing. Do you think hardware hacking is getting more and more difficult, and do you worry that hardware hackers will become extinct? How could we change this situation?
bunnie: The increasing integration has been true for a long time — from the TX-0 which just used transistors, to the Apple II which used TTL ICs, to the PCs which used controller chipsets, to the mobile phones which have just a single SoC now. It does make it harder to hack some parts, but there are always opportunities at the system integration level.
In other words, I still think there is art in hardware, just the level at which hardware hackers have to work gets higher every day, and this is a good thing, because it means our hacks are also getting more powerful with time as well.
CSDN: Your book is dedicated to Aaron Swartz. Could you talk about why you think the hacker spirit is important in our era today?
bunnie: The hacker spirit is the ultimate expression of human problem solving ability. It’s about the ability to see the world for what it is, and not the constructs and conventions that society puts in place. A brick is not just used to make buildings, it can be a doorstop, a weapon, a paperweight, a heating ballast, or it can be ground up and used for soil. Hackers question convention through the lens of doing what’s most practical and correct for the situation at hand: they see things for what they are, and not by the labels put on them. Sometimes their methods are not always harmonious, as hackers often times prioritize doing the right thing over being nice or playing by the rules.
I find the more difficult situations become, the more pervasive and stronger the hacker spirit becomes among common people. I see evidence of this around the world. It is linked to the human will to survive and to thrive. I think it's important for a society to cultivate and tolerate the hacker spirit. Not everyone has it, but the few who do have it help make society more resilient and survivable in hard times.
CSDN: Do you have other words you would like to share with Chinese readers?
bunnie: Recently, I was reading some comments on a Chinese web forum, and it seems that many Chinese regard the term “Shanzhai” as a negative term. This was surprising to me, because as an outsider, I feel that the Shanzhai have done a lot of very interesting and useful innovation. I think in English, we have a similar problem. The term “hacker” in English started as a good term, but over time became associated with many kinds of negative acts. Recently the term “Maker” was coined to distinguish between the positive and negative aspects of hackers (I still call myself a hacker because I still adhere to the traditional definition of the word).
It may be easier to explain the innovation happening in China, if in Chinese a similar linguistic bifurcation could happen. I had recently proposed referring to the innovative, open aspects of what the Shanzhai do as “gongkai” (公开), referring to their method of sharing design files. Significantly, the term 开放 as used in 开放源代码 I feel doesn’t quite apply, because it refers to a specific Western-centric legal aspect of being open, which is not applicable to the methods engaged in the Chinese ecosystem.
However, the fact that China has found its own way to share IP, unique from the Western system, doesn’t mean it’s bad. I think in fact, it’s quite interesting and I’m very curious to see where it goes. Since I see positive value in some of the methods that the Shanzhai use, I’d propose using the more positive/generic term “gongkai” to describe the style of IP sharing commonly engaged in China.
But then again, who am I to say — I’m not a native Chinese speaker, and maybe there is a much better way to address the situation.
Tags: china, gongkai |
package core.command;
/** Base class for commands that parse their constructor arguments. */
public abstract class BaseCommand implements Command {

    public BaseCommand(Object... args) {
        // Let the concrete command interpret its own arguments.
        this.parseArgs(args);
    }

    /** Parse the arguments supplied to the constructor. */
    protected abstract void parseArgs(Object... args);
}
|
Zeolite Supported-Nano TiO₂ Composites Prepared by a Facile Solid Diffusion Process as High Performance Photocatalysts. In this paper, zeolite supported-nano TiO2 photocatalytic composites were synthesized by an easily-operated solid diffusion process, in which zeolite was used as the support matrix. And the microstructures, morphologies and photocatalytic properties of the zeolite supported-nano TiO2 composites were characterized and analyzed by scanning and transmission electron microscopy, X-ray photoelectron spectroscopy, UV-visible spectroscopy, fluorescence spectroscopy and methylene blue degradation tests. The results showed that zeolite matrix reduced agglomeration of nano-TiO2 and enhanced the absorption ability within the UV-Vis range, consequently increased the photocatalytic activity of the composites. Meanwhile, the influences of TiO2/zeolite proportion on their photocatalytic performances were explored, which indicated that 90 wt% TiO2/zeolite had optimal photocatalytic capability and stable properties. |
A comparison of three antibiotic regimens for eradication of Haemophilus influenzae type b from the pharynx of infants and children. The communicability of Haemophilus influenzae type b has recently been shown to be comparable to that of Neisseria meningitidis.1,2 The secondary attack rate of H influenzae type b disease is approximately 2% in young contacts of an index case, an observation that has stimulated investigators to attempt to identify effective chemoprophylactic regimens. Elsewhere in this issue are two studies reporting successful use of rifampin in eradicating H influenzae type b from the pharynx of infants and children who were exposed to patients with the disease.3,4 Other regimens have not been effective.5,6 The purpose of this brief communication is to report our results with three antimicrobial agents that were given in an attempt to eliminate pharyngeal carriage of H influenzae type b in infants and children. |
Thousands and thousands of lovers’ padlocks now decorate many Paris bridges, in what has become a new tradition among romantic tourists visiting the French capital.
The phenomenon of attaching little padlocks to the parapets of the Pont des Arts bridge across the capital’s River Seine began in 2008 and over the last five years, lovers have also left their mark on other bridges and spots nearby.
“The problem is that there are more and more padlocks and it is starting to cause the fencing and parapets to sag on the Pont des Arts”, says Jean-Pierre Lecoq, Mayor of the VI arrondissement, where the bridge is located.
There are so many that some are attached to each other because there is no room on the parapet.
“The danger is that one day a piece of the parapet, heavy with several kilos of padlocks, falls down onto one of the river cruise boats below and hurts someone seriously, or even kills them,” worries M. Lecoq.
Recently little posters have been stuck on the bridge saying that anyone attaching a padlock will face a 20 euro fine, but no one at the official Paris authorities knows who put the posters up.
Jean-Pierre Lecoq suggests that the padlocks be removed at regular intervals, roughly every six months so that they do not become too heavy.
But Paris City Hall authorities say they have no plans at the moment to take any action.
“We think it's rather lovely,” said an official. “When we feel that there is a safety issue we will remove some of the padlocks from areas where there might be a danger. We are not going to remove all the padlocks from any one of the bridges”.
Some people travel thousands of miles to attach their padlocks and pledge their love on the bridge in the hope that it will last forever.
“They mustn’t be removed. It’s a symbol of love. It would be better to strengthen the bridge,” said one Australian tourist posing with her boyfriend in front of the padlock they had just attached, promising eternal love. |
Transcriptional Heat Shock Response in the Smallest Known Self-Replicating Cell, Mycoplasma genitalium ABSTRACT Mycoplasma genitalium is a human bacterial pathogen linked to urethritis and other sexually transmitted diseases as well as respiratory and joint pathologies. Though its complete genome sequence is available, little is understood about the regulation of gene expression in this smallest known, self-replicating cell, as its genome lacks orthologues for most of the conventional bacterial regulators. Still, the transcriptional repressor HrcA (heat regulation at CIRCE ) is predicted in the M. genitalium genome as well as three copies of its corresponding regulatory sequence CIRCE. We investigated the transcriptional response of M. genitalium to elevated temperatures and detected the differential induction of four hsp genes. Three of the up-regulated genes, which encode DnaK, ClpB, and Lon, possess CIRCE within their promoter regions, suggesting that the HrcA-CIRCE regulatory mechanism is functional. Additionally, one of three DnaJ-encoding genes was up-regulated, even though no known regulatory sequences were found in the promoter region. Transcript levels returned to control values after 1 h of incubation at 37°C, reinforcing the transient nature of the heat shock transcriptional response. Interestingly, neither of the groESL operon genes, which encode the GroEL chaperone and its cochaperone GroES, responded to heat shock. These data suggest that M. genitalium selectively regulates a limited number of genes in response to heat shock. |
New heteroleptic bis-phenanthroline copper(I) complexes with dipyridophenazine or imidazole fused phenanthroline ligands: spectral, electrochemical, and quantum chemical studies. Two new sterically challenged diimine ligands L (2,9-dimesityl-2-(4'-bromophenyl)imidazophenanthroline) and L (3,6-di-n-butyl-11-bromodipyridophenazine) have been synthesized with the aim to build original heteroleptic copper(I) complexes, following the HETPHEN concept developed by Schmittel and co-workers. The structure of L is based on a phen-imidazole molecular core, derivatized by two highly bulky mesityl groups in positions 2 and 9 of the phenanthroline cavity, preventing the formation of a homoleptic species, while L is a dppz derivative, bearing n-butyl chains in positions of the chelating nitrogen atoms. The unambiguous formation of six novel heteroleptic copper(I) complexes based on L, L, and complementary matching ligands (2,9-R-1,10-phenanthroline, with R = H, methyl, n-butyl or mesityl) has been evidenced, and the resulting compounds were fully characterized. The electronic absorption spectra of all complexes fits well with DFT calculations allowing the assignment of the main transitions. The characteristics of the emissive excited state were investigated in different solvents using time-resolved single photon counting and transient absorption spectroscopy. The complexes with ligand L, bearing a characteristic dppz moiety, exhibit a very low energy excited-state which mainly leads to fast nonradiative relaxation, whereas the emission lifetime is higher for those containing the bulky ligand L. For example, a luminescence quantum yield of about 3 10(-4) is obtained with a decay time of about 50 ns for C2 ((+)) with a weak influence of strong coordinating solvent on the luminescence properties. Overall, the spectral features are those expected for a highly constrained coordination cage. Yet, the complexes are stable in solution, partly due to the beneficial stacking between mesityl groups and vicinal phenanthroline aromatic rings, as evidenced by the X-ray structure of complex C3 ((+)). Electrochemistry of the copper(I) complexes revealed reversible anodic behavior, corresponding to a copper(I) to copper(II) transition. The half wave potentials increase with the steric bulk at the level of the copper(I) ion, reaching a value as high as 1 V vs SCE, with the assistance of ligand induced electronic effects. L and L are further end-capped by a bromo functionality. A Suzuki cross-coupling reaction was directly performed on the complexes, in spite of the handicapping lability of copper(I)-phenanthroline complexes. |
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.test;
import android.net.NetworkStats;
import android.net.TrafficStats;
import android.os.Bundle;
import android.util.Log;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
/**
* A bandwidth test case that collects bandwidth statistics for tests that are
* annotated with {@link BandwidthTest} otherwise the test is executed
* as an {@link InstrumentationTestCase}
*/
public class BandwidthTestCase extends InstrumentationTestCase {
private static final String TAG = "BandwidthTestCase";
private static final String REPORT_KEY_PACKETS_SENT = "txPackets";
private static final String REPORT_KEY_PACKETS_RECEIVED = "rxPackets";
private static final String REPORT_KEY_BYTES_SENT = "txBytes";
private static final String REPORT_KEY_BYTES_RECEIVED = "rxBytes";
private static final String REPORT_KEY_OPERATIONS = "operations";
@Override
protected void runTest() throws Throwable {
//This is a copy of {@link InstrumentationTestCase#runTest} with
//added logic to handle bandwidth measurements
String fName = getName();
assertNotNull(fName);
Method method = null;
Class testClass = null;
try {
// use getMethod to get all public inherited
// methods. getDeclaredMethods returns all
// methods of this class but excludes the
// inherited ones.
testClass = getClass();
method = testClass.getMethod(fName, (Class[]) null);
} catch (NoSuchMethodException e) {
fail("Method \""+fName+"\" not found");
}
if (!Modifier.isPublic(method.getModifiers())) {
fail("Method \""+fName+"\" should be public");
}
int runCount = 1;
boolean isRepetitive = false;
if (method.isAnnotationPresent(FlakyTest.class)) {
runCount = method.getAnnotation(FlakyTest.class).tolerance();
} else if (method.isAnnotationPresent(RepetitiveTest.class)) {
runCount = method.getAnnotation(RepetitiveTest.class).numIterations();
isRepetitive = true;
}
if (method.isAnnotationPresent(UiThreadTest.class)) {
final int tolerance = runCount;
final boolean repetitive = isRepetitive;
final Method testMethod = method;
final Throwable[] exceptions = new Throwable[1];
getInstrumentation().runOnMainSync(new Runnable() {
public void run() {
try {
runMethod(testMethod, tolerance, repetitive);
} catch (Throwable throwable) {
exceptions[0] = throwable;
}
}
});
if (exceptions[0] != null) {
throw exceptions[0];
}
} else if (method.isAnnotationPresent(BandwidthTest.class) ||
testClass.isAnnotationPresent(BandwidthTest.class)) {
/**
* If bandwidth profiling fails for whatever reason the test
* should be allow to execute to its completion.
* Typically bandwidth profiling would fail when a lower level
* component is missing, such as the kernel module, for a newly
* introduced hardware.
*/
try{
TrafficStats.startDataProfiling(null);
} catch(IllegalStateException isx){
Log.w(TAG, "Failed to start bandwidth profiling");
}
runMethod(method, 1, false);
try{
NetworkStats stats = TrafficStats.stopDataProfiling(null);
NetworkStats.Entry entry = stats.getTotal(null);
getInstrumentation().sendStatus(2, getBandwidthStats(entry));
} catch (IllegalStateException isx){
Log.w(TAG, "Failed to collect bandwidth stats");
}
} else {
runMethod(method, runCount, isRepetitive);
}
}
private void runMethod(Method runMethod, int tolerance, boolean isRepetitive) throws Throwable {
//This is a copy of {@link InstrumentationTestCase#runMethod}
Throwable exception = null;
int runCount = 0;
do {
try {
runMethod.invoke(this, (Object[]) null);
exception = null;
} catch (InvocationTargetException e) {
e.fillInStackTrace();
exception = e.getTargetException();
} catch (IllegalAccessException e) {
e.fillInStackTrace();
exception = e;
} finally {
runCount++;
// Report current iteration number, if test is repetitive
if (isRepetitive) {
Bundle iterations = new Bundle();
iterations.putInt("currentiterations", runCount);
getInstrumentation().sendStatus(2, iterations);
}
}
} while ((runCount < tolerance) && (isRepetitive || exception != null));
if (exception != null) {
throw exception;
}
}
private Bundle getBandwidthStats(NetworkStats.Entry entry){
Bundle bundle = new Bundle();
bundle.putLong(REPORT_KEY_BYTES_RECEIVED, entry.rxBytes);
bundle.putLong(REPORT_KEY_BYTES_SENT, entry.txBytes);
bundle.putLong(REPORT_KEY_PACKETS_RECEIVED, entry.rxPackets);
bundle.putLong(REPORT_KEY_PACKETS_SENT, entry.txPackets);
bundle.putLong(REPORT_KEY_OPERATIONS, entry.operations);
return bundle;
}
}
|
/*
* \brief Fallback to simpler lowering for dilation or grouped conv.
* \param data The input expr.
* \param weight The weight expr.
* \param param The qnn conv2d attributes.
* \return The fallback lowered sequence of Relay expr.
* \note In case of dilation, normal lowering would require a dilated pool.
* Since, we don't have dilated pool, we fallback to a simpler sequence of
* Relay operations. This will potentially lead to performance degradation
* as the convolution is called on int32 tensors instead of int8 tensors.
*/
Expr Conv2DFallBack(const Expr& data, const Expr& weight, const QnnConv2DAttrs* param) {
auto zp_data = MakeConstantScalar(Int(16), param->input_zero_point);
auto zp_kernel = MakeConstantScalar(Int(16), param->kernel_zero_point);
auto shifted_data = Cast(data, Int(16));
if (param->input_zero_point != 0) {
shifted_data = Subtract(Cast(data, Int(16)), zp_data);
}
auto shifted_kernel = Cast(weight, Int(16));
if (param->kernel_zero_point != 0) {
shifted_kernel = Subtract(Cast(weight, Int(16)), zp_kernel);
}
return Conv2D(shifted_data, shifted_kernel, param->strides, param->padding, param->dilation,
param->groups, param->channels, param->kernel_size, param->data_layout,
param->kernel_layout, param->out_layout, param->out_dtype);
} |
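As a rough numerical check of the fallback arithmetic described in the comment above (cast to a wider integer type, subtract the zero points, then convolve), the following standalone NumPy sketch reproduces the same term on a tiny dot-product example. It is an illustration only; the array values and zero points are made up, and this is not TVM code.

# Illustrative sketch only: verifies the zero-point-subtraction arithmetic used
# by the fallback lowering above on a tiny 1-D "convolution" (a dot product).
import numpy as np

data = np.array([3, 7, 250, 12], dtype=np.uint8)
weight = np.array([2, 255, 1, 4], dtype=np.uint8)
input_zero_point, kernel_zero_point = 5, 128

# Fallback path: widen to int16, subtract zero points, then accumulate in int32.
shifted_data = data.astype(np.int16) - input_zero_point
shifted_kernel = weight.astype(np.int16) - kernel_zero_point
fallback_result = int(np.dot(shifted_data.astype(np.int32), shifted_kernel.astype(np.int32)))

# Reference: the quantized convolution term (d - zd) * (w - zw) computed directly.
reference = sum((int(d) - input_zero_point) * (int(w) - kernel_zero_point)
                for d, w in zip(data, weight))

assert fallback_result == reference
print(fallback_result)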
<filename>setup.py
#!/usr/bin/env python3
"Create the package"
from setuptools import setup
setup(
name='netdescribe',
version='0.2.8',
packages=['netdescribe',
'netdescribe.snmp'],
description='Library of functions for performing discovery on network devices.',
long_description='Library of functions for performing discovery on network devices.',
author='<NAME>',
url='https://github.com/equill/netdescribe',
author_email='<EMAIL>',
keywords=['network', 'discovery', 'snmp'],
install_requires=['pysnmp==4.4.12'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking :: Monitoring',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.9'],
license='Apachev2'
)
|
Transformers, donkeys and a JCB digger will roll into town next weekend for a Castleford nursery’s summer fair.
Dozens of tots and parents will gather for Hillside Private Day Nursery’s Summer Fair 2015 on Saturday July 18.
The event will feature a mini man-made beach, donkey rides, games, live music, a special mascot race and birds of prey.
There will also be refreshments and a number of different stalls on offer at the nursery at The Old Rectory in Whitwood Lane.
Nursery owner Rachel Hill, who also runs a nursery in Altofts, Wakefield, said: “This year we are celebrating community, family and friends.”
More than 100 people turned out for the event in 2013.
The fair will take place at the nursery from 11am-3pm. |
<reponame>MateusBarboza99/Python-03-
larg = float(input('Wall width: '))
alt = float(input('Wall height: '))
área = larg * alt
print(f'Your wall measures {larg} x {alt} and its area is {área}m².')
# Assumes 1 litre of paint covers 2 m² of wall.
tinta = área / 2
print(f'To paint this wall, you need {tinta}l of paint.')
|
import styled from 'styled-components';
interface Props {}
export const SelectStyle = styled.select<Props>`
padding: 15px;
background: transparent;
border: 2px solid ${({ theme }) => theme.inputBorder};
color: ${({ theme }) => theme.inputTxt};
text-transform: uppercase;
font-family: Tomorrow-Regular;
transition: 0.2s;
&:hover,
&:focus {
filter: saturate(0.5);
}
`;
export const SelectOption = styled.option<Props>`
background: ${({ theme }) => theme.mainBg};
text-transform: uppercase;
font-family: Tomorrow-Regular;
`;
|
import pandas as pd
import pickle
primary_class = None
image_size = None
objective = None
direct = False
train_epochs = 10
batch_size = 32
parent_model = None
name = None
model = None
optimizer = None
criterion = None
accuracy_history_train = []
accuracy_history_val = []
extra_history = []
evaluate = False
retrain_name = None
def print_accountant():
print('Primary Class', primary_class)
print('Image Size', image_size)
print('Batch Size', batch_size)
    print('Parent Model', parent_model)
print('Name', name)
print('Evaluate', evaluate)
def add_accuracy(accuracy, state):
if(state == 'val'):
accuracy_history_val.append(accuracy)
else:
accuracy_history_train.append(accuracy)
def add_extra(extra):
extra_history.append(extra)
def load_accountant(file, should_evaluate = False, retrain = None):
global evaluate
global retrain_name
if(should_evaluate):
evaluate = True
if(retrain is not None):
retrain_name = retrain
    with open(file, "r") as config_file:
        f = config_file.read()
exec(f, globals())
print_accountant()
def save_history(file):
with open(file, 'wb') as fp:
pickle.dump({'train':accuracy_history_train, 'val': accuracy_history_val, 'extra':extra_history}, fp) |
The present invention relates generally to radio display pagers, and more specifically to a radio display pager capable of displaying callers' messages.
According to conventional radio display paging systems, a destination pager's identifier, or telephone number, is dialed to the public switched telephone network (PSTN) to access a voice response unit 30 of a paging system (FIG. 1). The voice response unit urges the caller to send either his or her own telephone number or a message. If the caller's telephone number is to be displayed, it is dialed by the caller and the dialed information is passed through a selector 31 to a store and forward circuit 32 in which it is stored. The store and forward circuit 32 receives the pager's identifier from the PSTN and forwards the dialed information and the received pager's identifier to a transmitter 33 in which it is converted to a specified line code, which is broadcast from antenna 35. On receiving the transmitted signal, the pager checks to determine if the pager identifier contained in the signal matches the pager's individual identifier, and if it does, the pager displays the caller's telephone number. If the caller's message is to be displayed, on the other hand, the caller dials two symbol marks (asterisks) in sequence and then a sequence of sentence codes signifying letters and numerals. The selector analyzes the two asterisks and knows that the dialed codes following the asterisks comprise a message, and passes the dialed information to a store, convert and forward circuit 34 in which the two asterisks are converted to other symbols ("--"). On receiving the pager's identifier from the PSTN, the circuit 34 applies the pager's identifier, the converted symbols ("--") and the message to the transmitter. On receiving this signal, the pager knows that the codes following the symbols "--" are a message and uses them as an address pointer for accessing a memory in which sentence patterns and alphanumeric data are stored.
However, users of this type of paging system are required to additionally dial special symbols when calling in a message display mode and the paging system is required to distinguish the message display mode from other display modes. |
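The mode-selection behaviour described above (a caller either dials a telephone number, or dials two symbol marks followed by sentence codes that are converted and forwarded for display) can be illustrated with a small, hypothetical sketch. The two leading symbol marks and their conversion to "--" follow the description above, but the code table and the two-digit code grouping are assumptions for demonstration and are not taken from the patent.

# Illustrative sketch only: a simplified model of the selector logic described
# in the patent text. The sentence-code table is hypothetical.
SENTENCE_TABLE = {"01": "CALL", "02": "OFFICE", "03": "HOME"}  # hypothetical codes

def route_dialed_input(dialed: str) -> dict:
    """Decide whether dialed input is a caller number or a coded message."""
    if dialed.startswith("**"):
        codes = dialed[2:]
        # Convert the leading symbol marks to the display prefix "--" and look up
        # each (assumed) two-digit sentence code in the hypothetical table.
        words = [SENTENCE_TABLE.get(codes[i:i + 2], "?") for i in range(0, len(codes), 2)]
        return {"mode": "message", "display": "--" + " ".join(words)}
    return {"mode": "number", "display": dialed}

if __name__ == "__main__":
    print(route_dialed_input("5551234"))   # caller number mode
    print(route_dialed_input("**0102"))    # message mode -> "--CALL OFFICE"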
The Role of Adaptive Scaffolding System in Supporting Middle School Problem-Based Learning Activities This mixed-method study introduced an adaptive scaffolding system to support middle school science problem-based learning (PBL) activities. 298 6th-graders were grouped into three conditions, which are the adaptive scaffolding group, the non-adaptive scaffolding group, and a control group that did not receive any scaffoldings. Results showed that problem-solving self-efficacy for students who engaged with the adaptive scaffolding system had improved significantly compared to the other two groups. Moreover, since students were conducting the PBL activity under two modes (online and in-person), the results also showed that students in the online mode gained more science content knowledge compared to their peers in the in-person mode after engaging with the adaptive system. Student interviews revealed that the real-time supports and in-time feedback provided by the adaptive system were the key elements that facilitated their improvements, teacher interviews presented that the adaptive system provided effective assists for teachers to facilitate the PBL activities. |
PTFM: Pre-processing Based Traffic Flow Mechanism for Smart Vehicular Networks The number of vehicles on the roads in urban areas has increased exponentially over the last few decades, which leads to numerous traffic-related problems. Traffic flow is disrupted by traffic jams, congestion, collisions, and various other hazards. As a result, average fuel consumption, travel time, and pollution levels are rising at a faster rate, and the average travel speed of vehicles is slowing down, especially in urban areas. This paper proposes a mechanism (PTFM) based on pre-processing traffic flow information on an additional node, called a shortcut node. Data for every vehicle and road are processed by this node and saved for future use; this information is later used to guide vehicles along better routes. In this article, the proposed mechanism is compared with the existing mechanisms NRR, DIVERT and RE-route, and the results show that PTFM outperforms these existing solutions.
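The abstract above does not spell out the algorithm, so the following sketch is only a generic illustration of the idea of a shortcut node that pre-processes and stores road information for later route guidance; the data structure, update rule and route-choice logic are assumptions for demonstration, not the paper's method.

# Generic illustration only: a "shortcut node" that records average travel times
# per road segment and later recommends the segment with the lowest average.
from collections import defaultdict

class ShortcutNode:
    def __init__(self):
        self.totals = defaultdict(float)   # segment -> summed travel time
        self.counts = defaultdict(int)     # segment -> number of reports

    def report(self, segment: str, travel_time: float) -> None:
        """Store a vehicle's observed travel time for a road segment."""
        self.totals[segment] += travel_time
        self.counts[segment] += 1

    def average(self, segment: str) -> float:
        return self.totals[segment] / self.counts[segment]

    def recommend(self, candidate_segments: list) -> str:
        """Recommend the known segment with the lowest average travel time."""
        known = [s for s in candidate_segments if self.counts[s] > 0]
        return min(known, key=self.average) if known else candidate_segments[0]

node = ShortcutNode()
node.report("A-B", 4.0)
node.report("A-B", 6.0)
node.report("A-C", 3.5)
print(node.recommend(["A-B", "A-C"]))  # -> "A-C"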
Healthy diets ASAP Australian Standardised Affordability and Pricing methods protocol Background This paper describes the rationale, development and final protocol of the Healthy Diets Australian Standardised Affordability and Pricing (ASAP) method which aims to assess, compare and monitor the price, price differential and affordability of healthy (recommended) and current (unhealthy) diets in Australia. The protocol is consistent with the International Network for Food and Obesity / non-communicable Diseases Research, Monitoring and Action Supports (INFORMAS) optimal approach to monitor food price and affordability globally. Methods The Healthy Diets ASAP protocol was developed based on literature review, drafting, piloting and revising, with key stakeholder consultation at all stages, including at a national forum. Discussion The protocol was developed in five parts. Firstly, for the healthy (recommended) and current (unhealthy) diet pricing tools; secondly for calculation of median and low-income household incomes; thirdly for store location and sampling; fourthly for price data collection, and; finally for analysis and reporting. The Healthy Diets ASAP protocol constitutes a standardised approach to assess diet price and affordability to inform development of nutrition policy actions to reduce rates of diet-related chronic disease in Australia. It demonstrates application of the INFORMAS optimum food price and affordability methods at country level. Its wide application would enhance monitoring and utility of dietary price and affordability data from a health perspective in Australia. The protocol could be adapted in other countries to monitor the price, price differential and affordability of current and healthy diets. Electronic supplementary material The online version of this article (10.1186/s12937-018-0396-0) contains supplementary material, which is available to authorized users. Background Poor diet is now the major preventable disease risk factor contributing to burden of disease, globally and in Australia. Less than 4 % of the population consume diets consistent with the evidence-based Australian Dietary Guidelines ; on average, at least 35% of the total daily energy intake of adults and at least 39% of the energy intake of children are now derived from unhealthy 'discretionary' food choices, defined as foods and drinks high in saturated fat, added sugar, salt and/or alcohol that are not required for health. Of particular concern is the contribution of poor diet to the rising rates of overweight and obesity. Based on measured height and weight, 25% of Australian children aged two to 17 years and 63% of Australian adults aged 18 years and over are now overweight or obese. There is an urgent need for nutrition policy actions to help shift the current diet of the population towards healthy diets as recommended by the Australian Dietary Guidelines. The expense of healthy foods has been reported as a key barrier to consumption in Australia, particularly among low socioeconomic groups. However, well-defined data in this area are lacking as classification of 'healthy' and 'unhealthy' foods and diets varies and the relative price of 'healthy' and 'unhealthy' foods depends on the unit of measure (i.e. per energy unit, nutrient density, serve or weight). Comparisons can be difficult particularly in the context of the total diet and habitual dietary patterns that are the major determinant of diet-related disease [3,. 
However, the relative price and affordability of current and healthy (recommended) diets have been assessed rarely, as opposed to the relative price of selected pairs of 'healthy' and 'less healthy' foods. Various methods have been utilised to assess food prices in Australia, such as Consumer Price Indexes (CPI) and supermarket price surveys, however these usually tally the price of highly selected individual food items and do not necessarily relate to relative cost of the total habitual diet. A variety of 'food basket' diet costing tools have also been developed at state, regional and community levels. These methods have the potential to measure the cost of a healthy diet. However, dissimilarity of metrics is a recognised barrier to the production of comparable data. A recent systematic review of food pricing methods used in Australia since 1995, identified 59 discrete surveys using five major food basket pricing tools (used in multiple survey areas and multiple time periods) and six minor food basket pricing tools (used in a single survey area or time period). No national survey had been conducted. Survey methods differed in several metrics including: type and number of foods surveyed; application of availability and/or quality measures; definition of reference households; calculation of household income; store sampling frameworks; data collection; and analysis. Hence results are not comparable across different locations or different times. With exception of Queensland Health's Healthy Food Access Basket tool revised in 2015, none of these fully align with a healthy diet as recommended by the Australian Dietary Guidelines. Further, none accurately reflect current Australian diets. Since 1995, the vast majority of 'healthy' food pricing surveys in Australia have confirmed that: food prices in rural and remote areas are up to 40% higher than those in capital cities; lower socioeconomic households need to spend a higher proportion of their income to procure healthy diets than other Australians, and food prices generally increase over time. Related calls for interventions, such as for freight subsidies or food subsidies for low income groups in specific regions have gone unheeded. Hence, it could be asserted that these surveys have had limited utility in informing fiscal and health policy. As a result, there have been several calls for the development of standardised, healthy food and diet pricing survey methods nationally in Australia and globally. There is also a need for policy-relevant data. The aim of relevant nutrition policy actions is to help shift the current intake of the whole population to a healthier diet consistent with dietary recommendations. Governments can manipulate food prices through a range of complex policy approaches. Three common strategies to increase the affordability of 'healthy' foods are: taxing 'unhealthy foods' ("fat taxes") e.g. on sugar sweetened beverages; exempting 'healthy foods' from goods and service tax (GST) or value added tax; and subsidising 'healthy foods, ' such as through agricultural and transport subsidies, retail price reductions, or voucher systems targeted to vulnerable population groups. Therefore, to inform relevant policy decisions, robust data are required for both current (unhealthy) and healthy (recommended) diets. 
With respect to food price and affordability, the key health and nutrition policy relevant question to be answered by food pricing surveys is: "What is the relative price and affordability of 'current' (unhealthy) and 'healthy' (recommended) diets?" While the potential effects of specific changes to fiscal policy have been modelled, recent 'real life' data are lacking to inform policy decision making in Australia. Assessment of the price, price differential and affordability of a healthy diet (consistent with Dietary Guidelines) and current (unhealthy) diets (based on national surveys), determined by standardised national methods, would provide more robust data to inform health and fiscal policy in Australia and monitor potential fiscal policy interventions. There is a lack of such data globally; the current research helps to address this, within the food price module of the International Network for Food and Obesity/ non-communicable diseases Research, Monitoring and Action Support (INFORMAS). Under the auspices of INFORMAS, the results of this study provide a potential globally-applicable stepwise food price and affordability monitoring framework that advocates 'minimal', 'expanded' and 'optimal' approaches, to establish benchmarks and monitor the cost of healthy food, meals and diets; the level depends on availability of data and country capacity. The novel INFORMAS 'optimal' approach proposes concurrent application of two food pricing tools to assess the price, price differential and affordability of a healthy diet (consistent with Dietary Guidelines) and current (unhealthy) diets (based on national surveys). It requires assessment of household income, representative sampling and, ideally, stratification by region and socio-economic status (SES). Based on the 'optimal' approach of the INFORMAS diet price and affordability framework, we developed a standardised method to assess and compare the price and affordability of healthy and current diets in Australia, provide more robust, meaningful data to inform health and fiscal policy in Australia, and develop national data benchmarks with the potential for international comparisons. This paper presents the resultant protocol for Healthy Diets ASAP methods in Australia. Aim The aim of this paper is to describe the development and final protocol of the Healthy Diets ASAP methods, based on the INFORMAS optimal price and affordability approach. It details tools and methods to assist others to apply the approach in a standard manner, in order to enable comparison of the price, price differential and affordability of healthy (recommended) and current (unhealthy) diets in Australia. Development of the healthy diets ASAP protocol Background: Developing and piloting the initial diet pricing tools and methods In November 2013, all key Australian stakeholders gave in-principle support at a national teleconference for the development of national food price and affordability monitoring methods based on the INFORMAS 'optimal' approach. The development and pilot testing of the methods using readily available dietary data for five household structures in high socio-economic (SES) and low SES areas is reported elsewhere. The findings confirmed that the general approach could provide useful, meaningful data to inform potential fiscal and health policy actions. 
Application of the diet pricing tools accurately reflected known composite food group ratios and the proportion of the mean food budget Australian households spent on discretionary foods and drinks in analysis of the Consumer Price Index (CPI) with respect to Australian Dietary Guidelines food groups. However, internal validity testing suggested that construction of some of the initial diet pricing tools could be improved to enhance accuracy. For example, while performance of both diet pricing tools was acceptable at household level, only the healthy diet pricing tool was acceptable at an individual level for all demographics in the sample; the unhealthy (current) diet pricing tool could be improved for the 14 year old boy and both genders aged 70 years or over. Further, potential systematic errors could be minimised by the utilisation of detailed dietary survey data in the Confidentialised Unit Record Files (CURFs) from the Australian Health Survey (AHS) 2011-12 and the Australian 2011-13 food composition database, both of which were unavailable at the time of the pilot study. Development of accepted, standardised diet pricing methods also required agreement from all key stakeholders on the final approach, including accord on systematic arbitrary decisions points around application of the tools (such as whether to record the price of the next largest or smallest packet if a particular size of food was unavailable in-store). There was also a desire to simplify methods to optimise uptake and utility. Development and testing of diet pricing tools and process protocols The final Healthy Diets ASAP protocols were developed in two phases. Phase one: Revising and re-testing initial tools and methods The food pricing tools were revised based on the pilot outcomes and feedback from international food pricing experts (including at the Food Pricing Workshop convened by authors AL and CP at the 14th International Society of Behavioural Nutrition and Physical Activity (ISBNPA) conference in Edinburgh May 2015). The revised unhealthy (current) diet pricing tools reflected dietary data at the five-digit level by age and gender groupings in the CURFs of the AHS 2011-12. The most commonly available branded items and unit sizes in Australian supermarkets were identified from the pilot. Other minor changes, and the reasons for these, are included in Table 1. The revised Healthy Diets ASAP diet pricing tools and methods were applied to assess the price, price differential and affordability of current and healthy diets in six randomly selected locations in two major cities (Sydney, New South Wales and Canberra, Australian Capital Territory) in November and December 2015. The preliminary reports of these studies were provided to NSW Health and ACT Health in early 2016. Colleagues in these government departments provided feedback on the revised methods early March 2016. Phase 2: Development of the final protocol At the national Healthy Diets ASAP Methods Forum (the Forum) held in Brisbane on 10 March 2016, 25 expert stakeholders from academia, government jurisdictions and non-government organisations (see Acknowledgements) worked together to finalise the Healthy Diets ASAP tools and methods for national application in Australia. De-identified preliminary data from and feedback on the reports provided to NSW Health and ACT Health were used to highlight methodological challenges and arbitrary decision points during the Forum. 
Generally, the revised tools and methods applied in Sydney and Canberra were confirmed at the Forum. However, some simplifications around arbitrary decision points were recommended ( Table 2). The revised tools and methods were finalised according to the recommendations from the Forum. The resultant Healthy Diets ASAP protocol is described in detail in the results. Following the Forum, the food price data collected in Sydney and Canberra in late 2015 were reanalysed according to the Healthy Diets ASAP protocol and the preliminary reports to NSW Health and ACT Health were finalised in May 2016. The healthy diets ASAP protocol There are five parts to the Healthy Diets ASAP protocol. The healthy diets ASAP protocol part one: Construct of the diet pricing tools There are two diet pricing survey tools: the current (unhealthy) diet pricing tool; and the healthy (recommended) diet pricing tool ( Table 3). The diet pricing survey tools include provision of quantities of food for a reference household consisting of four people, including an adult male 31-50 years old, an adult female 31-50 years old, a 14 year old boy and an 8 year old girl. An allowance for edible portion/as cooked, as specified in AUSNUT 2011-13, is included in both diet pricing tools. Any post plate wastage was not estimated or included. The healthy diets ASAP current (unhealthy) diet pricing tool The current (unhealthy) diet pricing tool constitutes the sum of the mean intake of specific foods and drinks, expressed in grams or millilitres, in each age/gender group corresponding to the four individuals comprising the reference household, as reported in the AHS 2011-12. Foods are grouped according to stakeholder recommendations (Table 2) and amounts consumed per day are derived from the CURFs at 5-digit code level. The mean reported daily intake for each of the four individuals (Additional file 1) are multiplied by 14 and tallied to produce the quantities consumed per household per fortnight. The amounts of foods and drinks comprising the Healthy Diets ASAP current (unhealthy) diet for the reference household per fortnight is presented in Table 3. The total energy content of the reference household's current diet is 33,860 kJ per day. Common brands of included food and drink items are included in the data collection sheet in Table 4. The healthy diets ASAP healthy (recommended) diet pricing tool The healthy diet pricing tool reflects the recommended amounts and types of foods and drinks for the reference household for a fortnight, consistent with the Australian Guide to Healthy Eating and the Australian Dietary Guidelines. The amounts are calculated from the daily recommended number of servings and relevant serve size of foods for the age/gender and physical activity level (PAL) of 1.5 of the four individuals comprising the reference household in the omnivorous Foundation Diet models. 
As the Foundation Diets were developed for the smallest adults (or in the case of children, the youngest) in each age/gender group, the amounts of foods were increased by 20% for the 8 year old girl, who is the oldest in her height/age group, according to the recommendations. To ensure the most commonly consumed healthy foods in Australia are used, food categories in the healthy diet pricing tool are the same as those in the current diet pricing tool (but differ in quantity). A variety of fresh, canned, frozen and dried foods is included. For example, representative categories of fresh produce reflect common fruit and vegetables available all year round in Australia. Luxury products, such as imported fruit and vegetables (particularly those out of season) and foods with very high cost per kilogram (e.g. oysters, smoked salmon) are excluded. Some 'convenience' foods are included in the healthy diet pricing tool as per stakeholder decisions (Table 2). Consistent with Australian recommendations, the healthy diet pricing tool does not contain any discretionary choices. It includes: grain (cereal) foods, in the ratio 66% wholegrain and 33% refined varieties; cheese, milk, yoghurt and calcium-fortified plant based alternatives, mostly (i.e. > 50%) reduced fat, with a maximum of 2-3 serves of high fat dairy foods (cheese) per person per week; lean meat (beef, lamb, veal, pork), poultry and plant-based alternatives (with no more than 455 g red meat per person per week); a minimum of 140 g and up to 280 g fish per person per week; up to 7 eggs per person per week; a selection of different colours and varieties of vegetables (green and brassica, orange, legumes, starchy vegetables, other vegetables) with a minimum 350 g per day for adults; a variety of fruit with a minimum of 300 g per day for adults; and an allowance of unsaturated oils or spreads or the nuts/seeds from which they are derived. The daily quantities of food categories recommended for each individual (age/gender) in the reference household (Additional file 2) are multiplied by 14 and tallied to provide quantities per fortnight (Table 3). The amounts of foods and drinks comprising the Healthy Diets ASAP healthy (recommended) diet for the reference household per fortnight are presented in Table 3. The total energy content of the household's healthy diet is 33,610 kJ per day. Common brands of included food and drink items are included in the data collection sheet in Table 4.
Table 1 Minor revisions to the initial diet pricing tools and methods (each entry lists the improvement, then its aim/rationale/comment):
- Added bottled water, olive oil, and relatively healthy pre-made "convenience" foods, such as sandwiches and cooked chicken, to the healthy (recommended) diet pricing tools. Rationale: to enhance comparability with the current (unhealthy) diet pricing tools, which include comparable, but less healthy, options.
- Further aggregated nutritionally similar products with similar utility in both diet pricing tools (for example, 'cabana' and 'bratwurst' were grouped with 'sausages'). Rationale: to minimise the number of items to be priced in-store, reducing survey burden and cost.
- Included the same food groupings in the healthy food component of both current and healthy diet pricing tools. Rationale: to simplify data collection, comparison between current and healthy diets, and interpretation of results.
- Adjusted the diet of the 8 year old girl (who was the oldest in her age/gender group) from the base Foundation Diets levels, according to the prescribed methods of Total Diet modelling used to inform the 2013 revision of the Australian Guide to Healthy Eating of the Australian Dietary Guidelines. Rationale: to ensure adequate energy content of the constructed healthy (recommended) diet of the 8 year old girl in the reference household.
- Adjusted median household income at Statistical Area Level 2 (SA2) level by the relevant wage price index; clarified that available data sets at SA2 level provide median gross (i.e. not disposable) household income. Rationale: to incorporate the effect of inflation. Median household income at sub-national (area) level is readily available from published government sources, so has been used frequently in calculation of food affordability in Australia. However, published median household income data at area level reflects gross (total) income and has not been adjusted for essential expenditures such as taxation, to reflect disposable household income; results should be interpreted accordingly.
- Included a third option for estimating median disposable household income at the national level, for use in future national diet price and affordability surveys. Rationale: to enhance comparability with low (minimum) disposable household income, which is also calculated at the national level. Median disposable household income is available only at national level currently; however, data may be available at state/territory level in the future.
Table 2 (excerpt, as recoverable from the extracted text) Stakeholder decisions on arbitrary decision points:
- Price data collection, items c)-f): c) the cheapest price could also be collected to answer an optional additional question, but inclusion of the cheapest price, including of sales or generic items, has potential to bias, affect comparability and distort results over time; d) as above; e) as above, and if optionally collecting the cheapest price, the multi-buy price could be divided to obtain a single price; f) may need to use multiple data collection forms for each store, or add an additional data collection column, if collecting optional prices.
- 4: Unhealthy (current) diet pricing tool. a) Adjust for known under-reporting in AHS 2011-12? Decision: no adjustment; report as 'best case scenario'. There are no robust data on which to base an adjustment factor, so adjustment could introduce error; the analysis is not adjusted for any other reasons. b) Confirm coding for five food group and discretionary foods? Decisions: tinned meat and vegetables, code as vegetables and meat; tinned fruit, code as fruit; ham salad sandwich, replace with chicken salad sandwich and code as 1/3 bread, 1/3 vegetables, 1/3 chicken meat; choc-chip muesli bar, code as discretionary; flavoured milk, code as non-discretionary (decision consistent with ABS classification); processed meats (e.g. ham), code as discretionary; water, include reported water intake as bottled water (costed) and as tap water (not costed). Decisions should be consistent with coding used by the ABS in the AHS 2011-12, and should be reassessed when the Australian Dietary Guidelines (ADGs) are reviewed (i.e. in 5 years' time).
- Healthy (recommended) diet pricing tool. Should any extra healthy foods be included, such as more convenience options or bottled water? Is the healthy diet unrealistic without inclusion of some discretionary foods or drinks, such as alcohol? Decisions: water, include reported water intake as bottled water; convenience items, confirmed inclusion of roasted chicken and sandwich, with no further inclusions. Use the ADG Modelled Foundation diets, based on the rationale that 63% of Australian adults are overweight/obese and that no adjustment was made for under-reporting in the current diet.
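A minimal sketch of the fortnightly quantity calculations described in part one may help make the two tools concrete: the current (unhealthy) tool multiplies each person's mean reported daily intake by 14 and sums across the household, while the healthy (recommended) tool multiplies recommended serves by serve size and by 14, with the 20% increase applied to the 8 year old girl. The food items, intakes and serve sizes below are placeholders, not the values in Additional files 1 and 2.

# Minimal sketch of the part-one quantity calculations, with made-up numbers.
FORTNIGHT = 14

# Current (unhealthy) tool: grams or mL per day for each household member (placeholders).
mean_daily_intake = {
    "white bread": {"man": 80, "woman": 60, "boy 14": 90, "girl 8": 50},
}

def current_diet_fortnight(item: str) -> float:
    """Mean reported daily intake x 14, summed across the reference household."""
    return sum(mean_daily_intake[item].values()) * FORTNIGHT

# Healthy (recommended) tool: recommended serves/day and grams per serve (placeholders).
recommended_serves = {"wholegrain bread": {"man": 6, "woman": 6, "boy 14": 7, "girl 8": 4}}
serve_size_g = {"wholegrain bread": 40}

def healthy_diet_fortnight(item: str) -> float:
    """Recommended serves x serve size x 14, with +20% for the 8 year old girl."""
    total = 0.0
    for person, serves in recommended_serves[item].items():
        grams = serves * serve_size_g[item]
        if person == "girl 8":
            grams *= 1.2   # Foundation Diet amounts increased by 20% for the girl
        total += grams
    return total * FORTNIGHT

print(current_diet_fortnight("white bread"))       # household grams per fortnight
print(healthy_diet_fortnight("wholegrain bread"))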
Diet pricing tools for additional household structures Several stakeholders requested ( Table 2) that the composition of current (unhealthy) and healthy (recommended) diets be provided for four other household compositions commonly investigated in Australia 1 (for example, for single parent or pensioner households) so that additional data analysis could be performed. These data are included in Additional file 3. and recommendations. Further, the proportion of household food expenditure on discretionary items (around 58%) is similar to that described by the ABS (58.2%) using different methods based on household expenditure. Hence the tools appear valid for use in estimating the cost of current and healthy diets. Validity of the diet pricing survey tools The healthy diets ASAP protocol part two: Location and store sample selection A random sample of the Statistical Area Level 2 (SA2) locations in each town is selected to achieve a representative sample. SA2 locations are stratified by the Index of Relative Socio-Economic Disadvantage for Areas (SEIFA) quintile using information and maps available on the ABS website Following sample size calculations, the required number of SA2 locations within SEIFA Quintile 1, 3 and 5 are selected randomly for participation. Food outlets within seven kilometres by car of the centre of each SA2 location are identified with Google™ Maps and included in the surveys. Stores to survey include one outlet of all supermarket chains (in trials these were Coles™, Woolworths™ and Independent Grocers Australia (IGA™), Supabarn™ and ALDI™), 'fast-food'/take-away outlets (a Big Mac™ hamburger from the McDonald's™ chain; pizza from the Pizza Hut™ chain; fish and chips from independent outlets) and two alcoholic liquor outlets closest to the geographical centre of each SA2 location. The healthy diets ASAP protocol part three: Collecting and entering food price data The Healthy Diets ASAP diet price survey data collection form (Table 4) combines the items included in the current diet and the healthy diet for convenience and utility. The agreed price data collection protocol is presented in Table 5 and is printed on each data collection form. Research assistants are trained to use the form and follow the price collection protocol strictly. Prices are collected within the same 4 week/monthly period, as prices change over time. Permission to participate is sought from each store manager prior to data collection. Data entry and analysis sheets have been developed using Excel™ spreadsheets. Double data entry is recommended to minimise error. Data are cleaned and checked. Any missing values are imputed to ascribe the mean price of the same food item in all other relevant outlets in the same SA2 area. Data analysis tools are available from the corresponding author. As has been achieved previously for the Victorian Health Food Access Basket, the Healthy Diets ASAP App is under development to streamline data collection and analysis and reduce error. Table 4 Healthy Diets ASAP (Australian Standardised Affordability and Price) Survey Data Collection Form The healthy diets ASAP protocol part four: Determination of household income Household income is determined by either of three methods, depending on the purpose of the study and the granularity of available data. Median household gross income at area level In Australia, national census data is the only source of SA2 level household income data and is provided only at total (gross) level. 
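The stratified, random selection of SA2 areas described in part two can be sketched as follows; the area names, the number of areas sampled per quintile and the random seed are placeholders for illustration, not values prescribed by the protocol.

# Illustrative sketch of the part-two sampling frame: SA2 areas stratified by
# SEIFA quintile, with a random selection from quintiles 1, 3 and 5.
import random

sa2_by_seifa_quintile = {
    1: ["Area A", "Area B", "Area C"],
    3: ["Area D", "Area E", "Area F"],
    5: ["Area G", "Area H", "Area I"],
}

def sample_sa2_areas(areas_by_quintile: dict, n_per_quintile: int, seed: int = 42) -> dict:
    """Randomly select n SA2 areas from each targeted SEIFA quintile."""
    rng = random.Random(seed)
    return {q: rng.sample(areas, n_per_quintile) for q, areas in areas_by_quintile.items()}

selected = sample_sa2_areas(sa2_by_seifa_quintile, n_per_quintile=2)
print(selected)
# Within each selected area, outlets within 7 km by car of the area centre would then
# be listed (e.g. from a mapping service) and the specified store types surveyed.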
The healthy diets ASAP protocol part four: Determination of household income
Household income is determined by either of three methods, depending on the purpose of the study and the granularity of available data.
Median household gross income at area level
In Australia, national census data is the only source of SA2-level household income data and is provided only at total (gross) level. Median gross household income is determined per week (before taxation, rent and other expenses) in each SA2 area by entering relevant post codes into the Community Profile data calculator that is based on the 2011 Census results, adjusting for the wage price index (for example, there was an increase of 11.1% from September 2011 to September 2015) and multiplying by two to derive median household income in each SA2 area per fortnight. Details and examples are provided in Additional file 5.
Indicative low (minimum) disposable household income
Indicative low (minimum) income of the reference household (and other households of interest to specific stakeholders) is calculated based on the level of minimum wages and determination of the welfare payments provided by the Department of Human Services, as per the methods used by the Queensland Department of Health. Assumptions are made for employment, housing type, disability status, savings and investments, child support, education attendance and immunisation status of children (Table 6). As welfare policy actions can change, the most recent schedules should be used. Where it is higher than the minimum threshold, the indicative low (minimum) household income is adjusted for taxation payable, so it also represents minimum household disposable income. Details and examples are provided in Additional file 6.
Median household disposable income at national level
For assessment of diet affordability at the national level, median equivalised disposable household income for the reference family composition is sourced from the Survey of Income and Housing.
The healthy diets ASAP protocol part five: Data analysis and reporting
The price of the healthy (recommended) and current diets in each store and the mean price for each SEIFA quintile is calculated for the reference household composition in each of the areas surveyed in each city. Results can be presented in a range of metrics, including the cost of the total diets per household per fortnight, and the cost of purchasing specific five food group and discretionary foods and drinks (including policy-relevant items such as alcohol, 'take-away foods' and sugar-sweetened beverages). The results for the current (unhealthy) diet and healthy (recommended) diet are compared to determine the differential. Affordability of the healthy and current diets for the reference household is determined by comparing the cost of each diet with the median gross household income (Additional file 5) and also with the indicative low (minimum) disposable income of low-income households (Additional file 6). Where a representative national survey of diet prices has been conducted, affordability of the healthy and current diets for the reference household is determined by comparing the cost of each diet with the median equivalised disposable income and with the indicative low (minimum) disposable income of low-income households. Internationally, a benchmark of 30% of income has been used to indicate affordability of a diet.
Table 5 Healthy Diets ASAP food price data collection protocol
1. Record the usual price of an item, i.e. do not collect the sale/special price unless it is the only price available (if so, note in comment column)
2. Look for the specified brand and specified size for each food item, and record the price. If the specified brand is not available: choose the cheapest brand (non-generic) available in the specified size, and note this brand in the "Your brand" column. If the specified size is not available: choose the nearest larger size in the specified brand.
If a larger size is not available, choose the nearest smaller size, and note this size in the "Your size" column. If both the specified brand and specified size are not available: choose the cheapest in the nearest larger size of another brand (non-generic); if a larger size is not available, choose the nearest smaller size. If multiple brands are specified, record the price of the cheapest one and note the brand in the "Your brand" column. If the item is only available in a generic form (e.g. Home Brand, Coles, Woolworths Select, Black and Gold), choose the most expensive generic item in the specified size. If the specified size is not available, choose the nearest larger size; if a larger size is not available, choose the nearest smaller size. Note the generic name in the "Your brand" and the size in the "Your size" columns.
3. Loose produce: choose the usual cheapest price per kg of the variety not on special. If the only variety available is on special, record the special price and note it in the comments column.
4. Peanuts: choose the branded packet size closest to 250 g. If packaged, roasted, unsalted peanuts are not available, record the price of the loose 'bulk scoop & weigh' roasted, unsalted peanuts per 100 g.
5. Check all data are collected and recorded as above, before leaving the store.
Table 6 Assumptions applied to determine the indicative low (minimum) disposable household income of the reference household
Data files can be manipulated to investigate the effects of potential fiscal policy changes on the affordability of current (unhealthy) and healthy (recommended) diets for the reference household. The price of the relevant foods and drinks can be modified readily to highlight the likely 'real-world' impacts of different scenarios, for example, to investigate the potential extension of the Goods and Services Tax (GST) to basic healthy foods, or the potential application of different levels of taxation on sugary drinks in Australia.
Discussion
There are several methodological limitations inherent in the Healthy Diets ASAP protocols. Given that it is based on national reported mean dietary intakes, the cost of the current (unhealthy) diet is unlikely to be the same as actual expenditure on food and drinks in specific areas and among specific groups. Other assumptions commonly made in similar apparent consumption and household expenditure surveys include that food is shared equitably throughout the household, that there is no home food production and that there is minimal wastage. Nutritionally similar products were aggregated to minimise the number of items included in the diet pricing tools, but products were not necessarily homogeneous in terms of price. However, similar healthy food items were included in each diet to try to minimise any unintended effects. Ideally, the specific foods included in both diet pricing tools are culturally acceptable, commonly consumed, widely available, accessible and considered 'every day' rather than luxury items. As the foods and drinks included in the current diet pricing survey tool reflect actual consumption data, it was presumed that they were deemed by the population as a whole as meeting these requirements. No adjustments were made for costs such as transport, time, cooking equipment and utilities; as these apply to both current and healthy diets, assessment of the price differential between the two can help control for some of these hidden costs to some extent.
However, these hidden costs would increase actual diet costs and decrease affordability of the diets. No adjustments were made to account for the marked under-reporting in the AHS 2011-12, reported dietary variability amongst different groups other than age/gender stratification, or the greater proportion of pre-prepared 'convenience' items in the current diet pricing tool compared with the healthy diet pricing tool. Given the high rates of overweight/obesity in Australia, the Foundation Diets were prescribed for the shortest and least active in each age group according to the modelling that informed the Australian Guide to Healthy Eating; however, this would under-estimate the requirements of taller, more active and healthy-weight individuals. No attempt was made to control the price of the healthy diet pricing tool or the current diet pricing tool for energy, as the diets are constructed on recommended energy levels and actual reported levels of energy, respectively. Further, the energy content of each tool is a determinant variable that directly affects diet-related health outcomes. As most Australians are already overweight or obese, increasing recommended energy requirements in excess of the Foundation Diets is not consistent with optimum health outcomes. As the key exposure variable affecting the lifetime risk of diet-related disease is the total diet and dietary patterns, approaches such as this, which compare metrics of actual current diets with recommended diets, are more pertinent to the health policy debate than the more common, but limited, studies into the relative price of selected 'healthy' and 'unhealthy' foods or single 'optimised' diets.
While a benchmark of 30% of income has been used to indicate affordability of diet internationally and in Australia, it is not clear from the literature whether this income comparator is gross income or disposable income. Using disposable income to estimate affordability better reflects the capacity of a household to afford food/diets; using gross income is a more conservative approach as it does not take taxation into account. However, in Australia currently, median disposable household income data are readily available only at national level; at area level, only median gross household income data are readily available. Further, the composition of the reference household does not necessarily align with that of households in the census in all areas. Comparing diet price with the indicative low (minimum) disposable household income more accurately estimates affordability of diets in vulnerable groups. However, the tax-paid component of indicative low (minimum) disposable household income can be removed to improve comparability with estimates of affordability determined by application of gross median household income.
Arbitrary decision points occur around sampling frameworks, data collection protocols (for example, selection of the cheapest comparable generic item if the branded item is unavailable in any size), analysis and presentation of results, data sources, and definitions of family and household income and composition. Such methodological limitations are common to other food price studies. In order for the final methods to be replicable, agreement among key stakeholders, including end users, on each of these decision points at the Healthy Diets ASAP Forum was invaluable. Publication of detailed protocols is essential to support uptake, replicability, fidelity and transparency of the method.
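A minimal sketch of the area-level affordability calculation described in parts four and five above (census weekly gross income adjusted by the wage price index and doubled to a fortnight, then each diet's cost expressed as a share of income against the 30% benchmark). All dollar figures are placeholders, not survey results.

```java
/**
 * Illustrative sketch only: area-level diet affordability as described in the protocol.
 * Income and diet-cost figures are placeholders, not survey results.
 */
public class Affordability {
    public static void main(String[] args) {
        double censusWeeklyGrossIncome = 1200.0;   // median gross household income per week, 2011 Census (placeholder)
        double wagePriceIndexIncrease = 0.111;     // e.g. 11.1% increase, Sep 2011 to Sep 2015
        double fortnightIncome = censusWeeklyGrossIncome * (1 + wagePriceIndexIncrease) * 2;

        double healthyDietCostPerFortnight = 600.0; // placeholder
        double currentDietCostPerFortnight = 750.0; // placeholder

        double healthyShare = healthyDietCostPerFortnight / fortnightIncome * 100;
        double currentShare = currentDietCostPerFortnight / fortnightIncome * 100;

        System.out.printf("Healthy diet: %.1f%% of income (within 30%% benchmark: %b)%n",
                healthyShare, healthyShare <= 30);
        System.out.printf("Current diet: %.1f%% of income (within 30%% benchmark: %b)%n",
                currentShare, currentShare <= 30);
    }
}
```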
The detailed dietary survey data required to produce the current (unhealthy) diet pricing tool and the modelling data required to produce the healthy (recommended) diet pricing tool are not easily accessible in all countries and technical capacity to analyse individual records may be limited. Therefore, this optimal approach may be too complex for application to assess and monitor the price of diets from a health perspective globally. However, there is potential for the diet pricing tools to be adapted for use in other countries by substitution of food components with commonly-consumed local equivalents, dietary analysis and testing.
Funding
Financial support to develop the national Healthy Diets ASAP methods was provided by The Australian Prevention Partnership Centre through the NHMRC partnership centre grant scheme (Grant ID: GNT9100001) with the Australian Government Department of Health, NSW Ministry of Health, ACT Health, HCF, and the HCF Research Foundation. The funders had no role in the design, analysis or writing of this manuscript.
Availability of data and materials
The datasets supporting the conclusions of this article are included within the article and its additional files.
Authors' contributions
AL led the project, developed concepts, constructed the current diet pricing tool, finalised the healthy diet pricing tool, developed sampling methods, convened and chaired the national stakeholders forum and drafted the manuscript; SK assisted in constructing the current diet pricing tool and finalising the healthy diet pricing tool, transposed, cleaned and analysed data and assisted with the national stakeholders forum; ML assisted with transposing and analysing food price data, finalised the household income assessment protocol and assisted with the national stakeholders forum; EG developed an early draft of the household income assessment protocol; CP provided conceptual advice; TL accessed and analysed dietary intake data from the Confidential Unit Record Files (CURFs) of the Australian Health Survey 2011-13 (ABS 2013a) to inform development of the current diet pricing tool and advised on methods to determine household income; MD developed an early draft of the healthy diet pricing tool. All co-authors reviewed drafts of the paper and contributed to the final manuscript.
Ethics approval and consent to participate
The QUT University Human Research Ethics Committee assessed this study as meeting the conditions for exemption from Human Research Ethics Committee review and approval in accordance with section 5.1.22 of the National Statement on Ethical Conduct in Human Research; the exemption number is 1500000161. All data were obtained from publically available sources and did not involve human participants.
Bisphenol A Exacerbates Allergic Inflammation in an Ovalbumin-Induced Mouse Model of Allergic Rhinitis
Purpose: Bisphenol A (BPA) is found in many plastic products and is thus a common environmental endocrine disruptor. Plastic-related health problems, including allergic diseases, are attracting increasing attention. However, few experimental studies have explored the effect of BPA on allergic rhinitis (AR). We explore whether BPA is directly related to the allergic inflammation induced by ovalbumin (OVA) in AR mice.
Methods: We first constructed an OVA-induced mouse model and, after BPA administration, evaluated nasal symptoms and measured serum OVA-specific IgE levels by ELISA. Th2- and Treg-related cytokines of the nasal mucosa were measured by cytometric bead array. Th2- and Treg-specific transcription factor levels were assayed by PCR. The proportions of CD3+CD4+IL-4+ Th2 cells and CD4+Helios+Foxp3+ T cells (Tregs) in spleen tissue were determined by flow cytometry.
Results: Compared to OVA-only-induced mice, BPA addition increased nasal symptoms and serum OVA-specific IgE levels. OVA and BPA coexposure significantly increased IL-4 and IL-13 protein levels compared to those after OVA exposure alone. BPA plus OVA tended to decrease IL-10 protein levels compared to those after OVA alone. Coexposure to OVA and BPA significantly increased the GATA-3-encoding mRNA level, and decreased the levels of mRNAs encoding Foxp3 and Helios, compared to those after OVA exposure alone. BPA increased the Th2 cell proportion, and decreased that of Tregs, compared to the levels with OVA alone.
Conclusion: BPA exerted negative effects by exacerbating AR allergic symptoms, increasing serum OVA-specific IgE levels, and compromising Th2 and Treg responses.
Introduction
Allergic rhinitis (AR) is characterized by nasal itching, sneezing, watery secretions, and congestion, reflecting the IgE-mediated mucosal inflammation driven by Th2 cells. AR affects over 500 million people worldwide. Regulatory T cells (Tregs) play an important role in preventing Th2-mediated inappropriate responses to environmental allergens. In recent years, the morbidity rate of AR has risen, especially among preschool children. However, whether there is any role for external factors remains unclear. Bisphenol A (BPA) is a common environmental endocrine disruptor, being widely found in plastics. Humans come into contact with BPA via the skin and when consuming food and water packaged in plastic containing BPA. BPA may act as a weak estrogen, and public health problems associated with BPA have attracted increasing attention. BPA exposure during the perinatal or prenatal period exacerbated allergic sensitization and bronchial inflammation in asthma models, and phthalates and BPA exacerbated atopic dermatitis in children. Although epidemiological studies have not yet clearly shown that BPA increases the incidence rates of allergy and asthma, it may enhance the risk of a Th2 response by altering immune cell function and cytokine production. BPA combined with OVA exacerbated eosinophilia severity in the lungs of adult mice, perhaps by promoting a Th2-biased immune response. As far as we know, few experimental studies have explored the effect of BPA on AR. Here, we explore whether BPA is directly related to the allergic inflammation induced by ovalbumin (OVA) in AR mice.
AR Murine Model and BPA Intervention
BALB/c mice (8 weeks, female) were purchased from Changsheng Biotechnology Co., Ltd., Liaoning, China.
All mice were raised on an OVA-free diet and randomly assigned to control, AR, and BPA groups (n = 10 each). The AR model has been described previously. From day 0 to day 14, BPA mice were subcutaneously injected with 0.5 mg/kg/d of BPA in corn oil; the other two groups received only corn oil. The experimental protocol, shown in Figure 1, was approved by the Ethics Committee of Shengjing Hospital.
Evaluation of Nasal Symptoms and Sample Collection
After the last intranasal OVA challenge, the numbers of sneezes and nose rubs over 15 min were recorded. Blood samples were collected from mice that were sacrificed under anesthesia; serum was obtained via centrifugation and stored at −80°C prior to IgE detection. Nasal mucosal samples were stored for cytokine and quantitative real-time PCR (qRT-PCR) assays. Spleens were removed for detection of CD3+CD4+IL-4+ Th2 cells and CD4+Helios+Foxp3+ Tregs via flow cytometry.
Cytokine Measurements and Detection of OVA-Specific IgE
Nasal mucosa samples were crushed and centrifuged, and supernatant IL-4, IL-5, IL-13, and IL-10 levels were assayed using the CBA Flex Set. All samples underwent flow cytometry using the FACS Aria III instrument (BD Biosciences); the data were processed using FACSDiva and BD CBA software ver. 4.2 (BD Biosciences). Serum OVA-specific IgE levels were determined by ELISA kits (BioLegend).
2.5. qRT-PCR Analysis of Nasal Mucosal Samples
Total RNA was collected using the Total RNA Extraction Kit, and complementary DNA (cDNA) was synthesized via reverse transcription using the PrimeScript RT kit according to the manufacturer's instructions. qRT-PCR was performed using a Roche LightCycler 480 II system (Roche, Basel, Switzerland). The sequences of the PCR primers are listed in Table 1. The relative expression of the three target genes was determined using the cycle threshold (2^-ΔΔCT) method and normalized to the β-actin level.
2.7. Statistical Analysis
All data are expressed as means ± SEM. One-way ANOVA was used to compare the groups. A P value < 0.05 was taken to indicate statistical significance. GraphPad Prism software (GraphPad Software Inc., La Jolla, CA) was used to statistically analyze the data and draw graphs.
Effects of BPA on OVA-Induced AR Nasal Symptoms and OVA-Specific IgE Levels
Sneezing and nose scratching are the principal symptoms of AR; any effect of BPA on AR depends on the extent to which BPA affects these symptoms. We recorded the numbers of sneezes and nose rubs in the three groups of mice over 15 min after the last intranasal OVA challenge. As shown in Figure 2, the OVA and BPA groups exhibited significantly more symptoms than the control (P < 0.05); the OVA plus BPA mice (BPA group) showed more symptoms than the OVA-only group (OVA group) (P < 0.05). Serum OVA-specific IgE levels were significantly elevated in the OVA-induced group compared to the control group (P < 0.05) and were even higher in the OVA plus BPA group (Figure 3). BPA thus aggravated AR nasal symptoms and serum OVA-specific IgE levels.
3.3. Effect of BPA on the Cytokine Levels of Tregs
As is known, IL-10, a major cytokine of Tregs, plays an important role in the development of AR. Figure 4(d) shows that, compared to PBS, OVA reduced IL-10 protein levels (P < 0.05). BPA tended to further reduce the protein levels (P < 0.05).
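For reference, the relative-expression values reported in these results follow the comparative cycle-threshold calculation named in the qRT-PCR section above (2^-ΔΔCT, normalized to β-actin). The general form is shown below; the specific calibrator group, beyond β-actin normalization, is not detailed in the text, so this is the standard formulation only.

```latex
\Delta C_T = C_T^{\mathrm{target}} - C_T^{\beta\text{-actin}}, \qquad
\Delta\Delta C_T = \Delta C_T^{\mathrm{treated}} - \Delta C_T^{\mathrm{control}}, \qquad
\text{relative expression} = 2^{-\Delta\Delta C_T}
```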
Effect of BPA on Th2 Cell-Specific Transcription Factors
The effect of BPA on the levels of a Th2 cell-specific transcription factor (GATA-3) was evaluated. Compared to PBS, OVA increased the levels of mRNA encoding GATA-3 (P < 0.05, Figure 5(a)). BPA further increased the levels (P < 0.05).
Effect of BPA on Treg-Specific Transcription Factor Levels
Foxp3 is the most specific marker of Tregs; Helios status is helpful for identifying Treg subsets showing consistent suppressive activity. We measured the levels of mRNA encoding Foxp3 and Helios. Figures 5(b) and 5(c) show that, compared to PBS, OVA reduced these levels; BPA further reduced the levels (P < 0.05).
Effect of BPA on the Proportions of Th2 Cells
An imbalance among the CD4+ Th cell subsets, particularly Th2 cells, triggers and maintains allergic responses. We assessed the effect of BPA on the proportions of Th2 cells (Figure 6).
3.7. Effect of BPA on the Proportion of Tregs
Tregs modulate the immune system and maintain tolerance to self-antigens. We measured the proportion of CD4+Helios+Foxp3+ Tregs via flow cytometry; compared to PBS, OVA decreased the proportion of these cells (P < 0.05, Figure 7). BPA further decreased the proportion of the cells compared to OVA alone (P < 0.05).
Discussion
We used an established OVA-induced AR murine model to explore whether BPA affected allergic reactions. Nasal symptoms were exacerbated and OVA-specific IgE levels were increased in BPA plus OVA-treated AR mice. BPA increased the proportion of Th2 cells and the mRNA level of GATA-3, and decreased the proportion of Tregs and the levels of mRNAs encoding Foxp3 and Helios. AR symptoms are thus attributable to IgE-mediated inflammation of the nasal mucosa. We found that BPA exposure significantly increased allergic symptoms and serum OVA-specific IgE levels; thus, BPA directly affected experimental AR. AR is a common disorder caused by an inappropriate Th2-mediated immune response to environmental antigens. Th2 cells secrete specific cytokines, including IL-4, IL-5, and IL-13, which are important drivers of AR immunopathology. IL-4 promotes T cell activation and differentiation into Th2 cells; IL-4 and IL-13 play roles in B cell differentiation and IgE and mucus production in the airway. IL-5 is locally produced at sites of allergic inflammation and recruits eosinophils from the bone marrow; these cells contribute to AR injury by releasing cytotoxic granular proteins. The GATA-3 transcription factor is specific to Th2 cells. It promotes the production of IL-4, IL-5, and IL-13; induces Th0 cells to differentiate into Th2 cells; and inhibits Th1 cell differentiation. We explored the effects of BPA on Th2 cells by measuring the proportions of CD3+CD4+IL-4+ Th2 cells, as well as the protein levels of Th2-related cytokines and the mRNA level of GATA-3. BPA significantly increased expression of all of these cells and factors. Our results are similar to those of Yanagisawa: OVA and BPA coexposure increased the mouse lung levels of mRNAs encoding IL-4, IL-5, and IL-13 compared to those after OVA exposure alone. We found that the IL-5 level in the BPA mice was somewhat higher than in the OVA ones; however, the difference was not significant, perhaps because of the small sample sizes (n = 5/group). Tregs prevent inappropriate Th2 responses to environmental allergens. Tregs secrete inhibitory cytokines, including IL-10, that induce Treg formation from naive T cells and inhibit the development of other types of immune cells. Foxp3 is a Treg-related transcription factor, and Helios is a marker of Treg activation.
The effects of BPA on the Treg response were evaluated by measuring the proportion of Tregs, IL-10 protein levels, and Helios and Foxp3 mRNA levels. Compared to OVA alone, coexposure to OVA and BPA reduced the Treg response (i.e., the proportion of Tregs, IL-10 protein levels, and Helios and Foxp3 mRNA levels). Previous studies showed that BPA reduced the Treg proportion, thus compromising the immune system; the Th1/Th2 ratio changed and disease developed. BPA is a common environmental endocrine disruptor, which can disrupt the human endocrine, reproductive, and immune systems through cell signaling pathways, and can increase the risk of certain diseases, including obesity, cardiovascular disease, brain disease, asthma, and even cancer. The signaling pathways affected by BPA include signal transducer and activator of transcription 3 (STAT3), early growth response gene-2, NF-κB, and ERK1/2. Previous studies of AR immune cell signaling pathways have mainly focused on the influence of the STAT family on Th differentiation. STAT family members include STAT1, STAT2, STAT3, STAT4, STAT5A, STAT5B, and STAT6. STAT1 and STAT4 are the key factors of IFN-γ signaling and IL-12 signaling, respectively, and both are critical for Th1 polarization. In contrast, STAT6, an important factor in Th2 signal transduction, can inhibit Th1 polarization. STAT5 can regulate the differentiation of Treg cells by regulating Foxp3 expression. STAT3 is an essential transcription factor for Th17 differentiation and Treg inhibition. In addition, it can promote the development of Th2 cells in the context of STAT6 signaling. STAT3 is activated by immune cytokines and endocrine-disrupting chemicals, including BPA. Therefore, we speculate that STAT3 may be activated after exposure to BPA, enhancing the Th2 response and inhibiting the Treg reaction through the STAT3 signaling pathway, further aggravating AR inflammation. Our previous studies confirmed the effects of the environmental hormone nonylphenol on AR and showed that nonylphenol can aggravate Th2-associated immune reactions in an AR mouse model. The present study focused on BPA. However, a comprehensive analysis is required to determine the combined actions of environmental endocrine disruptors.
Conclusion
Our findings provide evidence of the negative effects of BPA in an OVA-induced AR mouse model. BPA can exacerbate AR allergic symptoms, increase serum levels of OVA-specific IgE, and compromise Th2 and Treg responses. Our results serve as a warning regarding the adverse effects of BPA in adult AR.
Data Availability
The data used to support the findings of this study are available from the corresponding author upon request.
Conflicts of Interest
The authors declare that they have no competing interests.
/*
BSD 3-Clause License
Copyright (c) 2019, Tomas
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package io.github.tomaso2468.rpgonline.audio;
import java.net.URL;
/**
* <p>
* A class for managing audio. This class acts as a wrapper around the paulscode
* sound system. system. The following formats are supported: {@code .ogg},
* {@code .wav}, {@code .xm}, {@code .mod}, {@code .s3m}.
* </p>
* <p>
 * Sounds are mapped as strings to URLs locating audio files. They are only
 * loaded once they are played. Music will be streamed. Volumes can be set for
* different categories of sounds.
* </p>
* <p>
* For most audio settings (pitch & volume) the default value is 1 representing
* normal pitch and full volume. When sound positions are set they are assumed
* to be in real world coordinates.
* </p>
*
* @author Tomaso2468
*
*/
public interface AudioSystem {
/**
* Initialise the AudioSystem.
*/
public void init();
/**
* Adjusts the pitch by a random amount with a range either side of base.
*
* @param base The base pitch (usually 1)
* @param range The range (on either side) of the pitch that will be possible.
* @return A float value.
*/
public default float pitchAdjust(float base, float range) {
return (float) ((Math.random() * 2 - 1) * range + base);
}
/**
	 * Creates a pitch around 1 with a range in either direction.
*
* @param range The range (on either side) of the pitch that will be possible.
* @return A float value.
*/
public default float pitchAdjust(float range) {
return pitchAdjust(1, range);
}
/**
* Deletes the sound system.
*/
public void dispose();
/**
* Gets the current class of the sound library.
*
* @return A class.
*/
public Class<?> getSoundLibraryClass();
/**
* Gets the currently playing piece of music.
*
	 * @return An ambient music object or null if no music is playing.
*/
public AmbientMusic getMusic();
/**
* Sets the currently playing piece of music.
*
* @param m A music object or null to stop all music.
*/
public void setMusic(AmbientMusic m);
/**
	 * Sets the currently playing music based on a sound ID.
*
* @param s A sound ID
*/
public void setMusic(String s);
/**
* Sets the currently playing music based on a music ID.
*
* @param s A music ID.
*/
public void setMusicID(String s);
/**
* Sets the volume of a specified sound group.
*
* @param g A sound group ID.
* @param v The desired volume.
*/
public void setGroupVolume(String g, float v);
/**
* Gets the current volume of a sound group.
*
* @param g A sound group ID.
* @return The current volume of a sound group.
*/
public float getGroupVolume(String g);
/**
* Sets the current volume of all audio (this is multiplied with all channels).
*
* @param v The desired volume.
*/
public void setMasterVolume(float v);
/**
* Gets the current volume of all audio.
*
* @return The current master volume.
*/
public float getMasterVolume();
/**
* Sets the current volume of music.
*
* @param v The desired volume.
*/
public void setMusicVolume(float v);
/**
* Gets the current volume of music.
*
* @return The current volume of music.
*/
public float getMusicVolume();
/**
* Sets the volume of normal (non-ambient) sounds.
*
* @param v The desired volume.
*/
public void setSoundVolume(float v);
/**
* Gets the current volume of normal (non-ambient) sounds.
*
* @return The current volume.
*/
public float getSoundVolume();
/**
* Sets the current volume of ambient sounds.
*
* @param v The desired volume.
*/
public void setAmbientVolume(float v);
/**
* Gets the current volume of ambient sounds.
*
* @return The current volume.
*/
public float getAmbientVolume();
/**
* Plays a sound at a specified location with a pitch, volume and velocity.
*
* @param name The sound ID to play.
* @param v The volume of the sound.
* @param p The pitch of the sound.
* @param x The position of the sound (horizontal).
* @param y The position of the sound (vertical).
* @param z The position of the sound (depth).
* @param loop Set to true if the sound should be looped.
* @param dx The horizontal velocity of the sound.
* @param dy The vertical velocity of the sound.
* @param dz The depth velocity of the sound.
* @return The internal sound ID.
*/
public String playSound(String name, float v, float p, float x, float y, float z, boolean loop, float dx, float dy,
float dz);
/**
* Plays a sound at a specified location with a pitch, volume and velocity
* without looping.
*
* @param name The sound ID to play.
* @param v The volume of the sound.
* @param p The pitch of the sound.
* @param x The position of the sound (horizontal).
* @param y The position of the sound (vertical).
* @param z The position of the sound (depth).
* @param dx The horizontal velocity of the sound.
* @param dy The vertical velocity of the sound.
* @param dz The depth velocity of the sound.
* @return The internal sound ID.
*/
public String playSound(String name, float v, float p, float x, float y, float z, float dx, float dy, float dz);
/**
* Plays a sound at a specified location with a pitch and volume without
* velocity or looping.
*
* @param name The sound ID to play.
* @param v The volume of the sound.
* @param p The pitch of the sound.
* @param x The position of the sound (horizontal).
* @param y The position of the sound (vertical).
* @param z The position of the sound (depth).
* @return The internal sound ID.
*/
public String playSound(String name, float v, float p, float x, float y, float z);
/**
* Plays a sound at a specified location at the specified volume without
* velocity or looping and with a normal pitch.
*
* @param name The sound ID to play.
* @param v The volume of the sound.
* @param x The position of the sound (horizontal).
* @param y The position of the sound (vertical).
* @param z The position of the sound (depth).
* @return The internal sound ID.
*/
public String playSound(String name, float v, float x, float y, float z);
/**
* Plays a sound at a specified location without velocity or looping and with a
* normal pitch and full volume.
*
* @param name The sound ID to play.
* @param x The position of the sound (horizontal).
* @param y The position of the sound (vertical).
* @param z The position of the sound (depth).
* @return The internal sound ID.
*/
public String playSound(String name, float x, float y, float z);
/**
* Plays an ambient sound with a specified volume, pitch and position.
*
* @param name The sound ID to play.
* @param v The volume of the sound.
* @param p The pitch of the sound.
* @param x The position of the sound (horizontal).
* @param y The position of the sound (vertical).
* @param z The position of the sound (depth).
* @param loop If the sound should be looped.
* @return The internal sound ID.
*/
public String playAmbient(String name, float v, float p, float x, float y, float z, boolean loop);
/**
* Plays an ambient sound with a specified volume, pitch and position without
* looping.
*
* @param name The sound ID to play.
* @param v The volume of the sound.
* @param p The pitch of the sound.
* @param x The position of the sound (horizontal).
* @param y The position of the sound (vertical).
* @param z The position of the sound (depth).
* @return The internal sound ID.
*/
public String playAmbient(String name, float v, float p, float x, float y, float z);
/**
* Sets the position of the player in 3D coordinates.
*
* @param x The horizontal position of the player.
* @param y The vertical position of the player.
* @param z The depth position of the player.
*/
public void setPlayerPos(float x, float y, float z);
/**
* Sets the velocity of the player in 3D coordinates.
*
* @param x The horizontal velocity of the player.
* @param y The vertical velocity of the player.
* @param z The depth velocity of the player.
*/
public void setPlayerVelocity(float x, float y, float z);
/**
* Gets the factor used to determine how the volume of a sound rolls-off with
* distance.
*
* @return A float value.
*/
public float getDistanceFactor();
/**
* Sets the factor used to determine how the volume of a sound rolls-off with
* distance.
*
* @param rf The roll-off factor
*/
public void setDistanceFactor(float rf);
/**
* Adds a sound.
*
* @param id The sound ID.
* @param loc The location of the sound.
*/
public void addSound(String id, URL loc);
/**
* Gets a piece of ambient music with the specified ID.
*
* @param id The music ID.
* @return An ambient music object or null.
*/
public AmbientMusic getAmbientMusic(String id);
/**
* Maps a piece of ambient music to an ID.
*
* @param id The music ID.
* @param m An ambient music object
*/
public void setAmbientMusic(String id, AmbientMusic m);
/**
* Stops all ambient sounds.
*/
public void stopAmbient();
}
|
Breast cancer wait times: Use of breast screening clinics affects presenting stage.
10686 Background: Wait times in the navigation of the diagnostic and therapeutic system for breast cancer have been increasingly investigated. It is apparent that an earlier diagnosis would lead to an improved prognosis in breast cancer. In Ontario (Canada), Breast Screening Clinics (OBSC) allow direct access of patients to mammograms. METHODS A retrospective review of all breast cancer patients seen in the regional cancer centre in the year 2003 was performed. Wait times between the following events were recorded: first symptom to presentation to the medical system, presentation to mammogram, mammogram to biopsy, biopsy to surgery, and surgery to consultation at the cancer centre. RESULTS In 2003, 277 new cases of breast cancer were seen at the regional cancer centre. Identified median waiting times were as follows: mammogram to diagnostic biopsy, 18.5 days; diagnostic biopsy to definitive surgery, 28 days; surgical consultation to definitive surgery, 13 days; definitive surgery to oncology consultation, 31 days. Some wait times were longer in those patients who did not have close geographic access to an OBSC and a regional cancer centre: mammography to diagnostic biopsy was doubled (17 vs 34 days) and surgical consult to surgical date was doubled (12 vs 26.5 days). Eighty per cent (n = 27) of patients identified by the OBSC presented with Stage I or less breast cancer vs 37% of all other patients. Seventeen per cent of patients seen at the regional cancer centre were less than 50 years of age and not eligible for the OBSC. CONCLUSIONS The wait times reported are in keeping with the current experience in Ontario, Canada. It is most likely that access to a breast-screening clinic allows self-selection of a more highly motivated population. This population of patients consistently presented with earlier-stage and more curable disease. The challenge that remains is to increase the number of patients who access breast-screening clinics. Presently, only 13% of presenting patients seen at the regional cancer centre were identified by the OBSC. We are identifying barriers to the use of this very effective strategy.
Distinction of Physiologic and Epileptic Ripples: An Electrical Stimulation Study
Ripple oscillations (80-250 Hz) are a promising biomarker of epileptic activity, but are also involved in memory consolidation, which impairs their value as a diagnostic tool. Distinguishing physiologic from epileptic ripples has been particularly challenging because, usually, invasive recordings are only performed in patients with refractory epilepsy. Here, we identified healthy brain areas based on electrical stimulation and hypothesized that these regions specifically generate pure ripples not coupled to spikes. Intracranial electroencephalography (EEG) recorded with subdural grid electrodes was retrospectively analyzed in 19 patients with drug-resistant focal epilepsy. Interictal spikes and ripples were automatically detected in slow-wave sleep using the publicly available Delphos software. We found that rates of spikes, ripples and ripples coupled to spikes (spike-ripples) were higher inside the seizure-onset zone (p < 0.001). A comparison of receiver operating characteristic curves revealed that spike-ripples delineated the seizure-onset zone channels slightly, but significantly, better than spikes (p < 0.001). Ripples were more frequent in the eloquent neocortex than in the remaining non-seizure-onset zone areas (p < 0.001). This was due to the higher rates of pure ripples (p < 0.001; median rates 3.3/min vs. 1.4/min), whereas spike-ripple rates were not significantly different (p = 0.87). Pure ripples identified healthy channels significantly better than chance (p < 0.001). Our findings suggest that, in contrast to epileptic spike-ripples, pure ripples are mainly physiological. They may be considered, in addition to electrical stimulation, to delineate eloquent cortex in pre-surgical patients. Since we applied open-source software for detection, our approach may be generally suited to tackle a variety of research questions in epilepsy and cognitive science.
Introduction
High-frequency oscillations (HFOs), traditionally divided into ripples (80-250 Hz) and fast ripples (250-500 Hz), are a promising marker of epileptic activity. They have not only been directly linked to seizures; moreover, there is by now an extensive body of evidence on the value of interictal HFOs: resecting HFO-generating tissue has been associated with seizure-free outcome, HFO rates correlated with response to electrical stimulation, HFOs were suppressed by antiepileptic medication, and ripples may identify patients at risk of developing epilepsy. Nonetheless, HFOs are rarely analyzed in clinical routine settings. One reason may be that the visual identification is time-consuming, an obstacle that may be overcome by the increasing efficiency of automatic detectors. Another key aspect is that ripples are likely involved in memory consolidation, which may open new avenues for cognitive science, but impairs specificity if ripples are analyzed in epilepsy. To develop strategies that reliably distinguish pathologic from physiologic HFOs is thus of imminent importance for researchers from both fields. To design studies on potential approaches is, however, challenging, considering that most intracranial recordings are from patients with epilepsy and that in these subjects, there is both epileptic and physiologic activity. A fruitful approach has been to hypothesize that sleep influences epileptic and physiologic ripples differently.
Following this concept, it was revealed that HFOs from epileptic and 'normal' channels are synchronized to distinct phases of sleep slow waves, and that they are associated with different sleep stages. Moreover, systematic differences in HFO amplitude and frequency have been reported, though these were not always consistent and often showed significant overlap. A third approach has been to define epileptic ripples based on coupling to epileptic spikes. Several recent studies suggest that such 'spike-ripples' may indeed be superior to ripples or spikes regarding the delineation of seizure-generating areas and the prediction of seizure risk. On the other hand, however, one study also found that spike-HFOs performed no better than spikes. The additional value of HFOs is thus still subject to debate. Finally, it has remained unclear whether the remaining 'pure' ripples, i.e., those occurring independently from spikes, are truly physiological. We thus set out, in a retrospective analysis of subdural grid electrode recordings from patients with drug-resistant focal epilepsy, to explore whether such 'pure' ripples are specifically generated in 'healthy' brain areas, identified based on our patients' response to electrical stimulation. Moreover, we aimed to provide additional evidence for the concept that spike-ripples are truly epileptic, which would underline that analyzing coupling to interictal spikes is indeed a promising strategy to distinguish physiologic from epileptic ripples.
Patient Selection
We considered all patients with drug-resistant focal epilepsy who, as part of their evaluation for epilepsy surgery, had undergone the implantation of subdural grid electrodes at the Freiburg Epilepsy Center in 2008 or 2009. From this cohort, subjects with contacts recording from the eloquent cortex, identified based on cortical stimulation, were selected. Recruitment was restricted to the specified period because afterwards a new video-EEG recording system was in use, and we aimed not to examine a mixed dataset obtained with different hardware setups. This study was approved by the Ethics Commission at the University Medical Center Freiburg (No. 69/18) and written informed consent was obtained from all patients.
Electrical Stimulation and Assignment of Grid Contacts
Electrode grids produced by Ad-Tech (Ad-Tech Medical Instrument Corporation, Racine, WI, USA) were implanted after open craniotomy. Grids had 32, 48 or 64 contacts, each with a center-to-center distance of 10 mm and an exposed surface diameter of 2.3 mm. The decisions on the size of the grid, its position and the stimulation protocols were made solely by the attending physician and were thus not influenced by this study. Cortical stimulation was performed during continuous video-EEG monitoring on all contacts, except for those that were obviously damaged, i.e., with frequent artifacts or high impedance. Pairs of electrodes were stimulated at 50 Hz with a biphasic rectangular pulse (width 0.25 ms) for 10 s. Stimulation was repeated every 30 to 60 s, with gradually increased electrical currents (max. 15 mA), until the patient had a seizure or symptoms indicating that the stimulated tissue was involved in a particular function. Typical motor symptoms like clonus, tonic movements or weakness, and various sensory phenomena like paresthesia, hallucinations, nausea or pain were documented in a stimulation protocol, which was retrospectively analyzed by our study team.
To examine verbal impairment, a series of tasks was performed, which typically included an assessment of reading, serial and repetitive language, body commands and a token test. We relied on the assessment of the attending physician, i.e., on data generated independently from this study, to determine whether or not an electrode contact was located on the functional cortex. The seizure onset zone (SOZ) was defined by contacts with a clearly ictal EEG pattern within two seconds of seizure onset. Seizure onset was defined based on EEG and semiology, as part of the clinical routine evaluation and under the supervision of the attending physician. We identified SOZ contacts based on this assessment, i.e., based on data generated independently from this study. All remaining channels were classified as 'non-SOZ' channels.
Detection of Interictal Epileptic Spikes and Ripples
Intracranial EEG was recorded with a Neurofile NT system (IT-Med, Usingen, Germany). The sampling rate was 1024 Hz and a low-pass filter with a 450 Hz cut-off frequency was applied. Forehead electrodes were used as an amplifier ground. For each patient, we selected a 1 h segment of non-rapid eye movement sleep, at least two hours before and after a seizure. We took great care to ensure that this segment contained no clear or only very sparse muscle artifacts. Interictal epileptic spikes and ripples (80-250 Hz) were identified in bipolar montages using the Delphos detector (Version 1.0.1) within the open-source software AnyWave. A detailed description of the algorithm was provided in the original publications, and the detector was benchmarked against previously published methods. The default settings were kept (number of voices 12, vanishing moment 20, threshold 40, oscillation width threshold 1.4, oscillation frequency spread threshold 10, spike width threshold 1.3, spike frequency spread threshold 11). Further analyses were performed with Matlab R2019b (Version 9.7; Mathworks, Natick, MA, USA). Spike-ripples were defined as spikes and ripples that co-occurred within 100 ms.
Quantifying Performance of Different Biomarkers
To quantify how reliably a biomarker could distinguish SOZ from non-SOZ channels, receiver operating characteristic (ROC) curves and their area under the curve (AUC) were computed using the Matlab built-in function 'perfcurve'. We also determined the partial AUC (pAUC) between 100% and 85% specificity, as has been suggested in a previous study comparing the diagnostic values of spikes and high-frequency oscillations in patients with epilepsy. This parameter is particularly useful if ROC curves tend to reach a plateau, reflecting that a classifier performs poorly in a distinct range of thresholds. Reproducing our colleagues' approach, the 'raw' pAUC values were divided by the maximum possible area, i.e., 0.15, to obtain an index ranging from 0 to 1. It may be noteworthy that the expected AUC for random classification is 0.5, whereas the chance-level pAUC, if normalized as in our study, should only be 0.075.
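A minimal sketch of the spike-ripple pairing rule used in the detection step above: a ripple counts as a spike-ripple if any detected spike lies within 100 ms of it, and the remaining ripples count as 'pure'. The event times are placeholders, and this is not the Delphos implementation.

```java
import java.util.ArrayList;
import java.util.List;

/** Illustrative only: label detected ripples as spike-ripples (spike within 100 ms) or pure ripples. */
public class SpikeRippleLabeling {
    static final double WINDOW_S = 0.100; // 100 ms co-occurrence window

    public static void main(String[] args) {
        double[] rippleTimes = {12.30, 47.81, 113.02}; // seconds, placeholder detections
        double[] spikeTimes  = {12.25, 80.10};         // seconds, placeholder detections

        List<Double> spikeRipples = new ArrayList<>();
        List<Double> pureRipples = new ArrayList<>();
        for (double ripple : rippleTimes) {
            boolean coupled = false;
            for (double spike : spikeTimes) {
                if (Math.abs(ripple - spike) <= WINDOW_S) {
                    coupled = true;
                    break;
                }
            }
            if (coupled) {
                spikeRipples.add(ripple);
            } else {
                pureRipples.add(ripple);
            }
        }
        // Rates per minute for a 1 h (60 min) analyzed segment, as used in the study
        System.out.println("Spike-ripple rate/min: " + spikeRipples.size() / 60.0);
        System.out.println("Pure ripple rate/min: " + pureRipples.size() / 60.0);
    }
}
```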
Statistical Analysis
We did not assume that the data were normally distributed. Therefore, the median was specified as a measure of central tendency and the range as a measure of dispersion. Hypothesis tests were performed as two-tailed tests. A significance level of 5% was chosen. Paired data were compared with a Wilcoxon signed-rank test, unpaired data with a Wilcoxon rank sum test, and Spearman's rank order correlation was performed to investigate whether the patient-specific performance of a biomarker was systematically linked to the patient's median spike rate. Furthermore, we conducted a permutation test to investigate whether spike-ripples performed significantly better than spikes regarding the distinction of SOZ from non-SOZ channels. First, for each channel, the spike-ripple and spike rates were transformed to biomarker-specific ranks, i.e., a spike-ripple and a spike rank, relative to the remaining channels, was assigned to each channel. It may be noteworthy that this step does not alter the two ROC curves, because those are invariant to monotone increasing transformations of the measurement scale. We then calculated the AUC difference based on our empirical data: Diff(AUC)_empirical = AUC_spike-ripples − AUC_spikes. Then, for each channel independently, we randomly swapped group labels, and again computed Diff(AUC) ('Diff(AUC)_surrogate'). This step was repeated 100,000 times to compute a distribution of Diff(AUC)_surrogate. The performance of spike-ripples and spikes was considered significantly different if Diff(AUC)_empirical ranked above 97.5% or below 2.5% of all Diff(AUC)_surrogate. A detailed investigation of the properties of such an approach can be found elsewhere. This procedure was also performed for the pAUC difference, and finally, in a slightly adapted fashion, for the functional vs. remaining non-SOZ classification.
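A compact sketch of the label-swapping permutation test described above: per channel, the two biomarker values are exchanged at random, the AUC difference is recomputed, and the empirical difference is compared against the surrogate distribution. The AUC helper uses the pairwise-comparison (Mann-Whitney) formulation, which is invariant to the rank transformation mentioned in the text. Channel values and group labels below are placeholders, not study data, and the simple two-tailed p-value shown is a simplified variant of the 2.5%/97.5% rank criterion.

```java
import java.util.Random;

/**
 * Illustrative only: permutation test for the AUC difference between two
 * biomarkers (e.g. spike-ripple rate vs. spike rate) over the same channels.
 */
public class AucPermutationTest {

    /** AUC = probability that a positive (SOZ) channel scores higher than a negative one (ties count 0.5). */
    static double auc(double[] scores, boolean[] isSoz) {
        double sum = 0;
        int nPos = 0, nNeg = 0;
        for (int i = 0; i < scores.length; i++) {
            if (!isSoz[i]) continue;
            nPos++;
            for (int j = 0; j < scores.length; j++) {
                if (isSoz[j]) continue;
                if (scores[i] > scores[j]) sum += 1.0;
                else if (scores[i] == scores[j]) sum += 0.5;
            }
        }
        for (boolean soz : isSoz) if (!soz) nNeg++;
        return sum / (nPos * nNeg);
    }

    public static void main(String[] args) {
        // per-channel rates (placeholders): biomarker A = spike-ripples, biomarker B = spikes
        double[] a = {4.1, 2.0, 0.2, 3.5, 0.1, 0.0, 1.2, 0.3};
        double[] b = {3.0, 2.5, 0.4, 1.0, 0.6, 0.1, 1.5, 0.2};
        boolean[] isSoz = {true, true, true, true, false, false, false, false};

        double empiricalDiff = auc(a, isSoz) - auc(b, isSoz);

        int nPerm = 100_000, moreExtreme = 0;
        Random rng = new Random(42);
        for (int p = 0; p < nPerm; p++) {
            double[] pa = a.clone(), pb = b.clone();
            for (int c = 0; c < a.length; c++) {
                if (rng.nextBoolean()) { // swap the two biomarker values of this channel
                    double tmp = pa[c]; pa[c] = pb[c]; pb[c] = tmp;
                }
            }
            double surrogateDiff = auc(pa, isSoz) - auc(pb, isSoz);
            if (Math.abs(surrogateDiff) >= Math.abs(empiricalDiff)) moreExtreme++;
        }
        System.out.println("Diff(AUC) empirical = " + empiricalDiff);
        System.out.println("two-tailed p ~ " + (double) moreExtreme / nPerm);
    }
}
```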
Again, the rates of interictal spikes (p = 0.001, Wilcoxon signedrank test; n = 18 patients), ripples (p = 0.004) and spike-ripples (p < 0.001) were higher inside the SOZ. In summary, these findings suggest that the analysis of all three biomarkers in subdural grid electrode recordings reveals localizing information on seizure-generating neocortical regions. Brain Sci. 2021, 11, x FOR PEER REVIEW 6 of 14 biomarkers in subdural grid electrode recordings reveals localizing information on seizure-generating neocortical regions. Systematic Comparison of Diagnostic Value To compare how reliably these biomarkers could generally distinguish SOZ from non-SOZ channels, we analyzed their receiver operating characteristic (ROC) curves Systematic Comparison of Diagnostic Value To compare how reliably these biomarkers could generally distinguish SOZ from non-SOZ channels, we analyzed their receiver operating characteristic (ROC) curves (Figure 2A). The area under the curve (AUC), as the standard parameter quantifying performance, was the highest for spike-ripples (0.664), intermediate for ripples (0.640) and lowest for spikes (0.628). Noticing that the shape of these curves was especially different for high specificities, we also computed the partial AUC (pAUC) between 100% and 85%, which was also highest for spike-ripples (0.252), intermediate for ripples (0.244) and lowest for spikes (0.181). Permutation-based testing revealed that the difference between spike-ripples and spikes was significant, both for AUC (p < 0.001; Figure 2B) and pAUC (p < 0.001; Figure 2C). It can thus be concluded that in our dataset, focusing on interictal spikes with a co-occurring ripple oscillation improved our identification of SOZ channels significantly. (p < 0.001; Figure 2C). It can thus be concluded that in our dataset, focusing on interictal spikes with a co-occurring ripple oscillation improved our identification of SOZ channels significantly. Diagnostic Value at the Level of Individual Patients Considering that the impact of a biomarker on clinical decision-making depends on its performance in individual patients, we also computed patient-specific ROC curves, and the corresponding AUC and pAUC ( Figure 2D). For both spike-ripples and spikes, a broad range of values was obtained, reflecting that the SOZ could be nicely delineated in several patients, whereas performance was poor in others. In summary, there was no significant difference between spike-ripples and spikes in our group of patients (AUC: p = 0.71, pAUC: p = 0.29; Wilcoxon signed-rank test; n = 18 patients). There was a significant correlation between our patients' median spike rate and their pAUC ( Figure 2E), both for spike-ripples (rho = 0.68, p = 0.002; Spearman's rank order correlation) and spikes (rho = 0.54, p = 0.02). Moreover, the patient-specific pAUC difference between spike-ripple-and spike-based classification correlated with the subject's median spike rate (rho = 0.48, p = 0.042). These results suggest that both biomarkers perform better in patients with many spikes and that in these individuals, spike-ripples may be superior. Interictal spike-ripples identify SOZ channels slightly but significantly better than spikes. (A) ROC curves, SOZ vs. non-SOZ classification, channels pooled across patients. The AUC for spike-ripples (red line, 0.664) was slightly higher than for all spikes (blue line, 0.628). (B) To investigate whether this difference was significant, we applied permutation-based hypothesis testing. 
The probability of obtaining an AUC difference that is as high as or higher than our empirically measured value (bold vertical line) by chance (grey bars represent surrogate data) was less than 5 % (two-tailed p < 0.001). (C) Same analysis for pAUC (between 85 and 100 % specificity), as applied in. Again, spike ripples classified significantly better (p < 0.001). (D) At the level of individual patients, there was no significant difference between the pAUC for spike-ripples and spikes (p = 0.29). Note the broad range of values, reflecting that the SOZ could be nicely delineated in Figure 2. Interictal spike-ripples identify SOZ channels slightly but significantly better than spikes. (A) ROC curves, SOZ vs. non-SOZ classification, channels pooled across patients. The AUC for spikeripples (red line, 0.664) was slightly higher than for all spikes (blue line, 0.628). (B) To investigate whether this difference was significant, we applied permutation-based hypothesis testing. The probability of obtaining an AUC difference that is as high as or higher than our empirically measured value (bold vertical line) by chance (grey bars represent surrogate data) was less than 5 % (two-tailed p < 0.001). (C) Same analysis for pAUC (between 85 and 100 % specificity), as applied in. Again, spike ripples classified significantly better (p < 0.001). (D) At the level of individual patients, there was no significant difference between the pAUC for spike-ripples and spikes (p = 0.29). Note the broad range of values, reflecting that the SOZ could be nicely delineated in several patients, whereas performance was poor in others. (E) Both biomarkers classify better in patients with many interictal spikes. There was a significant correlation between our patients' median spike rate and their pAUC, both for spike-ripples (red, p = 0.002) and spikes (blue, p = 0.02). Each dot corresponds to one patient. (F) Spike-ripples are a better classifier in patients with many spikes. There was a significant correlation between our patients' median spike rate and their pAUC difference between spike-ripples and spikes (p = 0.042). Diagnostic Value at the Level of Individual Patients Considering that the impact of a biomarker on clinical decision-making depends on its performance in individual patients, we also computed patient-specific ROC curves, and the corresponding AUC and pAUC ( Figure 2D). For both spike-ripples and spikes, a broad range of values was obtained, reflecting that the SOZ could be nicely delineated in several patients, whereas performance was poor in others. In summary, there was no significant difference between spike-ripples and spikes in our group of patients (AUC: p = 0.71, pAUC: p = 0.29; Wilcoxon signed-rank test; n = 18 patients). There was a significant correlation between our patients' median spike rate and their pAUC ( Figure 2E), both for spike-ripples (rho = 0.68, p = 0.002; Spearman's rank order correlation) and spikes (rho = 0.54, p = 0.02). Moreover, the patient-specific pAUC difference between spike-ripple-and spike-based classification correlated with the subject's median spike rate (rho = 0.48, p = 0.042). These results suggest that both biomarkers perform better in patients with many spikes and that in these individuals, spike-ripples may be superior. 'Pure' Ripples in Eloquent Cortex Our main aim was to investigate whether 'pure' ripples, i.e., those occurring independently from spikes, are more frequent in the eloquent neocortex. 
'Pure' Ripples in Eloquent Cortex

Our main aim was to investigate whether 'pure' ripples, i.e., those occurring independently from spikes, are more frequent in the eloquent neocortex. First, we therefore identified channels with a functional response to stimulation and compared them to all remaining non-SOZ channels. The ripple rate in general was higher in the eloquent cortex than in the remaining non-SOZ channels (p < 0.001; Wilcoxon rank sum test; eloquent cortex: n = 311 channels, remaining non-SOZ: n = 282 channels; Figure 3), and this difference was due to a significant difference in 'pure' ripples (p < 0.001; median rates 3.3/min vs. 1.4/min). Spike-ripple rates, in contrast, were not significantly different (p = 0.87). To perform a similar analysis at the level of patients, we then compared each subject's median channel rates between the two groups, in analogy to the SOZ vs. non-SOZ comparison described above. Again, ripples (p = 0.03, Wilcoxon signed-rank test; n = 19 patients) and specifically 'pure' ripples (p = 0.02) were more frequent in eloquent cortex, whereas spike-ripples did not differ significantly (p = 0.98). It can thus be concluded that neocortical 'pure' ripples likely reflect physiologic fast oscillatory activity.

Delineation of Eloquent Cortex: Across and in Individual Patients

Finally, we explored whether 'pure' ripples might be of value regarding the delineation of the functional neocortex in pre-surgical patients with epilepsy. To this end, we also computed a receiver operating characteristic (ROC) curve for the functional vs. remaining non-SOZ classification (Figure 4A). Across patients, 'pure' ripples identified functional tissue significantly better than chance (AUC = 0.64; p < 0.001, permutation test; Figure 4B). In line with these findings, we found that 'pure' ripples nicely delineated an eloquent neocortex in some patients (Figure 5). Our data thus suggest that 'pure' ripples could be of value, in addition to electrical stimulation, if functional regions have to be mapped, but to what extent they are a reliable tool clearly has to be examined in a larger systematic study.

Figure 4. To investigate whether 'pure' ripples identify eloquent regions significantly better than chance, we again applied permutation-based testing. The probability of obtaining an AUC that is as high as or higher than our empirically measured value (bold vertical line) by chance (grey bars represent surrogate data) was less than 5% (two-tailed p < 0.001).

Figure 5. Delineation of eloquent neocortex based on 'pure' ripples in patient 9. Schematic illustrates approximate location of grid electrodes; MRI sections below are displayed for reference. Each filled circle represents one bipolar channel, with its radius proportional to its 'pure' ripple rate. Note that 'pure' ripples were particularly frequent in clearly functional neocortex, identified based on electrical stimulation. In this patient, these regions correspond nicely to the typical location of Broca's area, primary motor and sensory cortex.

Discussion

The main novel finding of this study is that in interictal recordings from subdural grid electrodes, 'pure' ripples are more frequent in the unequivocally functional cortex, identified based on electrical stimulation. Moreover, we report that spike-ripples identify seizure onset better than spikes, which underlines that they are highly pathologic.
Various aspects of these findings shall be discussed in detail below.

'Pure' Ripples: A Reproducible Marker of Eloquent Neocortex

Our main finding is that 'pure' ripples, i.e., ripples not associated with spikes, occur significantly more often in the eloquent neocortex than in remaining non-SOZ areas. The rates we report are very similar to a recent study on physiologic HFOs, which also found the highest rates in typically eloquent regions. This may be noteworthy because the dataset we analyzed was different in several important aspects, and because we applied a different algorithm for automated detection. Furthermore, we provide solid evidence for the concept that our 'pure' ripples are truly physiological, because they were recorded from unequivocally functional neocortex, as demonstrated by stimulation, and because the difference between eloquent and remaining non-SOZ channels was only significant for 'pure' ripples, and not for spike-ripples. Together with a previous study on specific coupling to slow waves, these data suggest that HFOs in 'healthy' tissue are generated by distinct mechanisms. Our recordings covered a variety of regions, including primary motor and sensory cortex, Broca's and Wernicke's area. It thus seems unlikely that 'pure' ripples are directly linked to only one specific function, but future studies will have to investigate such questions in detail. Another aspect is that the stimulation as such suppresses HFOs, and it may be interesting to explore whether this effect applies particularly to physiological HFOs. In any case, it can be concluded that 'pure' ripples are indeed a marker of eloquent regions, and that they probably reflect physiologic brain activity.

Delineation of Epileptogenic Tissue: Are Spike-Ripples a Better Biomarker?

We report that interictal spikes with a co-occurring ripple oscillation identify seizure onset zone (SOZ) channels significantly better than spikes. This finding is in line with previous work, and the stereotyped coupling of HFOs to spike-slow waves may also be characteristic of epileptogenic areas. At the level of individual patients, however, there was no significant difference between the two biomarkers, which is consistent with a previous study analyzing stereotactic electroencephalography in 30 patients. Therefore, in summary, do spike-ripples provide additional value, or not? It should be acknowledged that both ours and the previous study had limited power to detect differences at the patient level, and our channel-based analysis does suggest that spike-ripples are slightly, but significantly, superior. Furthermore, our study underlines that patient-specific features must be taken into account: while performance differed substantially between individuals, spike-ripples were particularly better if high specificity was required, and in patients with many spikes. In conclusion, spike-ripples may not be suited to delineate non-epileptogenic areas, but they are superior if clearly pathologic tissue has to be identified.

Interactions between Spikes and Ripples

High-frequency oscillations (HFOs) sometimes co-occur with spikes, and interactions between these two kinds of activity have been examined since the first reports on HFOs in humans. A potential confounder in any such study is that high-pass filtering of sharp transients may artificially generate 'false' ripples. Such effects can be minimized by using filters with a finite impulse response, by implementing a criterion of, e.g., at least four oscillatory cycles, or by performing a time-frequency decomposition as in the detection algorithm we applied. Despite these methodological challenges, several lines of evidence suggest that spike-ripples are a distinct pathophysiological phenomenon: ripples riding on spikes are frequently visible even in unfiltered traces, recent studies have revealed their value in different clinical scenarios, and we have reported that spikes with HFOs have a distinct single-neuron correlate. The current study demonstrates that spike-ripples and 'pure' ripples are two distinct kinds of activity, the former being epileptic and the latter physiological, which clearly underlines the relevance of understanding interactions between spikes and HFOs.

Limitations and Outlook

This study is limited in several ways. Focusing on subdural grid recordings from neocortical tissue, we obtained a relatively homogeneous dataset, but this also implies that our conclusions do not necessarily apply to other regions, such as the mesial temporal lobe. Another issue is that spikes and ripples were detected by an algorithm that had not been developed at our center, based on data from depth electrodes recorded with a setup from a different company. We decided not to modify the default settings of this detector, and one might speculate that, had we done so, its accuracy, and thus the performance of our biomarkers, would have improved. On the other hand, we thus demonstrated that HFO analysis is feasible without time-consuming visual identification, 'optimization' or even the new development of an automated algorithm. In addition, the distinction of epileptic and physiologic HFOs may be improved by integrating our coupling-based strategy into a multivariate classifier which also takes other HFO features or interactions with sleep into account. Such approaches promise to open up new avenues for a variety of research questions, in both epilepsy and cognitive science.

Conclusions

This study suggests that 'pure' ripples are physiological and a marker of eloquent neocortex. Spike-ripples, in contrast, provide additional value regarding the delineation of epileptogenic tissue. Considering that both types of events were detected with open-source software that was not developed at our center, our approach should make it possible to efficiently tackle a variety of research questions, in both epilepsy and cognitive science.
/*
* Copyright (c) 2013-2020 <NAME> <<EMAIL>>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/param.h>
#include <sys/epoll.h>
#include <sys/sendfile.h>
#include <sys/syscall.h>
#include <sched.h>
#include "kore.h"
#include "seccomp.h"
#if defined(KORE_USE_PGSQL)
#include "pgsql.h"
#endif
#if defined(KORE_USE_TASKS)
#include "tasks.h"
#endif
static int efd = -1;
static u_int32_t event_count = 0;
static struct epoll_event *events = NULL;
void
kore_platform_init(void)
{
long n;
kore_seccomp_init();
if ((n = sysconf(_SC_NPROCESSORS_ONLN)) == -1) {
kore_debug("could not get number of cpu's falling back to 1");
cpu_count = 1;
} else {
cpu_count = (u_int16_t)n;
}
}
void
kore_platform_worker_setcpu(struct kore_worker *kw)
{
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(kw->cpu, &cpuset);
if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) == -1) {
kore_debug("kore_worker_setcpu(): %s", errno_s);
} else {
kore_debug("kore_worker_setcpu(): worker %d on cpu %d",
kw->id, kw->cpu);
}
}
void
kore_platform_event_init(void)
{
if (efd != -1)
close(efd);
if (events != NULL)
kore_free(events);
if ((efd = epoll_create(10000)) == -1)
fatal("epoll_create(): %s", errno_s);
event_count = worker_max_connections + nlisteners;
events = kore_calloc(event_count, sizeof(struct epoll_event));
}
void
kore_platform_event_cleanup(void)
{
if (efd != -1) {
close(efd);
efd = -1;
}
if (events != NULL) {
kore_free(events);
events = NULL;
}
}
void
kore_platform_event_wait(u_int64_t timer)
{
u_int32_t r;
struct kore_event *evt;
int n, i, timeo;
if (timer == KORE_WAIT_INFINITE)
timeo = -1;
else
timeo = timer;
n = epoll_wait(efd, events, event_count, timeo);
if (n == -1) {
if (errno == EINTR)
return;
fatal("epoll_wait(): %s", errno_s);
}
if (n > 0) {
kore_debug("main(): %d sockets available", n);
}
r = 0;
for (i = 0; i < n; i++) {
if (events[i].data.ptr == NULL)
fatal("events[%d].data.ptr == NULL", i);
r = 0;
evt = (struct kore_event *)events[i].data.ptr;
if (events[i].events & EPOLLIN)
evt->flags |= KORE_EVENT_READ;
if (events[i].events & EPOLLOUT)
evt->flags |= KORE_EVENT_WRITE;
if (events[i].events & EPOLLERR ||
events[i].events & EPOLLHUP ||
events[i].events & EPOLLRDHUP)
r = 1;
evt->handle(events[i].data.ptr, r);
}
}
void
kore_platform_event_all(int fd, void *c)
{
kore_platform_event_schedule(fd,
EPOLLIN | EPOLLOUT | EPOLLRDHUP | EPOLLET, 0, c);
}
void
kore_platform_event_schedule(int fd, int type, int flags, void *udata)
{
struct epoll_event evt;
kore_debug("kore_platform_event_schedule(%d, %d, %d, %p)",
fd, type, flags, udata);
evt.events = type;
evt.data.ptr = udata;
if (epoll_ctl(efd, EPOLL_CTL_ADD, fd, &evt) == -1) {
if (errno == EEXIST) {
if (epoll_ctl(efd, EPOLL_CTL_MOD, fd, &evt) == -1)
fatal("epoll_ctl() MOD: %s", errno_s);
} else {
fatal("epoll_ctl() ADD: %s", errno_s);
}
}
}
void
kore_platform_schedule_read(int fd, void *data)
{
kore_platform_event_schedule(fd, EPOLLIN | EPOLLET, 0, data);
}
void
kore_platform_schedule_write(int fd, void *data)
{
kore_platform_event_schedule(fd, EPOLLOUT | EPOLLET, 0, data);
}
void
kore_platform_disable_read(int fd)
{
if (epoll_ctl(efd, EPOLL_CTL_DEL, fd, NULL) == -1)
fatal("kore_platform_disable_read: %s", errno_s);
}
void
kore_platform_enable_accept(void)
{
struct listener *l;
struct kore_server *srv;
kore_debug("kore_platform_enable_accept()");
LIST_FOREACH(srv, &kore_servers, list) {
LIST_FOREACH(l, &srv->listeners, list)
kore_platform_event_schedule(l->fd, EPOLLIN, 0, l);
}
}
void
kore_platform_disable_accept(void)
{
struct listener *l;
struct kore_server *srv;
kore_debug("kore_platform_disable_accept()");
LIST_FOREACH(srv, &kore_servers, list) {
LIST_FOREACH(l, &srv->listeners, list) {
if (epoll_ctl(efd, EPOLL_CTL_DEL, l->fd, NULL) == -1) {
fatal("kore_platform_disable_accept: %s",
errno_s);
}
}
}
}
void
kore_platform_proctitle(const char *title)
{
kore_proctitle(title);
}
#if defined(KORE_USE_PLATFORM_SENDFILE)
int
kore_platform_sendfile(struct connection *c, struct netbuf *nb)
{
off_t smin;
ssize_t sent;
size_t len, prevoff;
prevoff = nb->fd_off;
smin = nb->fd_len - nb->fd_off;
len = MIN(SENDFILE_PAYLOAD_MAX, smin);
resend:
sent = sendfile(c->fd, nb->file_ref->fd, &nb->fd_off, len);
if (sent == -1) {
if (errno == EAGAIN) {
c->evt.flags &= ~KORE_EVENT_WRITE;
return (KORE_RESULT_OK);
}
return (KORE_RESULT_ERROR);
}
if (nb->fd_off - prevoff != (size_t)len)
goto resend;
if (sent == 0 || nb->fd_off == nb->fd_len) {
net_remove_netbuf(c, nb);
c->snb = NULL;
}
return (KORE_RESULT_OK);
}
#endif
void
kore_platform_sandbox(void)
{
kore_seccomp_enable();
}
|
def to_native_units(self, motor):
    """Convert this speed (in rotations per second) into the motor's native
    speed units by scaling against the motor's maximum rated speed."""
    assert abs(self.rotations_per_second) <= motor.max_rps, \
        "invalid rotations-per-second: {} max RPS is {}, {} was requested".format(
            motor, motor.max_rps, self.rotations_per_second)
    return self.rotations_per_second / motor.max_rps * motor.max_speed
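A minimal usage sketch of the method above. The Motor and SpeedRPS classes here are illustrative stand-ins: the attribute names max_rps and max_speed are taken from the method itself, everything else (class names, numbers) is assumed and does not belong to whatever motor library this fragment was extracted from.

# Illustrative stand-ins only; numbers invented for the example.
class Motor:
    def __init__(self, max_rps, max_speed):
        self.max_rps = max_rps        # maximum rotations per second the motor supports
        self.max_speed = max_speed    # native speed value corresponding to max_rps

class SpeedRPS:
    def __init__(self, rotations_per_second):
        self.rotations_per_second = rotations_per_second

    def to_native_units(self, motor):
        assert abs(self.rotations_per_second) <= motor.max_rps, \
            "invalid rotations-per-second: {} max RPS is {}, {} was requested".format(
                motor, motor.max_rps, self.rotations_per_second)
        return self.rotations_per_second / motor.max_rps * motor.max_speed

motor = Motor(max_rps=2.6, max_speed=1050)
print(SpeedRPS(1.3).to_native_units(motor))   # 525.0, i.e. half of the native maximum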
The KATRIN pre-spectrometer at reduced filter energy The Karlsruhe Tritium Neutrino (KATRIN) experiment will determine the mass of the electron neutrino with a sensitivity of 0.2 eV (90% CL) via a measurement of the -spectrum of gaseous tritium near its endpoint of E0 = 18.57 keV. An ultra-low background of about b = 10 mHz is among the requirements on reaching this sensitivity. In the KATRIN main beam line, two spectrometers of MAC-E filter type are used in tandem configuration. This setup, however, produces a Penning trap, which could lead to increased background. We have performed test measurements showing that the filter energy of the pre-spectrometer can be reduced by several keV in order to diminish this trap. These measurements were analyzed with the help of a complex computer simulation, modeling multiple electron reflections from both the detector and the photoelectric electron source used in our test setup. The KATRIN Experiment The KATRIN experiment will determine the mass of the electron antineutrino m (eff) e = m 2 i |U ei | 2 via a high precision measurement of the -decay kinematics at the endpoint E 0 = 18.57 keV of the -spectrum of tritium with a sensitivity of 0.2 eV (90% C.L.). The latest upper limits obtained with this modelindependent method and the isotope tritium as -emitter are from the experiments at Mainz: m (ef f ) e < 2.3 eV (95% C.L.) and Troitsk: m (ef f ) e < 2.05 eV (95% C.L.). Figure 1 shows a schematic overview over the 70 m long KATRIN setup: T 2 -gas with an activity of 10 11 Bq is recirculated in the so-called windowless gaseous tritium source (WGTS). -decay electrons are guided by a magnetic field towards the spectrometers. Both the pre-and main spectrometer are of the MAC-E filter type. Figure 1. Overview over the KATRIN experiment with the potentials and magnetic field strengths given in the KATRIN design report. With these values, the setup contains a Penning trap for electrons between the pre-and main-spectrometer at B sol = 4.5 T. A change of the pre-spectrometer potential from U PS = −18.27 kV by several kV towards smaller absolute values will diminish this trap. Electrons are guided through these spectrometers along the magnetic field lines and are decelerated by an electric filter potential U f in each of the spectrometers. U f is the high voltage with negative polarity applied to the spectrometer, or more exactly, to its inner electrode system. In KATRIN, we use the independent filter potentials U f = U PS for the pre-and U f = U MS for the main-spectrometer. Only those electrons with an energy E larger than the filter energy qU f (see Section 2) are transmitted through a spectrometer and are reaccelerated to their original energy. Here q = −e is KATRIN will not be able to resolve the different neutrino mass eigenstates m i, but will determine a weighted average m (eff) of the neutrino mass states m i according to their mixing U ei with the electron neutrino. the negative electron charge, and the filter energy qU f is the maximum potential energy of an electron in the spectrometers. The pre-spectrometer (PS) with an energy resolution of ∆E PS ≈ 100 eV is the first filter for the -decay electrons. It reduces the flux of -decay electrons into the main-spectrometer (MS), lowering the rate of background electrons created in collisions with residual gas molecules. Having an energy resolution of ∆E MS = 0.93 eV, the MS scans the last 30 eV of the T 2 -spectrum which contain the information on the neutrino mass. 
Finally, the electrons transmitted by the MS are counted by a 148 pixel PIN diode with an energy resolution of ∆E det ≈ 1 keV. The motivation for our investigations is the following: Inside the MS, the -decay electrons can start multi-step processes leading to free electrons. These can be accelerated towards the detector and to energies around 18 keV by the MS potential. The energy resolution of KATRIN's detector is about ∆E det ≈ 1 keV. Therefore, these electrons cannot be distinguished from signal electrons produced by the tritium -decay. Thus, the background can rise above KATRIN requirement of b = 10 mHz. Therefore, the flux of -decay electrons into the MS should be kept low. The flux of -electrons can be minimized by keeping the filter potentials of MS and PS relatively close (e.g. U PS = −18.27 kV and U MS ≈ −18.57 kV). However, using the B-fields (B sol = 4.5 T, Fig. 1) mentioned in the KATRIN design report the region between the two spectrometers is a Penning trap for electrons. By multi step processes this trap can lead to increased background as well. A reduction of the pre-spectrometer potential from U PS = −18.27 kV by several kV towards smaller absolute values will diminish this trap. The optimum value of U PS which minimizes the background has to be determined experimentally. If qU PS is reduced by several keV however, the -electrons with energies close to E 0 = 18.57 keV will retain a surplus energy E sur = E 0 − qU PS in the order of several keV inside the PS. Thus, electrons have higher speed and may no longer be guided by the magnetic field. This behaviour, leading to transmission losses, was already observed in the MAC-E filter of the Mainz Neutrino Mass Experiment. For KATRIN, 100% transmission above the PS filter energy qU PS is required. We present two main results in this publication: (i) The requirement of 100% transmission at reduced PS filter energy qU PS is fulfilled. If it should turn out that the Penning trap between the two spectrometers cannot be suppressed by other means, the PS filter energy can be reduced by many keV in order to overcome this problem. (ii) The KATRIN collaboration is able to model the electron transport and the electron backscattering at the detector with high precision in agreement with experimental data. This allows detailed investigations of the experimental setup. This publication is organized as follows: In section 2 we review the operation principle of the MAC-E filter, Section 3 presents our experimental setup; our measurements and their analysis via custom simulation tools are presented in Sections 4 to 6. Section 7 discusses the background and adiabaticity at reduced PS filter energy in detail. Finally, our findings are summarized in Section 8. The MAC-E Filter Technique In this Section, we explain the principle of a MAC-E filter under standard conditions, i.e. when the filter energy qU f is close to (a few 10 eV) the energy E of the incoming electrons. The principle of a MAC-E filter is illustrated in Fig. 2: Two identical solenoids provide a guiding magnetic field. Ignoring drift motions which appear as higher order corrections, the -electrons enter the MAC-E filter and follow the guiding magnetic field lines along helix-like trajectories resulting from the cyclotron motion. 
This statement is true if the relative changes of the electric and magnetic field strength within a cyclotron length l_cyc are small (Eq. 1). Here, v_∥ is the electron velocity parallel to the guiding magnetic field line, ω_cyc the cyclotron frequency, m_e the electron mass, q = −e the negative electron charge and γ the relativistic factor. If Eq. 1 holds, there is a conserved adiabatic invariant (Eq. 2), where μ denotes the orbital magnetic moment of the electron (see Section 12.5 in ). E_⊥ = E_kin sin²(θ) is the fraction of the kinetic energy E_kin which can be attributed to the motion around the guiding B-field line. θ is the polar angle between the guiding B-field line and the electron momentum vector. In the following, we will use the symbol φ for the corresponding azimuthal angle. E_∥ = E_kin cos²(θ) is the fraction of the kinetic energy connected to the forward motion of the electron. Only E_∥ is analyzed by the MAC-E filter. p_⊥ denotes the fraction of the electron momentum perpendicular to the guiding B-field line. For KATRIN, the maximum electron energy is E_0 = 18.57 keV, thus one has γ ≤ 1.04. Therefore, μ = E_⊥/B (Eq. 3) is a good approximation for the conserved quantity, especially when electrons are slowed down by the electric field in the MAC-E filter. From Eq. 3 it is clear that the polar angle θ and E_⊥ are completely determined by the B-field and the kinetic energy of the electron. As B decreases towards the analyzing plane of the MAC-E filter, E_⊥ is minimized, providing the good energy resolution of the MAC-E filter. Electrons are guaranteed to be transmitted along the guiding B-field line if their initial energy is large enough to overcome the spectrometer potential. The transmission probability T(E, qU_f) of a MAC-E filter is derived from Eq. 3 by integrating over all electrons which fulfill 0 < E_∥ = (E − qU_f) − E_⊥ in the central plane of the MAC-E filter. These electrons start at ground potential. Isotropically emitted electrons with starting energy E and θ from 0° to 90° have to be considered. Eq. 2 is used to transform E_⊥ between the origin of the electrons, with field B_s, and the analyzing plane, with the minimum B-field strength B_ana (Eq. 4). Thus, the transmission probability T(E, qU_f) only depends on the magnetic field strengths, on the energy E of the incoming electron and on qU_f, the filter energy. Below the interval specified in Eq. 4, the transmission probability is zero; above this interval it is unity (Fig. 2). The transmission function describes an energy high-pass filter: only electrons with an energy E above the filter potential qU_f are transmitted. The energy resolution of the MAC-E filter ΔE_f is equal to the maximum E_⊥ of an electron in its analyzing plane. E_⊥ in the analyzing plane is maximal if the polar angle θ is equal to 90° in the entry-side magnet. The conservation of μ allows to compute the resolution ΔE_f = E · B_ana/B_s (Eq. 5). Considering electrons starting in the entry-side solenoid of the PS, one has to insert B_s = B_sol = 4.5 T and B_ana,PS = 0.016 T. The formula gives an energy resolution ΔE_PS = 64 eV if the PS is used with E = 18 keV electrons (cf. right side of Fig. 2). For a MAC-E filter and in the adiabatic approximation, the Lorentz force does not only result in a helix-like motion around the guiding magnetic field line but also in an azimuthal magnetron drift around the spectrometer symmetry axis.

Figure 3. Left: the orbit of a charged particle in a uniform B-field. Right: the orbit of a charged particle in a non-uniform B-field.
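The relations sketched above (Eqs. 2-5) can be made concrete with a few lines of code. The snippet below is a numerical illustration in Python, not the collaboration's analysis code: the transmission formula is the textbook MAC-E filter expression for an isotropic source at ground potential and may differ in detail from the paper's Eq. 4, and the angle mapping assumes a fixed kinetic energy (no retardation between the two field values).

import numpy as np

B_S = 4.5      # T, field at the electron's starting point (entry-side solenoid)
B_ANA = 0.016  # T, minimum field in the PS analyzing plane

def filter_width(E, B_s=B_S, B_ana=B_ANA):
    # Energy resolution of the MAC-E filter, Delta E = E * B_ana / B_s (Eq. 5)
    return E * B_ana / B_s

def polar_angle(theta_deg, B_from, B_to):
    # Adiabatic mapping of the polar angle via sin^2(theta)/B = const (Eqs. 2-3),
    # valid as long as the kinetic energy does not change between the two points.
    s = np.sin(np.radians(theta_deg)) * np.sqrt(B_to / B_from)
    return np.degrees(np.arcsin(s))

def transmission(E, qU, B_s=B_S, B_ana=B_ANA):
    # Textbook MAC-E transmission function for an isotropic source at ground potential.
    surplus = E - qU
    if surplus <= 0.0:
        return 0.0
    if surplus >= filter_width(E, B_s, B_ana):
        return 1.0
    return 1.0 - np.sqrt(1.0 - surplus / E * B_s / B_ana)

print(filter_width(18e3))               # 64.0 eV, the value quoted in the text
print(polar_angle(90.0, B_S, B_ANA))    # ~3.4 deg: E_perp is almost fully converted to E_par
print(transmission(18e3, 18e3 - 32.0))  # ~0.29: partial transmission inside the 64 eV window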
Electrons which pass the MAC-E filter on its symmetry axis gyrate around the central magnetic field line in a helical cyclotron motion. Since the magnetic field is axially symmetric and the cyclotron radius is changing slowly, the electrons are in a quasi-constant magnetic field. For off-axis electrons, however, the magnetic field is asymmetric during a cyclotron motion. The radius of curvature of the electron trajectory is smaller in a stronger B-field. Therefore, the gradient ∇B results in an azimuthal B × ∇B drift of the guiding center along a circle with constant magnetic field (Fig. 3). The lower qU_PS is in our investigations, the larger are E_⊥ and E_∥ and the azimuthal drift. The following section will show that our PS data can only be understood if the azimuthal drift is taken into account.

The Pre-Spectrometer Test Setup

This section describes the experimental setup used to measure the PS transmission with E = 18 keV electrons and a PS filter energy qU_PS down to 1 keV, so that electrons retain a surplus energy E_sur = E − qU_PS of up to 17 keV in the PS. The B-field of the PS test setup (Fig. 4) is generated by two solenoids (B). The inner electrode system of the PS tank (G) consists of four parts: the ground electrodes (C) define the potential at the entry and exit of the PS, the shielding electrodes (D) were introduced to avoid a Penning trap leading to background, electrodes (E) are conical metal shields, and the central part (F) is a wire electrode. The tank (G) and the shielding electrodes (D) are electrically connected. Both the tank and the electrodes are on negative high voltage. There is a longitudinal gap splitting electrodes (E) and (F) into a left and right half. For our measurements, the left and right dipole half were electrically connected and therefore supplied with identical voltages. In Fig. 4, the dark blue arrow illustrates an electron trajectory.

Previous measurements with this setup and a pressure of 10^−10 mbar inside the PS resulted in an average background rate of 17 ± 0.4 mHz in the energy window from 15 to 21 keV over the whole detector. For these, the tank (G) and shielding electrode (D) were kept at U_tank = −18 kV, and the inner electrodes (E) and (F) were put on U_electrode = −18.5 kV. As this configuration does not produce any background related to particles stored in Penning traps, the same potential difference U_electrode − U_tank = −0.5 kV between the tank (G) and the electrode system (E, F) was used in our measurements. The potential inside the PS tank is a mixture of the electrode and tank potential; one has U_PS = a·U_tank + b·U_electrode. Yet, the constants are a = 0 and b = 1 in good approximation: U_PS and U_electrode never differ by more than a few tens of V. This effect depends on the electron trajectory in the PS and is accounted for in our simulations (Section 6). For these, we have computed the actual electric field inside the PS using the methods described in. In the following, we do not distinguish between qU_PS and qU_electrode in the text, as their difference is negligible at keV surplus energies. In each of our measurement series, the tank voltage U_tank was varied from −0.5 kV to about −17.5 kV, so that the PS filter energy qU_PS ≈ qU_electrode varied from about 1 keV to about 18 keV. A photoelectric electron source (E-GUN) (Fig.
5) mounted at the entry of the PS test setup (Fig. 4) was used to generate electrons with an energy of E = 18 keV for the measurements: A deuterium lamp (f) generates UV light with wavelengths in the range 185 nm < < 400 nm (6.7 eV > h > 3.1 eV). The light shines through a sapphire window (e) and a hollow ceramic insulator (d). The UV photons finally produce free electrons via the photoelectric effect in a thin gold layer (≈ 35 g/cm 2 ) on a gold plated quartz tip (b) sitting in a metal housing (c). The quartz tip is transparent to light of the wavelength 150 nm < < 4000 nm, thus not cutting into the UV-spectrum of the deuterium lamp. The work function of gold is 4.83 ± 0.02 eV. Therefore, only electrons with excess energies of up to E max = 6.7 eV − 4.83 eV = 1.87 eV can be released. The gold plated tip is supplied with a voltage of U tip = −18 kV. The photoelectrons are finally accelerated to the energy qU tip = 18 keV in forward direction by a blind (a) on ground potential. The electron source is mounted on a manipulator which allowed us to move it on a sphere up to ± 19 into horizontal and vertical direction (cf. Fig. 4). This corresponds to a motion for the gold plated tip on a radius of 1.06 m around the point with the pre-spectrometer coordinates z = −2.4 m and r = 0 m, 0.25 m behind the center of the source magnet (cf. Fig. 4). The intensity stability of the electron source was measured to better than 0.2 % per hour. Fig. 5 shows the cross-section of the E-GUN. The reader should keep in mind that the electric field near the gold plated tip (b) is strong and there is a ground blind (a) mounted in front of the tip. Together with the electric and magnetic fields inside the E-GUN and the detector, these two components play an important role in our data analysis (Section 6). The pre-spectrometer detector is a quadratically segmented silicon PIN diode with 64 pixels of equal size and properties. It has an overall sensitive area of 16 cm 2. It is a predecessor of the final KATRIN detector and was manufactured with the same processing techniques. For simulations of the detector response, the energy resolution ∆E FWHM and the dead layer thickness have to be known. The detector system exhibited a measured average ∆E FWHM ≈ 3.5 keV. The thickness of the dead layer was determined as = 119 ± 3 nm and = 109 ± 3 nm by using two independent experimental techniques. The detector is located at z = 2.3 m (15 cm behind the center plane of the detector magnet) at B = 3.4 T and at ground potential (Fig. 4). If the detector is centered on the PS axis, its area corresponds to 28.5 % of KATRIN's magnetic flux tube. In our measurements, the detector was adjusted laterally so that only a single pixel was hit and the data analysis was made with this single pixel. The energy calibration of the pixel was used to select events in the region of interest from 15 keV to 21 keV. For our measurements, the inner electrodes (E) and (F) (cf. Fig. 4) were put on a voltage of U electrode − U tank = −500 ± 0.1 V with respect to the PS tank using a voltage supply (Canberra 3101/2) mounted inside a rack on tank potential. The tank voltage itself (U tank between -0.5 kV and -17.5 kV) was supplied by another voltage source (FUG HCN 140M-35000). The gold plated tip of the E-GUN was supplied with a constant voltage of U tip = −18 kV by a high voltage supply (FUG HCN 35-35000). The accuracy of the voltage difference determination between the tank and the gold plated tip was better than 10 V. 
Measurements We performed six measurement series with E = 18 keV electrons (Tab. 1). The PS solenoids were set to the KATRIN design value of B sol = 4.5 T (Figs. 1 and 4) and half this value B sol = 2.3 T. As the cyclotron length grows with 1/B (Eq. 1), deviations from the ideal transmission properties described in Section 2 are more probable for the decreased B-field B sol = 2.3 T. The electrons pass the PS on a radius r(z), which encloses a constant magnetic flux enc (in a homogeneous B-field one has enc ≈ r 2 (z) B(z)). For each B-field, the PS transmission was measured for three different In each measurement series, the PS filter energy qU PS was stepped repeatedly from 1 keV towards 18 keV and back to 1 keV, using identical time intervals. Combining the counts from the ramp up and the ramp down eliminates a possible linear drift in the emission rate of the E-GUN. Each detector run at a constant potential lasted for about R = 48 s. The procedure was repeated up to five times (see 'scans' in Tab. 1), resulting in an overall measurement time of e.g. 478 s ≈ scans 2 R for the measurement with B sol = 4.5 T and r cen = 0 cm. From the runs with the identical voltages, r cen and B-field settings, electron events were summed and divided by the overall measurement time to obtain an average electron rate. The detector was adjusted laterally before the Simulation Tools The analysis of our experimental data is done by comparing it to computer simulations. The main components of our simulations are: (i), electric and magnetic field computations; (ii), electron tracking in vacuum; (iii), electron scattering with H 2 molecules; (iv), electron tracking in silicon. For the electric field calculations (axisymmetric and three-dimensional, with wires) we used the boundary element method. In order to speed up the simulation in axisymmetric regions, we employed the zonal harmonic expansion method. This turned out to be useful also for the E-GUN -pre-spectrometer geometry, which is not axially symmetric but consists of two separated, locally axisymmetric regions (E-GUN region and pre-spectrometer region). The zonal harmonic expansion method was also used for magnetic field computations. For the electron tracking in vacuum, the exact relativistic equation of motion of the electron with Lorentz force was employed, using an explicit 8th order Runge-Kutta method to solve the ordinary differential equations. The electron-H 2 scattering code contains total and differential cross sections and Monte Carlo generation algorithms for elastic, electronic excitation and ionization collisions of electrons with H 2 molecules. Electron detection, electron energies deposited in the sensitive volume of the silicon detector, the detector dead layer, and electron backscattering at the detector are modeled by a Monte Carlo C++ code (KESS: KATRIN Electron Scattering in Silicon), which is based on detailed studies and agrees well with experimental data. Our original field calculation, 8th order Runge-Kutta tracking and e-H 2 scattering C codes have been rewritten into C++ and integrated into the global KATRIN The simulation was normalized so that the rate at E sur = E − qU PS = 0.5 keV is 1. The experimental rate was normalized so that its average rate is equal to the average of the simulated rate. Only simulation values where a measurement exists were considered for this average. The simulation points are shifted to the right by 0.2 keV to make the points distinguishable. 
The plot shows that our simulation and our measurements are compatible at the percent level. The simulation was normalized so that the rate at E sur = E − qU PS = 0.5 keV is 1. The experimental rate was normalized so that its average rate is equal to the average of the simulated rate. Only simulation values where a measurement exists were considered for this average. The simulation points are shifted to the right by 0.2 keV to make the points distinguishable. At = 19 / r cen = 52.3 cm we measured at fewer filter potentials. The plot shows that our simulation and our measurements are compatible at the percent level. C++ simulation framework 'Kassiopeia'. KESS has also been integrated into Kassiopeia. We have used both the original C codes and the new Kassiopeia C++ code for the simulations of our paper. Simulation and Analysis Ignoring the influence of the E-GUN and the detector, the PS transmission at keV surplus energies in our codes is always 100%: In order to show this, we started 130 electrons with uniformly distributed polar-and azimuthal angles for Electrons impinging on the detector with an incident energy E inc = 18 keV and an incident angle inc = 0 have a probability of about 20% to be backscattered, and higher incident angles further increase this probability. Most of the backscattered electrons have lost energy in the detector and are again reflected by the filter potential qU PS in the PS or by the magnetic mirror effect towards the detector. Backscattered electrons having deposited less than E − qU PS in the detector retain enough energy to pass the filter potential qU PS of the PS in backward (towards E-GUN) direction. They can enter the E-GUN through the opening in the ground blind (cf. Fig. 5) and can be electrically reflected towards the detector again. This process continues until all energy is finally deposited inside the detector. Even at high surplus energy E − qU PS, the count rate at the detector is constant. Flight times in the PS are of the order of 10 ns which is far below the s shaping time of the DAQ. Therefore, only electrons with large energy losses in the deadlayer will deposit an energy lower than the region of interest in the sensitive volume. Since all electrons hitting the detector have the same energy and same angular distribution for all settings of U electrode = U tank −0.5 kV, the count rate does not depend on this voltage setting. This explains the measurement at B sol = 4.5 T and r cen = 0 cm ( = 0 ), where no loss in count rate is observed. The axial rotation can cause the electron to eventually hit the ground blind (Fig. 8), depending on B, the electron energies E ⊥ and E and the total path length. The axial rotation is only dependent on the energy and has the same sense for a forward (towards detector) and backward (towards E-GUN) pass of the PS and is therefore adding up at each pass of the PS. The path for the electrons in the pre-spectrometer is elongated by reflections at the detector, at the field of the E-GUN, at magnetic fields and at the spectrometer potential and can thus be multiples of the spectrometer length. The higher the surplus energy, the higher the probability for a backscattered electron to overcome the spectrometer potential after energy deposits in the detector. The higher the surplus energy, the larger the axial rotation which guides electrons onto the ground blind. Therefore, the count rate in the region of interest decreases with higher surplus energy E sur = E 0 − qU PS. 
With this, all measurements at B sol = 4.5 T can be explained. For B sol = 2.3 T a loss in count rate is observed for all E-GUN settings, including the one at r cen = 0 cm ( = 0 ). It is not possible to explain this effect with backscattering and the B ∇| B| drift alone. The electric field gradient in the E-GUN is large compared to the PS, since the potential difference of 18 kV is applied across a distance of only a few cm. Together with the 50% lower magnetic field in the center of the magnets, this can lead to a non-adiabatic transport in the E-GUN region. At B sol = 2.3 T, the E-GUN is located at a magnetic field of B ≈ 0.02 T. Thus, a backscattered electron entering the E-GUN through the ground blind has a probability to change its angle towards the magnetic field line non-adiabatically. Depending on the new angle and the electron energy, it can be trapped between the E-GUN and the closest magnet or the spectrometer potential. Thus, a loss in count rate in the region of interest will also be observed for measurements with r cen ( = 0 ). This assumption is therefore able to explain the measurement for B sol = 2.3 T and r cen ( = 0 ). In our simulations, the electrons were started with a uniform random kinetic energy of 0 < E < 2 eV, and were uniformly distributed on a disc with diameter d = 1 mm in front of the actual gold tip. An angular distribution, = arcsin(R 1 ) from and = 2R 2 with uniformly distributed random numbers R 1, R 2 ∈ [0, 1[ was used. The time an electron travels between two subsequent detector hits is more than two magnitudes smaller than the DAQ shaping time. This means, subsequent hits are analyzed by the DAQ as one hit. Therefore, each energy deposition in the sensitive detector volume per electron was summed up, even for electrons with multiple detector entries. As in the experimental data analysis, all electrons with energies 15 < E < 21 keV were counted. Towards the axis of the pre-spectrometer, the B-field is stronger and the curvature of the electron trajectory is larger, resulting in an overall B ∇B drift around the pre-spectrometer axis. If the drift is large enough, the backscattered electron hits the ground blind on its way back from the detector. Possible exit conditions of the simulation were: Electron hits E-GUN blind (cf. Fig. 5 and Fig. 8). Electron energy lower than 100 eV. It is no longer able to pass the dead layer of the detector. Electron was reflected more than 20 times in the PS (trapping). Figures 6 and 7 show the normalized simulation and experimental results. They show that our simulation can explain the experimental rates at the percent level. The agreement between experiment and simulation implies that the PS has 100% transmission probability for electrons with keV surplus energy as long as B sol is not smaller than 2.3 T (half the KATRIN design value ). Operating the PS at reduced zero filter energy The previous section proves experimentally that the PS can be operated at reduced or even zero filter energy without essential transmission losses, as long as B sol is not smaller than 2.3 T. In this section we discuss more generally, with the help of simulations, the operation of PS with zero potential. We show first that, with U PS = 0, the background due to positive ions created by beta electrons is expected to be far below the KATRIN requirement of b = 10 mHz, and second that the electron motion through the PS and MS is fully adiabatic. 
Lowering the filter energy qU PS of the PS, the depth of the natural Penning trap, created by the PS and MS retarding potentials U PS and U MS and the magnetic field B sol, between the PS and MS decreases, and the corresponding background level is expected to decrease, too. On the other hand, there is another background component that increases with decreasing qU PS : more -electrons reach the entrance of the main spectrometer. These electrons are not able to produce direct background, but they create positive ions through ionizing collisions with the residual gas molecules. These ions can fly deep into the main spectrometer, and they can produce low energy electrons there, either in the residual gas or at the inner surface of the main spectrometer electrodes. Some of these electrons can hit the detector, and so we obtain background events. We will present below a quantitative estimate for the maximal value of this background component, in case of zero PS potential. Inside the WGTS, 10 11 electrons are produced through tritium beta decays each second. About 20 % of them move through the transport system and reach the entrance of the main spectrometer in the case of qU PS = 0 keV. Along their way, many of these electrons have ionizing collisions with residual gas molecules and thus produce positive ions. In the absence of any hindrances, all these ions enter the main spectrometer. In order to reduce the background rate of these ions, we plan to use an ion-blocking electrode near the center of the main spectrometer entrance magnet. Since the positive ions created by ionizing collisions have small kinetic energy (below few eV), an accordingly small potential barrier created by an ion-blocking electrode would already prevent these ions from entering the MS and thus from producing background. Nevertheless, the ions created in the region between the ion-blocking electrode and the high-voltage area of the MS are able to enter the MS. We have computed the ion creation rate in this region by detailed trajectory simulations. First, we generated electrons at the center of the MS entrance magnet by using the tritium -decay Fermi spectrum and an isotropic angular distribution. These electrons were tracked until their reflection at the MS filter potential and then back to their starting point. The ionization probability of an electron was computed by summing the differential ionization probabilities dP ion = ion (E kin ) n dl, where ion (E kin ) denotes the ionization cross section of electrons with the residual gas molecules, as function of the electron kinetic energy E kin, n is the number density of the residual gas, and dl the differential pathlength. In our simulations, we assumed molecular hydrogen for the residual gas, and we used the e − H 2 ionization cross section formulas of (they are in good agreement with measured e − H 2 cross section values). Assuming p = 10 −11 mbar pressure and room temperature, the number density is n = 2.4 10 11 m −3. According to our calculations (simulation of 1000 electron tracks), the average ionization cross section is ion = 10 −21 m 2, the average electron pathlength is l = 1.5 m, and the average ionization probability of an electron is P ion = 3.6 10 −10. Using the 2 10 10 s −1 -electron intensity, we obtain a positive ion creation rate of roughly + ≈ 7 s −1 in the region between the ion-blocking electrode and the high potential domain of the MS. 
If an electron is scattered towards large polar angles (remember E ⊥ = E kin sin 2 ), it can become trapped in a hybrid trap near the entrance of the MS: if this electron moves towards the entrance magnet of the MS coming from inside the MS, increases adiabatically until = 90 is reached, and the electron starts to move towards the MS again. The MS magnet thus establishes a magnetic mirror for these electrons. Inside the MS, the electron is electrically reflected by the MS filter potential generated by the MS electrode system. In order to compute the ion creation rate due to these trapped electrons, we simulated 10 8 electrons. We used our custom C codes to compute the electromagnetic fields, the trajectories and the e − H 2 scattering. The result of these simulations is the following: the average trapping probability of the beta electrons in the hybrid trap is 3 10 −11, and the number of ions created by a trapped electron is smaller than 5 (the trapped electrons can leave the trap by scattering and by energy loss due to synchrotron radiation). From these numbers and from the above beta intensity we get an ionization rate that is smaller than 3 s −1. Therefore, the ion creation rate due to these trapped electrons is smaller than due to the free (non-trapped) electrons. The ions with roughly + ≈ 10 s −1 rate enter the MS, they will be accelerated to about 18.57 keV kinetic energy, and due to the small (few Gauss) magnetic field, their motion inside the MS is completely non-adiabatic: they move on a straight line, until they hit the spectrometer tank. During this motion, they can suffer ionizing collisions with the residual gas. The ionizing collision cross section of H + and H + 2 ions of 18 keV kinetic energy with H 2 molecules is about + ion = 2 10 −20 m 2. Assuming l + = 20 m pathlength for the positive ions inside the main spectrometer tank, the secondary electron creation rate due to ionizing collisions of the positive ions with H 2 molecules is e = + + ion l + n = 10 −6 s −1, corresponding to 0.001 mHz background level. This background increases quadratically with the residual gas pressure, so with p = 10 −10 mbar the background rate would be 0.1 mHz. Another background possibility is the following: the ions hit the MS tank with high velocity, and these impact events are connected with secondary electron emission from the surface. One ion can eject more than 1 electron; let us assume that this multiplication number is 10. Then we get a secondary electron emission rate of 100 s −1 from the tank surface. This is several orders of magnitude smaller than we expect from cosmic ray muons and environmental radioactivity. Thanks to the magnetic shielding of the approximately axisymmetric magnetic field and to the electric shielding of the wire electrode, only a very small proportion of these electrons is expected to reach the detector; extrapolating the experimental data of the Mainz neutrino mass spectrometer, this proportion could be about 10 −7. With this suppression factor, we get 0.01 mHz background level from these electrons. For both scenarios we obtain a background level caused by positive ions which is several orders of magnitude smaller than the b = 10 mHz background value that would be acceptable for the KATRIN experiment. Therefore, our simulations show that the PS could be used with zero potential, without any significant background increase due to the positive ions produced by the beta electrons. 
With zero or small PS filter energy, the signal electrons (E 0 = 18.57 keV) have a high surplus energy E sur = E 0 − qU PS inside the PS. Due to the relatively small magnetic field in the middle of the PS (B ana,PS = 0.016 T), it could happen, in principle, that the motion of these electrons is not adiabatic. We have checked the adiabaticity behaviour of the electrons with detailed trajectory simulations. For this purpose, we started the electrons in the KATRIN source (WGTS) at various points with various polar and azimuthal direction angles, and we tracked them as far as the main spectrometer analyzing plane. We defined the starting kinetic energy of the electrons with the following procedure: first, using the starting point, direction vector and a first estimate for the transmission energy, we computed the guiding center point corresponding to the starting point. Then, we simulated the magnetic field line from the guiding center point until the MS analyzing plane. Using the electric potential and magnetic field values at the two endpoints of this field line, and the starting polar angle, we calculated the adiabatic transmission energy: in the adiabatic approximation, the electron has zero longitudinal energy E in the analyzing plane (cf. Fig. 2) if it starts with this energy (electrons starting below or above this energy are reflected or transmitted, respectively). We defined the starting kinetic energy of our electrons as the above adiabatic transmission energy plus a small surplus energy (E sur = E − qU PS = 3 meV). If the electron motion is fully adiabatic, E has to be precisely equal to E sur in the analyzing plane. The main result of our simulations is the following: both for a PS filter energy of qU PS = 0 keV and qU PS = 18.3 keV, and for all starting parameters, using the standard KATRIN magnetic field values (3.6 T in WGTS, etc.), |E − E sur | (computed by exact tracking) is in the MS analyzing plane on the average 0.2 meV, which is four orders of magnitude smaller than the resolution ∆E MS = 0.93 eV of the KATRIN MS. Therefore, we can say that the motion of signal electrons in the KATRIN system is practically adiabatic, even with U PS = 0 kV; deviations from adiabaticity have a negligible effect to the KATRIN transmission function. Note that that the electron motion is approximately adiabatic even if the magnetic field in the whole KATRIN system is half of its standard design value; in this case, |E −E sur | is in the MS analyzing plane on the average 0.6 meV. The local behaviour of the quantity was considered as adiabatic invariant in Section 2. There, the difference between the electron energy E and the filter energy qU f was assumed to be small (some tens of eV). As one can see in Fig. 9, has an oscillational behavior, due to the superposition of the cyclotron motion and the azimuthal magnetron motion; the oscillation period in Fig. 9 is equal to the electron cyclotron period. The amplitude of the oscillation depends on the electron surplus energy E sur = E − qU PS : With E sur ≈ 0.3 keV in the PS, the relative fluctuation / inside the PS is order of 10 −2 (for smaller starting polar angle the fluctuation is somewhat larger), but with E sur of a few keV this fluctuation is much larger, order of 1 (Fig. 9). How is it then possible that regains its starting value, in spite of its large oscillations in the small magnetic field region? 
The explanation is the following : the real adiabatic invariant I ad, which is constant in the adiabatic approximation throughout the whole trajectory, is not, but a complicated function Figure 11. Evolution of the polar angle for a transmitted electron. Parameters as in Fig. 9. of the higher field derivatives. Inside homogeneous field regions the field derivatives are small, and there I ad ≈. When an electron moves from one homogeneous field region to another, and if the adiabatic invariant I ad is constant, can regain the starting value with high accuracy, although between these two regions, where the field gradients are large, can have large oscillations. Using a human analogy, we can say that the electron seems to have 'memory', remembering its initial value of. If the electron motion is not adiabatic (in case of high energy or small magnetic field), I ad is not constant, so in this case the electron will not gain back its original value in the second homogeneous field region; then, the electron has no memory. In order to illustrate this phenomenon, we simulated electrons starting in the entryside magnet of the PS. At the KATRIN field B sol = 4.5 T (Fig. 1 and 4), electrons are transmitted through the PS, and regains its original value (Fig. 9). If we choose a deliberately low field B sol = 0.57 T in simulations, electrons can be magnetically trapped and becomes chaotic (Fig. 10). If regains its original value, the angle is approximately determined by Eq. 3. Let us consider an electron which enters the PS in the entry-side magnet with < 90 and for which the polar angle is determined by Eq. 3. In this case, the electron will never acquire = 90 anywhere in the PS. Thus, it cannot be magnetically reflected (Fig. 11). On the other hand, if Eq. 3 is violated as shown in Fig. 10, the reflection angle = 90 can be reached over and over again (Fig. 12), the electron is magnetically trapped inside the PS. It is not transmitted, and so it is not detected. We mention that, at the edge of outer field lines of the flux tube, with B sol =4.5 T and zero PS potential, the 18 keV electrons make inside the PS about 2 degrees magnetron motional rotation around the beam axis. This has to be taken into account for precise imaging investigations of the KATRIN experiment (see Ref. for experimental examples of much larger magnetron motional rotation). The background as a function of the PS potential will be investigated experimentally when the whole KATRIN system is finished. Similarly, the MS transmission function and thus the adiabaticity of the electrons in PS and MS can be experimentally investigated rather precisely, as function of PS potential, if PS and MS are connected together (by shooting E-GUN electrons through them). The experiments presented in our paper, using only the PS, are sensitive only to large deviations from adiabaticity (small nonadiabaticity effects do not cause any transmission losses in the case of large surplus energies). Conclusion and Outlook Our investigations show that the PS filter energy qU PS can be reduced by several keV without any loss of transmission, making it possible to diminish the Penning trap between PS and MS. The actual value of the PS filter energy qU PS to minimize KATRIN's background has to be determined experimentally with the full KATRIN setup. It is possible that the final KATRIN setup will operate with a mixture of reduced PS filter energy and other, active measures removing stored electrons from the Penning trap between PS and MS. 
Sweeping a wire through the trapping volume has proven to be an efficient means to empty the trap. It will take about t_wire = 1 s for the wire to sweep across the magnetic flux tube imaged on the detector. In order to scan the tritium β-spectrum, the MS retarding voltage U_MS will be changed every few minutes. The sweeps will be performed during these voltage changes to avoid a loss of measurement time. Yet, the corresponding 10^6 motion cycles are a very large number for a UHV compatible device. For KATRIN, the transmission function of the PS has to be known with permille accuracy. Our investigations indicate that this precision is only possible if the influence of multiple electron reflection at the electron source is suppressed. This can be achieved with a) a stable, pulsed electron source at the entry of the PS, b) the PS and MS in their final tandem configuration (cf. Fig. 1) and c) KATRIN's final detector having a time resolution of about 100 ns. A novel, angular selective pulsed UV laser photoelectron source, which can produce pulses as short as 40 ns with a repetition rate of up to 10 kHz, is currently being built for KATRIN on the basis of. In the experiment we propose, the MS will be operated so that the electrons retain only a few eV surplus energy in the MS and are therefore guaranteed to be transmitted if the MS performs as it should. The filter energy of the PS will be varied, just as in our experiment. In this configuration, multiple (up to about 14) reflections between the detector and the main-spectrometer potential will cease after less than 20 μs. The fraction of backscattered electrons which retain enough energy to pass the MS filter potential in the backwards (towards the electron source) direction, and which could possibly get lost at the electron source, is negligible in this configuration. Therefore, the analysis of this experiment will be much simpler than in our case.
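For reference, the adiabatic bookkeeping used in the trajectory simulations described above can be summarized by the standard non-relativistic MAC-E filter relations below. This is only a sketch: the starting-point potential is taken as zero, and the symbols B_start, B_ana and U_ana (magnetic field at the starting point, and magnetic field and electric potential in the analyzing plane) are introduced here for illustration and are not defined in the excerpt.

% Orbital magnetic moment, conserved in the adiabatic approximation
\mu = \frac{E_\perp}{B} = \mathrm{const.}
% Longitudinal energy in the analyzing plane for starting kinetic energy E and polar angle \theta
E_\parallel^{\mathrm{ana}} = E - qU_{\mathrm{ana}} - E \sin^2\theta \, \frac{B_{\mathrm{ana}}}{B_{\mathrm{start}}}
% Transmission requires E_\parallel^{ana} >= 0; the adiabatic transmission energy is the value of E
% for which E_\parallel^{ana} = 0, and the surplus energy E_sur is the excess of E above that value.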
#ifndef __PATTERN_MATCH_H__
#define __PATTERN_MATCH_H__
#include <inttypes.h>
#include <stddef.h> /* size_t, used by pattern_match() below */
typedef uint32_t LONG;
enum NCODE { a = 1, c = 2, g = 3, t = 4};
extern enum NCODE ncode; /* declared extern so that including this header in several translation units does not multiply-define it */
LONG hash(char *S); // hash function
LONG rolling_hash(char *S); // hash function
int compare_hash(LONG h1, LONG h2); // compare hash from text segment(h1) to pattern hash h2
LONG * pattern_match(char *T, char *p, size_t n, size_t m); // return pointer to array with starting locations
// of matches for p(pattern) in T (target/text)
#endif /*__PATTERN_MATCH_H__*/
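A minimal sketch of how these declarations might be implemented, assuming a Rabin-Karp style polynomial hash over the a/c/g/t codes defined above. The BASE constant, the encode() helper and the sentinel convention for the returned array are illustrative choices not specified by the header, and the rolling update is done inline in pattern_match(), so the separate rolling_hash() declaration is not exercised here.

/* pattern_match.c -- illustrative sketch only, not the original implementation */
#include <stdlib.h>
#include <string.h>
#include "pattern_match.h"

#define BASE 5u                 /* one more than the largest nucleotide code */

enum NCODE ncode;               /* single definition matching the extern declaration */

static LONG encode(char ch)
{
    switch (ch) {
    case 'a': return (LONG)a;
    case 'c': return (LONG)c;
    case 'g': return (LONG)g;
    case 't': return (LONG)t;
    default:  return 0;
    }
}

/* polynomial hash of the whole NUL-terminated string S */
LONG hash(char *S)
{
    LONG h = 0;
    for (size_t i = 0; S[i] != '\0'; ++i)
        h = h * BASE + encode(S[i]);    /* wraps mod 2^32, which is fine for hashing */
    return h;
}

int compare_hash(LONG h1, LONG h2)
{
    return h1 == h2;                    /* equal hashes still need a direct comparison */
}

/* Rabin-Karp scan: returns a malloc'ed array of match start offsets in T,
   terminated by the sentinel value n (no valid offset can equal n). */
LONG *pattern_match(char *T, char *p, size_t n, size_t m)
{
    LONG *out = malloc((n + 2) * sizeof *out);
    size_t k = 0;
    if (out == NULL || m == 0 || m > n) {
        if (out != NULL) out[0] = (LONG)n;
        return out;
    }
    LONG hp = 0, ht = 0, msb = 1;       /* msb = BASE^(m-1) */
    for (size_t i = 0; i < m; ++i) {
        hp = hp * BASE + encode(p[i]);
        ht = ht * BASE + encode(T[i]);
        if (i + 1 < m) msb *= BASE;
    }
    for (size_t i = 0; ; ++i) {
        if (compare_hash(ht, hp) && memcmp(T + i, p, m) == 0)
            out[k++] = (LONG)i;         /* verify to rule out hash collisions */
        if (i + m >= n) break;
        /* roll the window: drop T[i], append T[i + m] */
        ht = (ht - encode(T[i]) * msb) * BASE + encode(T[i + m]);
    }
    out[k] = (LONG)n;                   /* sentinel terminator */
    return out;
}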
|
<reponame>Anmol-Singh-Jaggi/interview-notes
"""
Given a sorted array in which all elements appear twice (one after one)
and one element appears only once.
Find that element in O(log n) complexity.
SOLUTION:
An Efficient Solution can find the required element in O(Log n) time.
The idea is to use Binary Search. Below is an observation in input array.
All elements before the required have first occurrence at even index (0, 2, ..)
and next occurrence at odd index (1, 3, …).
And all elements after the required element have first occurrence at odd index and next occurrence at even index.
1) Find the middle index, say ‘mid’.
2) If ‘mid’ is even, then compare arr[mid] and arr[mid + 1].
If both are same, then the required element after ‘mid’ else before mid.
3) If ‘mid’ is odd, then compare arr[mid] and arr[mid – 1].
If both are same, then the required element after ‘mid’ else before mid.
"""
def search(arr, low, high):
if low > high:
return None
if low == high:
return arr[low]
    mid = low + (high - low) // 2  # integer division so mid can be used as an index
# If mid is even and element next to mid is
# same as mid, then output element lies on
# right side, else on left side
if mid % 2 == 0:
if arr[mid] == arr[mid + 1]:
return search(arr, mid + 2, high)
else:
return search(arr, low, mid)
else:
if arr[mid] == arr[mid - 1]:
return search(arr, mid + 1, high)
else:
return search(arr, low, mid - 1)
|
<filename>visualizations/plot_utils.py<gh_stars>0
############################################################################
# IMPORTS
############################################################################
import numpy as np
import seaborn as sns
import pandas as pd
import altair as alt
from collections import OrderedDict
from vega_datasets import data
import matplotlib as mpl
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = 'Times New Roman'
from matplotlib import pyplot as plt
import matplotlib.ticker as tck
############################################################################
# Plotting Utilities, Constants, Methods for W209 arXiv project
############################################################################
#---------------------------------------------------------------------------
## Plotting Palette
#
# Create a dict object containing U.C. Berkeley official school colors for plot palette
# reference : https://brand.berkeley.edu/colors/
# secondary reference : https://alumni.berkeley.edu/brand/color-palette
# Class Initialization
#---------------------------------------------------------------------------
berkeley_palette = OrderedDict({
'berkeley_blue' : '#003262',
'california_gold' : '#fdb515',
'founders_rock' : '#3b7ea1',
'medalist' : '#c4820e',
'bay_fog' : '#ddd5c7',
'lawrence' : '#00b0da',
'sather_gate' : '#b9d3b6',
'pacific' : '#46535e',
'soybean' : '#859438',
'south_hall' : '#6c3302',
'wellman_tile' : '#D9661F',
'rose_garden' : '#ee1f60',
'golden_gate' : '#ed4e33',
'lap_lane' : '#00a598',
'ion' : '#cfdd45',
'stone_pine' : '#584f29',
'grey' : '#eeeeee',
'web_grey' : '#888888',
# alum only colors
'metallic_gold' : '#BC9B6A',
'california_purple' : '#5C3160',
# standard web colors
'white' : '#FFFFFF',
'black' : '#000000'
})
#---------------------------------------------------------------------------
## Altair custom "Cal" theme
#---------------------------------------------------------------------------
def cal_theme():
font = "Lato"
return {
"config": {
"title": {
"fontSize": 30,
"font": font,
"anchor": "middle",
"align":"center",
"color": berkeley_palette['berkeley_blue'],
"subtitleFontSize": 20,
"subtitleFont": font,
"subtitleAcchor": "middle",
"subtitleAlign": "center",
"subtitleColor": berkeley_palette['berkeley_blue']
},
"axisX": {
"labelFont": font,
"labelColor": berkeley_palette['pacific'],
"labelFontSize": 15,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end",
"titlePadding": 20
},
"axisY": {
"labelFont": font,
"labelColor": berkeley_palette['pacific'],
"labelFontSize": 15,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end",
"titlePadding": 20
},
"headerRow": {
"labelFont": font,
"titleFont": font,
"titleFontSize": 15,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end"
},
"legend": {
"labelFont": font,
"labelFontSize": 15,
"labelColor": berkeley_palette['stone_pine'],
"symbolType": "stroke",
"symbolStrokeWidth": 3,
"symbolOpacity": 1.0,
"symbolSize": 500,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue']
},
"view": {
"labelFont": font,
"labelColor": berkeley_palette['pacific'],
"labelFontSize": 15,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end"
},
"facet": {
"labelFont": font,
"labelColor": berkeley_palette['pacific'],
"labelFontSize": 15,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end"
},
"row": {
"labelFont": font,
"labelColor": berkeley_palette['pacific'],
"labelFontSize": 15,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end"
}
}
}
alt.themes.register("my_cal_theme", cal_theme)
alt.themes.enable("my_cal_theme")
###################################################################################
###################################################################################
## DIVERGENCE DATA PREP
###################################################################################
###################################################################################
def get_divergence_data(df):
df2_effective = df.groupby(by=['Treatment','Prompt','Effective']).ROWID.count().reset_index().sort_values(by=['Prompt','Effective','Treatment'])
df2_effective.columns = ['treatment', 'prompt','rank','total']
df2_effective['question'] = 'effective'
df2_intelligence = df.groupby(by=['Treatment','Prompt','Intelligence']).ROWID.count().reset_index().sort_values(by=['Prompt','Intelligence','Treatment'])
df2_intelligence.columns = ['treatment', 'prompt','rank','total']
df2_intelligence['question'] = 'intelligence'
df2_writing = df.groupby(by=['Treatment','Prompt','Writing']).ROWID.count().reset_index().sort_values(by=['Prompt','Writing','Treatment'])
df2_writing.columns = ['treatment', 'prompt','rank','total']
df2_writing['question'] = 'writing'
df2 = pd.concat([df2_effective, df2_intelligence, df2_writing], axis=0, ignore_index=True)
gt = df2.groupby(by=['treatment','prompt','question']).agg({'total':'sum'}).reset_index()
gt.columns = ['treatment','prompt','question','grand_total']
df2 = df2.merge(gt, on=['treatment','prompt','question'], how='inner')
df2['pct_of_total'] = (df2.total / df2.grand_total) * 100.
df2['pct_start'] = np.nan
df2['pct_end'] = np.nan
# fill in any missing votes as 0 percent votes
x = [(a, b, c, d) for a in df2.treatment.unique() for b in df2.prompt.unique() for c in df2['rank'].unique() for d in df2.question.unique()]
x = pd.DataFrame(x, columns=['treatment','prompt','rank','question'])
x = x.merge(df2[['treatment','prompt','rank','question','pct_of_total']], how='left', on=['treatment','prompt','rank','question'])
x = x[(x.pct_of_total.isna()==True)]
x.pct_of_total = np.float32(0.0)
df2 = pd.concat([df2,x], axis=0, ignore_index=True)
# set baseline in the middle
df2.loc[(df2['rank'] == 4), 'pct_start'] = df2.loc[(df2['rank'] == 4), 'pct_of_total']/2 * -1
df2['pct_end'] = df2['pct_start'] * -1
# calculate ranks 1-3 and 5-7
for r,t,p,q in [(a,b,c,d) for a in [3,2,1] for b in df2.treatment.unique() for c in df2.prompt.unique() for d in df2.question.unique()]:
# get starting value for negative percentages, this becomes the "end" value for the next rank down
pct_start = np.float32(df2[((df2['rank'] == (r+1)) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q))].pct_start)
df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_end'] = pct_start
pct_new_start = np.float32(df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_of_total'] * -1) + pct_start
df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_start'] = pct_new_start
for r,t,p,q in [(a,b,c,d) for a in [5,6,7] for b in df2.treatment.unique() for c in df2.prompt.unique() for d in df2.question.unique()]:
pct_start = np.float32(df2[((df2['rank'] == (r-1)) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q))].pct_end)
df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_start'] = pct_start
pct_end = np.float32(df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_of_total']) + pct_start
df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_end'] = pct_end
return df2
###################################################################################
###################################################################################
## DIVERGENCE PLOTS (LIKERT)
###################################################################################
###################################################################################
def diverge_plot(data, question):
color_scale = alt.Scale(
domain=["1","2","3","4","5","6","7"],
range=[berkeley_palette["rose_garden"],
berkeley_palette["medalist"],
berkeley_palette["california_gold"],
berkeley_palette["bay_fog"],
berkeley_palette["lawrence"],
berkeley_palette["founders_rock"],
berkeley_palette["berkeley_blue"]]
)
select = alt.selection_multi(fields=['rank'])
p = alt.Chart()\
.transform_filter(alt.datum.question == question)\
.mark_bar().encode(
x=alt.X('pct_start:Q'),
x2=alt.X2('pct_end:Q'),
y=alt.Y('prompt:N', axis=alt.Axis(title=None, ticks=False, domain=False, offset=5, minExtent=60)),
color=alt.Color(
'rank:O',
legend=None,
scale=color_scale),
tooltip=[alt.Tooltip('treatment:N', title='Assignment'),
alt.Tooltip('question:N', title='Question'),
alt.Tooltip('rank:O', title='Rank (1-7)'),
alt.Tooltip('pct_of_total:Q', title='% of Total', format='.2f')],
opacity=alt.condition(select, alt.OpacityValue(1.0), alt.OpacityValue(0.5))
).properties(height=150,width=650,title={'text':''}).add_selection(select)
l = alt.Chart(pd.DataFrame({'X':[0]})).mark_rule(size=3, color=berkeley_palette["pacific"], strokeDash=[10,5])\
.encode(x=alt.X('X', type='quantitative', title=None))
return alt.layer(p, l)
def macro_diverge_plot(data, question, title):
c = diverge_plot(data, question)\
.facet(
row=alt.Row('treatment:N',
sort=alt.SortArray(['Control','Typographical','Phonological']),
header=alt.Header(
labelColor=berkeley_palette['pacific'],
labelFontSize=20,
labelFont='Lato',
title=""
)
),
title=title,
data=data)\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-20)
return c
###################################################################################
###################################################################################
## PARTICIPANT COUNT PLOTS (send in only with [treatment, total] columns)
###################################################################################
###################################################################################
def participant_count_plot(data):
b = alt.Chart().mark_bar(line={'color':berkeley_palette['web_grey']}).encode(
x = alt.X('treatment:O', sort=['Control', 'Typographical', 'Phonological'],
axis = alt.Axis(title = 'Assignment Group', labelAngle=0, labelPadding=10, labelFontSize=20, titleFontSize=25)),
y = alt.Y('total:Q', axis = alt.Axis(title = "Participants Assigned", labelPadding=10, labelFontSize=20, titleFontSize=25),
scale=alt.Scale(domain=[0,14])),
color = alt.Color('treatment:O', legend = None,
scale=alt.Scale(range = [berkeley_palette['pacific'], berkeley_palette['berkeley_blue'], berkeley_palette['founders_rock']]))
)
t = alt.Chart().mark_text(
color = berkeley_palette['white'],
size = 20,
align='center',
baseline='middle',
dy = 20).encode(
x = alt.X('treatment:O', axis=None, sort=['Control', 'Typographical','Phonological']),
y = alt.Y('total:Q'),
text = alt.Text('total:Q')
)
p = alt.layer(b, t, data = data)\
.properties(height=300,width=650,title={'text':'Pilot Participation'})\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-20)
return p
def participant_count_plot_live(data):
df2 = data[['Start Date','Treatment','ROWID']].copy()
df2['Start Date'] = df2['Start Date'].dt.normalize()
df2 = df2.drop_duplicates().groupby(by=['Start Date','Treatment']).agg({'ROWID':'count'}).reset_index()
df2.columns = ['date','branch','total']
df2['display_date'] = df2.date.dt.strftime('%b %d')
df2['source'] = 'Amazon'
df2.loc[(df2.date > '2021-04-05'), 'source'] = 'XLab'
df2 = df2.groupby(by=['branch','source']).agg({'total':'sum'}).reset_index().rename(columns={'branch':'treatment'})
base = alt.Chart().mark_bar().encode(
x=alt.X('total:Q', axis=alt.Axis(title = 'Participants Assigned', labelPadding=10, labelFontSize=20, titleFontSize=25)),
y = alt.X('treatment:O', axis=alt.Axis(title = '', labelAngle=0, labelPadding=10, labelFontSize=20, titleFontSize=25), sort=['Control', 'Typographical','Phonological']),
color = alt.Color('treatment:O', legend = None,
scale=alt.Scale(range = [berkeley_palette['pacific'], berkeley_palette['berkeley_blue'], berkeley_palette['founders_rock']]))
).properties(width=650, height=150)
txt = base.mark_text(dx=-15, size=15).encode(
text='total:Q',
color=alt.value('white')
)
p = alt.layer(base, txt).properties(width=600, height=150, title={'text':''})\
.facet(
row=alt.Row('source:N',
sort=alt.SortArray(['XLab','Amazon']),
header=alt.Header(labelColor=berkeley_palette['pacific'], labelFontSize=25,labelFont='Lato',title='')
),
data=df2,
title='Live Study Participation'
).configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-20)
return p
###################################################################################
###################################################################################
## MISSING DEMOGRAPHICS DATA (HTML w/ PANDAS STYLER)
###################################################################################
###################################################################################
def color_all_missing(val):
color = 'white' if val == 0 else 'black'
return 'color: %s' % color
def highlight_missing(s):
is_max = s == 0
return ['background-color: black' if v else '' for v in is_max]
def highlight_missing_max(s):
is_max = s == s.max()
return ['background-color: black' if v else '' for v in is_max]
def color_all_missing_max(val):
color = 'white' if val == df.shape[0] else 'black'
return 'color: %s' % color
def get_missing_demographics(df):
cm = sns.light_palette("#0067B0", as_cmap=True)
cols = [c for c in df.columns if c in ['Year','Gender','English','Race',
'Country','State','Student','Degree']]
rend = pd.DataFrame({'% Missing Values' : round(df[cols].isnull().mean() * 100, 2),
'Missing Values (Count)' : df[cols].isnull().sum(),
'Non-Null Values' : df[cols].notnull().sum(),
'Density' : 1 / df[cols].nunique()})\
.style.bar(color = "#22a7f0", align = 'left', subset=['% Missing Values'])\
.background_gradient(cmap=cm, subset=['Density'])\
.apply(highlight_missing, subset=['Non-Null Values'])\
.apply(highlight_missing_max, subset=['Missing Values (Count)'])\
.set_caption('Distribution of Missing Demographic Values')\
.set_precision(2)
return rend
###################################################################################
###################################################################################
## DEMOGRAPHICS : YEAR DISTRIBUTION (GOOD DATA ONLY)
###################################################################################
###################################################################################
def get_good_demographic_year(df):
df2 = df.copy()
df2.Year = df2.Year.fillna('<MISSING>')
df2 = pd.DataFrame(df2.groupby(by=['ROWID','Year']).size()\
.reset_index()[['ROWID','Year']].Year.value_counts(dropna=False))\
.reset_index().rename(columns={'index':'year', 'Year':'count'}).sort_values(by='year')
strange_values = ['19996','25','26','54','<MISSING>','Los Angeles','Mumbai, India','US','2020']
good = df2[(~df2.year.isin(strange_values))].copy()
good['year'] = good['year'].astype(int)
p = alt.Chart(good).mark_bar(size=15, color=berkeley_palette['pacific'], line={'color':berkeley_palette['web_grey']})\
.encode(
x = alt.X('year:Q', bin=False,
axis=alt.Axis(format='.0f', labelAngle=-45),
scale=alt.Scale(domain=[min(good.year), max(good.year)]),
title='Year of Birth'
),
y = alt.Y('count:Q',
axis=alt.Axis(title='Frequency')
)
).properties(height=300, width=650, title={'text':'Distribution of Birth Year', 'subtitle':'Valid Data Only'})\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-10)
return p
###################################################################################
###################################################################################
## DEMOGRAPHICS : GENDER DISTRIBUTION PLOT
###################################################################################
###################################################################################
def get_demographic_gender(df):
df2 = df.copy()
df2.Gender = df2.Gender.fillna('<MISSING>')
df2 = pd.DataFrame(df2.groupby(by=['ROWID','Gender']).size()\
.reset_index()[['ROWID','Gender']].Gender.value_counts(dropna=False))\
.reset_index().rename(columns={'index':'gender', 'Gender':'count'}).sort_values(by='gender')
b = alt.Chart()\
.mark_bar(
color=berkeley_palette['rose_garden'], opacity=0.85,
stroke=berkeley_palette['berkeley_blue'],
strokeWidth=1
).encode(
x=alt.X('gender:N',
axis=alt.Axis(labelAngle=-45, labelFontSize=20, title='Participant Gender', titleFontSize=25)),
y=alt.Y('count:Q',
axis = alt.Axis(title='Frequency', titleFontSize=25))
)
t = alt.Chart().mark_text(
color = berkeley_palette['pacific'],
size = 20,
align='center',
baseline='middle',
dy = -20
).encode(
x = alt.X('gender:N', axis=None),
y = alt.Y('count:Q', axis=None),
text = alt.Text('count:Q')
)
p = alt.layer(b, t, data=df2)\
.properties(height=300,width=700,title={'text':'Distribution of Gender'})\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-20)
return p
###################################################################################
###################################################################################
## DEMOGRAPHICS : COUNTRY DISTRIBUTION
###################################################################################
###################################################################################
def get_demographic_country(df):
df2 = df.copy()
df2.Country = df2.Country.fillna('<MISSING>')
df2 = pd.DataFrame(df2.groupby(by=['ROWID','Country']).size()\
.reset_index()[['ROWID','Country']].Country.value_counts(dropna=False))\
.reset_index().rename(columns={'index':'country', 'Country':'count'}).sort_values(by='country')
ctry = pd.DataFrame({
'country':['<MISSING>', 'Afghanistan', 'Canada', 'China', 'France',
'Hong Kong (S.A.R.)', 'India', 'Italy', 'Mexico', 'New Zealand',
'Portugal', 'Singapore', 'United Kingdom of Great Britain and Northern Ireland',
'United States of America'],
'id':[0, 4, 124, 156, 250, 344, 356, 380, 484, 554, 620, 702, 826, 840]})
df2 = df2.merge(ctry, how='inner', on='country')
source = alt.topo_feature(data.world_110m.url, "countries")
background = alt.Chart(source).mark_geoshape(fill="white")
foreground = (
alt.Chart(source)
.mark_geoshape(stroke=berkeley_palette['bay_fog'], strokeWidth=0.25)
.encode(
color=alt.Color(
"count:N", scale=alt.Scale(range=[berkeley_palette['pacific'], berkeley_palette['lawrence'],
berkeley_palette['lap_lane'], berkeley_palette['founders_rock'],
berkeley_palette['founders_rock'], berkeley_palette['berkeley_blue']]), legend=None,
),
tooltip=[
alt.Tooltip("country:N", title="Country"),
alt.Tooltip("count:Q", title="Participants"),
],
)
.transform_lookup(
lookup="id",
from_=alt.LookupData(df2, "id", ["count", "country"]),
)
)
final_map = alt.layer(background, foreground)\
.properties(width=700, height=400, title={'text':'Distribution of Country'})\
.configure_title(anchor='middle')\
.configure_title(dy=-10)\
.project("naturalEarth1")\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_view(stroke=None, strokeWidth=0)\
.configure_axis(grid=False)
return final_map
###################################################################################
###################################################################################
## DEMOGRAPHICS : STATE DISTRIBUTION
###################################################################################
###################################################################################
def get_demographic_state(df):
df2 = df.copy()
df2.State = df2.State.fillna('<MISSING>')
df2 = pd.DataFrame(df2.groupby(by=['ROWID','State']).size()\
.reset_index()[['ROWID','State']].State.value_counts(dropna=False))\
.reset_index().rename(columns={'index':'state', 'State':'count'}).sort_values(by='state')
codes = pd.DataFrame({'state':['Alabama','Alaska','Arizona','Arkansas','California',
'Colorado','Connecticut','Delaware','District of Columbia','Florida','Georgia',
'Hawaii','Idaho','Illinois','Indiana','Iowa','Kansas','Kentucky','Louisiana',
'Maine','Maryland','Massachusetts','Michigan','Minnesota','Mississippi','Missouri',
'Montana','Nebraska','Nevada','New Hampshire','New Jersey','New Mexico','New York',
'North Carolina','North Dakota','Ohio','Oklahoma','Oregon','Pennsylvania','Rhode Island',
'South Carolina','South Dakota','Tennessee','Texas','Utah','Vermont','Virginia',
'Washington','West Virginia','Wisconsin','Wyoming','Puerto Rico'],
'id':[1,2,4,5,6,8,9,10,11,12,13,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,44,45,46,47,48,49,50,51,53,54,55,56,72]})
df2 = df2.merge(codes, how='left', on='state').fillna(-99)
df2.id = df2.id.astype(int)
states = alt.topo_feature(data.us_10m.url, 'states')
b = alt.Chart(states).mark_geoshape(stroke=berkeley_palette['white'], strokeWidth=0.25).encode(
color=alt.Color(
"count:N", scale=alt.Scale(range=[berkeley_palette['pacific'], "#00b0da",
"#009dcb", "#008aba", "#0077aa", "#006598", "#005386", "#004274", "#003262"]), legend=None),
tooltip=[
alt.Tooltip("state:N", title="U.S. State"),
alt.Tooltip("count:Q", title="Participants")]
).transform_lookup(
lookup='id',
from_=alt.LookupData(df2, 'id', ["count","state"]))\
.project(type='albersUsa')\
.properties(width=700, height=400, title={'text':'Distribution of U.S. State'})\
.configure_title(anchor='middle')\
.configure_title(dy=-10)\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_view(stroke=None, strokeWidth=0)\
.configure_axis(grid=False)
return b
###################################################################################
###################################################################################
## DEMOGRAPHICS : STUDENT STATUS DISTRIBUTION
###################################################################################
###################################################################################
def get_demographic_student_status(df):
df2 = df.copy()
df2.Student = df2.Student.fillna('<MISSING>')
df2 = pd.DataFrame(df2.groupby(by=['ROWID','Student']).size()\
.reset_index()[['ROWID','Student']].Student.value_counts(dropna=False))\
.reset_index().rename(columns={'index':'student', 'Student':'count'}).sort_values(by='student')
df2 = df2.sort_values(by = ['count','student'], ascending=False)
y = df2['count'].values
x = df2.student.values
x_label = 'Student Status'
y_label = 'Frequency'
y_label2 = '% of Total'
title = 'Distribution of Student Status'
show_pct_y = True
tot = df2['count'].sum()
pct_format='{0:.0%}'
def my_format(num, x):
    # x is kept only for call-site compatibility; format a fraction as a whole-number percentage
    # (e.g. 0.857 -> '85%'); this also handles single-digit percentages correctly
    return '{0:d}%'.format(int(num * 100))
# build the pareto chart
fig = plt.figure(figsize=(10, 7), dpi = 100)
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
bars = ax1.bar(x = x, height = y, width = 0.9, align = 'center', edgecolor = berkeley_palette['berkeley_blue'],
color = '#0078D4', linewidth = 1, alpha = 0.8)
ax1.set_xticks(range(df2.shape[0]))
ax1.set_xticklabels(x, rotation = 45, fontsize=12)
for xtick in ax1.get_xticklabels():
xtick.set_color(berkeley_palette['black'])
ax1.get_yaxis().set_major_formatter(
tck.FuncFormatter(lambda x, p: format(int(x), ',')))
ax1.tick_params(axis = 'y', labelsize = 10)
ax1.tick_params(axis = 'y', labelcolor = berkeley_palette['pacific'])
if x_label:
ax1.set_xlabel(x_label, fontsize = 20, horizontalalignment = 'right', x = 1.0,
color = berkeley_palette['pacific'], labelpad=10)
if y_label:
ax1.set_ylabel(y_label, fontsize = 20, horizontalalignment = 'right', y = 1.0,
color = berkeley_palette['pacific'], labelpad=20)
if title:
plt.title(title, fontsize = 25, fontweight = 'semibold', color = berkeley_palette['berkeley_blue'], pad = 30, loc='center')
weights = y / tot
cumsum = weights.cumsum()
cumsum = [0.999999999 if x >= 1.0 else x for x in cumsum]
cumsum[len(cumsum)-1] = 1.0
ax2.plot(x, cumsum, color =berkeley_palette['black'], label = 'Cumulative Distribution', alpha = 1)
ax2.scatter(x, cumsum, color = berkeley_palette['rose_garden'], marker = 'D', s = 15)
ax2.set_ylabel('', color = berkeley_palette['berkeley_blue'])
ax2.tick_params('y', colors = berkeley_palette['web_grey'])
ax2.set_ylim(0, 1.01)
vals = ax2.get_yticks()
ax2.set_yticks(vals.tolist())
ax2.set_yticklabels([pct_format.format(x) for x in vals], fontsize = 10)
# hide y-labels on right side
if not show_pct_y:
ax2.set_yticks([])
else:
if y_label2:
ax2.set_ylabel(y_label2, fontsize = 20, horizontalalignment = 'right', y = 1.0,
color = berkeley_palette['pacific'], labelpad = 20)
ax2.set_yticklabels([])
ax2.set_yticks([])
#formatted_weights = [pct_format.format(x) for x in cumsum]
formatted_weights = [my_format(x, 0) for x in cumsum]
for i, txt in enumerate(formatted_weights):
ax2.annotate(text = txt, xy = (x[i], cumsum[i] + .05), fontweight = 'bold', color = berkeley_palette['black'], fontsize=15)
if '<MISSING>' in df2.student.values:
yy = df2[(df2.student.values=='<MISSING>')].values[0][1]
b = bars.get_children()[len(bars.get_children())-1]
xx = (b.get_x() + b.get_width() / 2) - 0.05
ax1.annotate(text = str(yy), xy = (xx, yy+5), fontweight = 'bold', color = berkeley_palette['rose_garden'], fontsize=15)
# Adjust the plot spine borders to be lighter
for ax in [ax1, ax2]:
for p, v in zip(["top", "bottom", "right", "left"], [0.0, 0.3, 0.0, 0.3]):
ax.spines[p].set_alpha(v)
# Set the Y-axis grid-lines to be dim and tighten the layout.
plt.grid(axis='y', alpha=.3)
plt.tight_layout()
#plt.show()
return plt
###################################################################################
###################################################################################
## DESCRIPTIVE STATISTICS STYLER (PANDAS)
###################################################################################
###################################################################################
def get_descriptive_statistics(df, cols = None):
if not cols:
cols = df.columns
rend = df[cols].describe()\
.T.style.background_gradient(cmap=sns.light_palette("#0067B0", as_cmap=True))\
.set_precision(2)
return rend
###################################################################################
###################################################################################
## LIKERT SCALE ANSWER VARIANCE PLOT
###################################################################################
###################################################################################
def get_likert_variance(df):
df2 = df.copy()
df2['likert_var'] = np.var(df2[['Interest','Effective','Intelligence','Writing','Meet']], axis=1)
df2['group'] = 'XLab'
df2.loc[(df2['Start Date'] < "2021-04-05"), 'group'] = 'Amazon'
at = alt.Chart(df2).transform_density('likert_var', as_=['likert_var','Density'], groupby=['group'])\
.mark_area(opacity=0.5, stroke=berkeley_palette['black'], strokeWidth=2)\
.encode(
x = alt.X('likert_var:Q',
axis=alt.Axis(values=list(np.arange(0.0, 9.5, 0.5)), tickCount=19), title="Variance"),
y = alt.Y('Density:Q'),
color = alt.Color('group:N',
scale=alt.Scale(domain=df2.group.unique(),
range=[berkeley_palette['berkeley_blue'], berkeley_palette['california_gold']]),
legend = alt.Legend(title="Participant Group", padding=10,
symbolType="square", symbolStrokeWidth=1, orient="right", offset=-170)))\
.properties(height=250, width=650, title={'text':'Distribution of Variance', 'subtitle':'for Likert Scale Answers'})\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-5)
return at
###################################################################################
###################################################################################
## LIKERT SCALE ANSWER VARIANCE PLOT
###################################################################################
###################################################################################
def get_likert_counts_by_group(df):
df2 = df.copy()
df2['likert_var'] = np.var(df2[['Interest','Effective','Intelligence','Writing','Meet']], axis=1)
df2['group'] = 'XLab'
df2.loc[(df2['Start Date'] < "2021-04-05"), 'group'] = 'Amazon'
tot = df2.groupby(by=['group','ROWID']).size().reset_index().rename(columns={'ROWID':'participant_id',0:'total_responses'})
lik = df2[(df2.likert_var == 0.0)].groupby(by=['group','ROWID']).size().reset_index().rename(columns={'ROWID':'participant_id',0:'uniform_responses'})
tot = tot.merge(lik, how='inner', on=['group','participant_id'])
tot['pct_uniform'] = tot.uniform_responses / tot.total_responses
tot.groupby(by=['group','uniform_responses']).size().reset_index().rename(columns={0:'count'})
base = alt.Chart().mark_bar(stroke=berkeley_palette['pacific'], strokeWidth=0.5).encode(
x=alt.X('count:Q', axis=alt.Axis(title = 'Frequency', labelPadding=10, labelFontSize=20, titleFontSize=25)),
y = alt.Y('uniform_responses:O', axis=alt.Axis(title = '', labelAngle=0, labelPadding=10, labelFontSize=20,
titleFontSize=25, values=[1,2,3,4,5,6], tickCount=6), sort=[1,2,3,4,5,6]),
color = alt.Color('uniform_responses:O', legend = None,
scale=alt.Scale(range = [berkeley_palette['bay_fog'], "#00b0da", "#004274", berkeley_palette['golden_gate'], berkeley_palette['rose_garden']]))
).properties(width=650, height=150)
txt = base.mark_text(dx=-15, size=15).encode(
text='count:Q',
color=alt.value('white')
)
p = alt.layer(base, txt).properties(width=600, height=150, title={'text':''})\
.facet(
row=alt.Row('group:N',
sort=alt.SortArray(['XLab','Amazon']),
header=alt.Header(labelColor=berkeley_palette['pacific'], labelFontSize=25,labelFont='Lato', title='')
),
data=tot.groupby(by=['group','uniform_responses']).size().reset_index().rename(columns={0:'count'}),
title='Uniform Likert Responses'
).configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-20)
return p
###################################################################################
###################################################################################
## LIKERT SCALE ANSWER VARIANCE PLOT
###################################################################################
###################################################################################
def get_wpm_plot(df):
df2 = df.copy()
df2['likert_var'] = np.var(df2[['Interest','Effective','Intelligence','Writing','Meet']], axis=1)
df2['group'] = 'XLab'
df2.loc[(df2['Start Date'] < "2021-04-05"), 'group'] = 'Amazon'
p = alt.Chart(df2).mark_bar(opacity=0.8, stroke=berkeley_palette['black'], strokeWidth=0.5).encode(
x = alt.X('wpm:Q', bin=alt.Bin(maxbins=100), title="Words per Minute (bin=100)"),
y = alt.Y('count()', title='Frequency'),
color=alt.Color('group:N',
scale=alt.Scale(range = [berkeley_palette['berkeley_blue'], berkeley_palette['california_gold']]),
legend = alt.Legend(title="Participant Group", padding=10,
symbolType="square", symbolStrokeWidth=1, orient="right", offset=-170))
).properties(height=300,width=650, title={'text':'Distribution of Response Time', 'subtitle':'Evaluated in Words per Minute'})\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-5)
return p |
package loadchoice;
import java.text.SimpleDateFormat;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import cn.hjdai.ztimer.ZooTask;
import cn.hjdai.ztimer.ZooTimer;
import cn.hjdai.ztimer.zooenum.DelayChoice;
import cn.hjdai.ztimer.zooenum.LoadChoice;
public class WEIGHT {
private ZooTask getZooTask() throws Exception {
return new ZooTask() {
public void process() throws Exception {
System.out.println("7777777777777777777777777777777");
Thread.sleep(1000L);
}
}.setTaskId("testWEIGHT")//
.setDelayChoice(DelayChoice.PRE_MOMENT)//
.setLoadChoice(LoadChoice.WEIGHT)//
.setFirstDate(new SimpleDateFormat("yyyyMMddHHmmss").parse("20161227130000"))//
.setFixedDelay(10000L);//
}
@Test
public void test() throws Exception {
Map<String, Integer> map = new HashMap<String, Integer>(2);
map.put("127.0.0.1", 3);
ZooTask zooTask = getZooTask().setWeightConfig(map);
ZooTimer zooTimer = new ZooTimer("127.0.0.1:2181", zooTask);
zooTimer.start(); // start the timer
Thread.sleep(120000L);
zooTimer.stop(); // stop the timer
}
@Test
public void test1() throws Exception {
Map<String, Integer> map = new HashMap<String, Integer>(2);
map.put("127.0.0.1", 3);
ZooTask zooTask = getZooTask().setWeightConfig(map).setLocalIP("127.0.0.1");
ZooTimer zooTimer = new ZooTimer("127.0.0.1:2181", zooTask);
zooTimer.start(); // start the timer
Thread.sleep(120000L);
zooTimer.stop(); // stop the timer
}
}
|
/* screen sets cursor based on swinid */
static void region_cursor_set(wmWindow *win, int swinid, int swin_changed)
{
for (ScrArea *sa = win->screen->areabase.first; sa; sa = sa->next) {
for (ARegion *ar = sa->regionbase.first; ar; ar = ar->next) {
if (ar->swinid == swinid) {
if (swin_changed || (ar->type && ar->type->event_cursor)) {
ED_region_cursor_set(win, sa, ar);
}
return;
}
}
}
} |
Station Keeping Trials in Ice: Ice Load Monitoring System In connection with the Statoil SKT project, DNV GL has developed a method for estimating, by full-scale measurements, the ice loads on the hull structure and the mooring tension of the anchor handling tug supply (AHTS) vessel Magne Viking. In March 2017, the vessel was equipped with an extensive measurement system in preparation for the dedicated station-keeping trial in drifting ice in the Bay of Bothnia. Data on the ice impacts acting on the hull were collected over the days of testing, together with several other parameters from the ship's propulsion system. Whilst moored, the tension in the mooring chain was monitored via a load cell and logged simultaneously with the other parameters. This paper presents the processes involved in developing the measurement concept, including the actual installation and execution phases. The basic philosophy behind the system is described, including the methods used to design an effective measurement arrangement and to develop procedures for the estimation of ice loads based on strain measurements. The actual installation and the process of obtaining the recorded data sets are also discussed.
// We migrated to the 'remote' WebDriver.
// Imports reconstructed for completeness; JUnit4TestBase and appServer come from the
// project's shared Selenium test infrastructure (package not shown in this excerpt).
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import org.junit.Ignore;
import org.junit.Test;
import org.openqa.selenium.Alert;
import org.openqa.selenium.By;
import org.openqa.selenium.Cookie;
import org.openqa.selenium.Dimension;
import org.openqa.selenium.NoAlertPresentException;
import org.openqa.selenium.NoSuchSessionException;
import org.openqa.selenium.Point;
import org.openqa.selenium.ScriptTimeoutException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.htmlunit.HtmlUnitDriver;

@Ignore
public class HtmlUnitDriverTest extends JUnit4TestBase {
@Test
public void canGetAPage() {
driver.get(appServer.whereIs(""));
assertThat(driver.getCurrentUrl(), equalTo(appServer.whereIs("")));
}
@Test
public void canGetAPageByUrl() throws MalformedURLException {
driver.get(appServer.whereIs(""));
assertThat(driver.getCurrentUrl(), equalTo(appServer.whereIs("")));
}
@Test
public void canGetPageSource() {
driver.get(appServer.whereIs(""));
assertThat(driver.getPageSource(), containsString("Hello"));
}
@Test
public void canSetImplicitWaitTimeout() {
driver.manage().timeouts().implicitlyWait(0, TimeUnit.SECONDS);
}
@Test
public void canNavigateToAPage() {
driver.navigate().to(appServer.whereIs(""));
assertThat(driver.getCurrentUrl(), equalTo(appServer.whereIs("")));
}
@Test
public void canNavigateToAnUrl() throws MalformedURLException {
driver.navigate().to(new URL(appServer.whereIs("")));
assertThat(driver.getCurrentUrl(), equalTo(appServer.whereIs("")));
}
@Test
public void canRefreshAPage() {
driver.get(appServer.whereIs(""));
driver.navigate().refresh();
assertThat(driver.getCurrentUrl(), equalTo(appServer.whereIs("")));
}
@Test
public void canNavigateBackAndForward() {
driver.get(appServer.whereIs("link.html"));
driver.findElement(By.id("link")).click();
assertThat(driver.getCurrentUrl(), equalTo(appServer.whereIs("index.html")));
driver.navigate().back();
assertThat(driver.getCurrentUrl(), equalTo(appServer.whereIs("link.html")));
driver.navigate().forward();
assertThat(driver.getCurrentUrl(), equalTo(appServer.whereIs("index.html")));
}
@Test(expected = WebDriverException.class)
public void throwsOnMalformedUrl() {
driver.get("www.test.com");
}
@Test
public void doesNotThrowsOnUnknownHost() {
driver.get("http://www.thisurldoesnotexist.comx/");
assertThat(driver.getCurrentUrl(), equalTo("http://www.thisurldoesnotexist.comx/"));
}
@Test(expected = NoSuchSessionException.class)
public void throwsOnAnyOperationAfterQuit() {
driver.quit();
driver.get(appServer.whereIs(""));
}
@Test
public void canGetPageTitle() {
driver.get(appServer.whereIs(""));
assertThat(driver.getTitle(), equalTo("Hello, world!"));
}
@Test
public void canOpenNewWindow() {
String mainWindow = driver.getWindowHandle();
openNewWindow(driver);
assertThat(driver.getWindowHandle(), equalTo(mainWindow));
}
@Test
public void canGetWindowHandles() {
openNewWindow(driver);
assertThat(driver.getWindowHandles().size(), equalTo(2));
}
@Test
public void canSwitchToAnotherWindow() {
String mainWindow = driver.getWindowHandle();
openNewWindow(driver);
Set<String> windowHandles = driver.getWindowHandles();
windowHandles.remove(mainWindow);
driver.switchTo().window(windowHandles.iterator().next());
assertThat(driver.getWindowHandle(), not(equalTo(mainWindow)));
}
@Test
public void canCloseWindow() {
String mainWindow = driver.getWindowHandle();
openNewWindow(driver);
Set<String> windowHandles = driver.getWindowHandles();
windowHandles.remove(mainWindow);
driver.switchTo().window(windowHandles.iterator().next());
driver.close();
driver.switchTo().window(mainWindow);
assertThat(driver.getWindowHandles().size(), equalTo(1));
}
@Test
public void canSwitchToFrame() {
driver.get(appServer.whereIs("frame.html"));
driver.switchTo().frame(driver.findElement(By.id("iframe")));
driver.switchTo().parentFrame();
driver.switchTo().frame("iframe");
driver.switchTo().parentFrame();
driver.switchTo().frame(0);
driver.switchTo().defaultContent();
}
@Test(expected = NoAlertPresentException.class)
public void throwsOnMissingAlertAcceptAnAlert() {
driver.switchTo().alert();
}
@Test
public void canAcceptAnAlert() {
driver.get(appServer.whereIs("alert.html"));
driver.findElement(By.id("link")).click();
Alert alert = driver.switchTo().alert();
assertThat(alert.getText(), equalTo("An alert"));
alert.accept();
}
@Test
public void canDismissAnAlert() {
driver.get(appServer.whereIs("alert.html"));
driver.findElement(By.id("link")).click();
Alert alert = driver.switchTo().alert();
assertThat(alert.getText(), equalTo("An alert"));
alert.dismiss();
}
@Test
public void canManageWindowSize() {
Dimension origSize = driver.manage().window().getSize();
driver.manage().window().setSize(new Dimension(200, 300));
assertThat(driver.manage().window().getSize(), equalTo(new Dimension(200, 300)));
driver.manage().window().maximize();
assertThat(driver.manage().window().getSize(), equalTo(origSize));
driver.manage().window().setSize(new Dimension(200, 300));
assertThat(driver.manage().window().getSize(), equalTo(new Dimension(200, 300)));
driver.manage().window().fullscreen();
assertThat(driver.manage().window().getSize(), equalTo(origSize));
}
@Test
public void canManageWindowPosition() {
Point origPosition = driver.manage().window().getPosition();
driver.manage().window().setPosition(new Point(200, 300));
assertThat(driver.manage().window().getPosition(), equalTo(new Point(200, 300)));
driver.manage().window().maximize();
assertThat(driver.manage().window().getPosition(), equalTo(origPosition));
driver.manage().window().setPosition(new Point(200, 300));
assertThat(driver.manage().window().getPosition(), equalTo(new Point(200, 300)));
driver.manage().window().fullscreen();
assertThat(driver.manage().window().getPosition(), equalTo(origPosition));
}
@Test
public void canSetGetAndDeleteCookie() {
driver.manage().addCookie(new Cookie("xxx", "yyy"));
assertThat(driver.manage().getCookieNamed("xxx"), equalTo(new Cookie("xxx", "yyy")));
assertThat(driver.manage().getCookies().size(), equalTo(1));
assertThat(driver.manage().getCookies().iterator().next(), equalTo(new Cookie("xxx", "yyy")));
driver.manage().deleteCookieNamed("xxx");
assertThat(driver.manage().getCookieNamed("xxx"), is(nullValue()));
}
@Test
public void canDeleteCookieObject() {
driver.manage().addCookie(new Cookie("xxx", "yyy"));
driver.manage().deleteCookie(new Cookie("xxx", "yyy", appServer.getHostName(), "/", null));
assertThat(driver.manage().getCookieNamed("xxx"), is(nullValue()));
}
@Test
public void canSetGetAndDeleteMultipleCookies() {
driver.manage().addCookie(new Cookie("xxx", "yyy"));
driver.manage().addCookie(new Cookie("yyy", "xxx"));
assertThat(driver.manage().getCookies().size(), equalTo(2));
driver.manage().deleteAllCookies();
assertThat(driver.manage().getCookies().size(), equalTo(0));
}
@Test
public void canExecuteScriptThatReturnsAString() {
driver.get(appServer.whereIs(""));
Object result = getWebDriver().executeScript("return window.location.href;");
assertThat(result, instanceOf(String.class));
assertThat(((String) result), equalTo(appServer.whereIs("")));
}
@Test
@SuppressWarnings("unchecked")
public void canExecuteScriptThatReturnsAnArray() {
driver.get(appServer.whereIs(""));
Object result = getWebDriver().executeScript("return [window.location.href];");
assertThat(result, instanceOf(List.class));
assertThat(((List<String>) result), equalTo(Arrays.asList(appServer.whereIs(""))));
}
@Test
public void canExecuteScriptThatReturnsAnElement() {
driver.get(appServer.whereIs(""));
Object result = getWebDriver().executeScript("return document.body;");
assertThat(result, instanceOf(WebElement.class));
assertThat(((WebElement) result).getTagName(), equalTo("body"));
}
@Test
@SuppressWarnings("unchecked")
public void canExecuteScriptThatReturnsAListOfElements() {
driver.get(appServer.whereIs("form.html"));
Object result = getWebDriver().executeScript("return document.getElementsByTagName('input');");
assertThat(result, instanceOf(List.class));
List<WebElement> elements = (List<WebElement>) result;
assertThat(elements.size(), equalTo(3));
}
@Test
@SuppressWarnings("unchecked")
public void canExecuteScriptThatReturnsLocation() {
driver.get(appServer.whereIs(""));
Object result = getWebDriver().executeScript("return window.location;");
assertThat(result, instanceOf(Map.class));
assertThat(((Map<String, Object>) result).get("href"), equalTo((Object) appServer.whereIs("")));
}
@Test
public void canExecuteAsyncScript() {
Object result = getWebDriver().executeAsyncScript("arguments[arguments.length - 1](123);");
assertThat(result, instanceOf(Number.class));
assertThat(((Number) result).intValue(), equalTo(123));
}
@Test(expected = ScriptTimeoutException.class)
public void shouldTimeoutIfScriptDoesNotInvokeCallbackWithAZeroTimeout() {
driver.get(appServer.whereIs("ajaxy_page.html"));
getWebDriver().executeAsyncScript("window.setTimeout(function() {}, 0);");
}
@Test
public void shouldNotReturnSourceOfOldPageWhenLoadFailsDueToABadHost() {
driver.get(appServer.whereIs(""));
String originalSource = driver.getPageSource();
driver.get("http://thishostdoesnotexist.norshallitever");
String currentSource = driver.getPageSource();
assertThat(currentSource, not(equalTo(originalSource)));
}
@Test(expected = UnsupportedOperationException.class)
public void imeIsNotSupported() {
driver.manage().ime();
}
private HtmlUnitDriver getWebDriver() {
return ((HtmlUnitDriver) driver);
}
private void openNewWindow(WebDriver driver) {
((HtmlUnitDriver) driver).executeScript("window.open('new')");
}
} |
Assessment of the Nutritional Profile of Women with Breast Cancer from the Agadir Region (South of Morocco) Although the incidence of breast cancer and the resulting mortality are very high in Morocco, no study has been carried out on the role of nutritional factors in the development of BC. The objective of this study was to assess the nutritional profile of women with BC in southern Morocco. Methods: The study was conducted with 91 women with breast cancer. Face-to-face semi-structured interviews were used for the assessment of the nutritional profile and the collection of socio-economic data. Biometric measures were carried out in parallel. The results showed that postmenopausal women had a significantly higher mean weight and Body Mass Index than non-menopausal women (p < 0.015). The majority of patients (79%) had energy intakes above recommendations. The proportion of lipids was excessive in 46% of cases. Intakes of saturated fatty acids were high in 14% of patients, but those of unsaturated fatty acids were high in over 50% of patients. About 58% had a very high intake of fast sugars. Cholesterol intake was high in 40% of cases. Vitamins A, E and D were provided in small amounts, respectively in 66%, 45% and 91% of patients. Likewise, intakes were low for water-soluble vitamins, especially Vitamins B9 (62.6%) and B12 (54%). The large majority of participants in our study (92%) had very low calcium intakes. Intakes of magnesium, zinc and selenium were insufficient in 43%, 35% and 48% of patients respectively. Obesity, excessive energy and sugar intake, as well as mineral and vitamin deficiencies could explain the high incidence of breast cancer in southern Morocco. A balanced diet would help fight against breast cancer.
Among tumors, breast cancer (BC) is one of the most frequently diagnosed, with incidence and mortality rates reaching 24.5% and 15.5% worldwide, respectively 1. BC is a complex condition subdivided into molecular subtypes related to the status of estrogen (ER) and progesterone (PR) receptors, and human epidermal growth factor (HER2) receptors. Similarly, two phenotypic subtypes of expression of hormonal receptors in the epithelial cells of the ducts or lobules of the mammary glands are distinguished; luminal A cells have more ER than luminal B cells. BC may have a different etiology depending on the status of the receptors 2. For example, the literature reported a strong association of the ER+ subtype with reproductive factors 3, hormone replacement therapy in menopause 4 and the body mass index (BMI) 5. Diet also plays an important role in the development of BC 6. BC was favored by cereals and available sugars and inversely related to vegetables and polyunsaturated fatty acids. Nutrition is also of major importance as many patients (40%) make lifestyle changes, including diet, following a positive diagnosis of cancer. Some studies have reported that patients with BC have an inadequate diet, which may contribute to their deteriorating health status during treatment 13. A balanced and healthy diet could prevent the occurrence of BC, improve the state of health of patients and prevent comorbidities linked to this condition, particularly cardiovascular diseases, which represent the most frequent comorbidity and the main cause of death unrelated to BC in women over 50 years of age 14. Incidence (36.9%) and mortality (24.7%) values linked to BC in Morocco are higher than the world average 15.
This raises questions about the etiology of this disease, especially with regard to the diet and quality of life of Moroccan women. To our knowledge, no study has been undertaken to evaluate the nutritional status of women with BC in Morocco. This study aims to assess the nutritional profile of women from the Agadir region (southern Morocco) with BC, in relation to their pathological status and their anthropometric characteristics.
Sampling and Data Collection
The study was carried out between January 2019 and February 2020. A convenience sample of 91 women with BC was recruited from the Hassan II Regional Hospital Center and the Agadir Regional Center of Oncology. All subjects were initially informed that their biological and anthropometric data would be exploited for scientific purposes. All participants signed informed and express consent. The study was authorized by the Moroccan Ministry of Health (authorization number 3851/02092017) and approved by the Moroccan Association of Research and Ethics (approval number 4/REC/20). Recruitment concerned patients with BC without metastases, with postoperative, preoperative, ongoing or completed treatment, and without any sign of recurrence or relapse. The exclusion criteria were metastatic BC and other cancers, decompensated heart disease, hepatic failure, renal failure, psychiatric illness and any refusal to sign consent. Data were collected using a questionnaire in direct, face-to-face interviews and from medical records. They included education level, occupation, marital status, parity and hormonal status of the respondent (menopausal status and use of contraception). BCs were classified into molecular subtypes based on the expression of hormone receptors (ER+/ER−; PR+/PR−; HER2+/−; luminal A and luminal B). Body mass, height, waist circumference and hip circumference were determined and compared to WHO recommendations 16. Abdominal obesity was indicated if the waist circumference was ≥ 88 cm or the waist-to-hip ratio (WHR) was ≥ 0.85. The body analysis was performed using a Tanita® BC 418 MA analyzer (Tanita Corporation, USA), which determines weight, BMI, percentage fat mass (FM), lean mass (LM) and visceral fat (VF). All of these measurements were carried out while standing. The BMI was classified according to the WHO recommendations as underweight (BMI < 18.5 kg/m2), normal weight (18.5 ≤ BMI ≤ 24.9 kg/m2), overweight (25 ≤ BMI ≤ 29.9 kg/m2) and obese (BMI > 30 kg/m2). Based on their body fat (BF) percentage, the patients were classified into a lean group, a normal BF group and an excess BF group (with body fat levels < 24%, between 24% and 33%, and > 33%, respectively). All patients were asked to complete a food diary for three non-consecutive days (two working days and one weekend day). To estimate food servings, photos of household measures such as plates, spoons, glasses, bowls and cups were shown to patients using SuviMax survey books. Nutritional intake calculations were performed using Nutrilog® software (Ver2.20). Several nutritional variables were evaluated, including energy intake, macronutrients (proteins, carbohydrates, lipids, cholesterol and fiber), vitamins (A, D, E, B2, B6, folic acid, B12, C), minerals and trace elements (calcium, iron, magnesium, zinc, selenium). Due to the lack of standards or recommended intakes specific to the Moroccan population, nutritional intakes were determined by referring to those recommended by The Institute of Medicine of the National Academies (USA) 17.
Statistical Analyses
Statistical analyses were performed using SPSS-20 software. Data are presented as mean ± standard deviation for quantitative variables and as percentages for qualitative variables. Comparisons between groups were made using analysis of variance (ANOVA) and Student's t-test. Pearson's correlation test was used to assess associations between nutrient intakes and the anthropometric parameters of the different groups.
Results
Table 1 summarizes the socio-demographic and pathological characteristics of the patients. The results show that 79% were from urban areas, close to 65% were illiterate and only 3.3% had a university level of education. Three quarters were housewives. The majority were married (56%). Postmenopausal women represented 54% of the sample, and 65% of the respondents used oral contraception. Table 2 shows the anthropometric parameters of the interviewees. The average age was 48.54 ± 9.83 years. Their mean weight and BMI were 68.69 ± 11.60 kg and 28.56 ± 4.73 kg/m2, respectively. Postmenopausal women had a significantly higher mean weight and BMI than non-menopausal women (p < 0.015). We found that about 75% of them were overweight or obese and 84% had a very high waist circumference. The patients also presented an excess of BF. The BF rate was significantly higher in postmenopausal than in premenopausal women (p < 0.007). Table 3 displays the intakes of various nutrients among the participants in our survey. The majority of them (79%) had energy intakes well above the recommendations. The contribution of lipids to these intakes was excessive for 46% of patients. As for the qualitative aspect, the intakes of saturated fatty acids were only high in 14% of cases. Intake of saturated fatty acids was positively correlated with tumor size (r = 0.233 and p = 0.026). The consumption of monounsaturated fatty acids was high in 50.55% and low in 23% of cases (Table 3). Intakes of polyunsaturated fatty acids were very high in nearly 53% of patients and only 6.6% had very low intakes. The consumption of sugars was important. About 42% had a very high intake, with a high consumption of fast sugars in 58%. As for proteins, the intake was satisfactory in 61.54% and low in only 13.2% of cases. No significant difference was observed between the cancer subtypes except for sugar intake, which was significantly higher in women with the PR+ subtype compared to the PR− subtype (Table 4). The cholesterol intake was too high in 40% of patients (Table 3). These intakes were significantly greater in premenopausal women than in postmenopausal women (p < 0.01). The fiber content of the diet was low in approximately 31% of patients. Obese women consumed significantly lower amounts of fiber than non-obese women (p < 0.05). The majority of the patients (92%) had very low Ca intakes. On the other hand, phosphorus intakes were very high compared to the recommendations in 93% of cases. Mg, Zn and Se intakes were insufficient in 43%, 35% and 48% of cases respectively, but Fe intake was satisfactory in 74% of patients. For fat-soluble vitamins, approximately 66%, 45% and 91% of patients had very low intakes of vitamins A, E and D respectively. As for water-soluble vitamins, it should be noted that the intakes were insufficient in vitamin C for 32%, B1 for 24%, B2 for 35.2%, B6 for 10%, B9 for 62.6%, and B12 for 54%. The comparison between molecular subtypes is given in Table 5.
The same was true for Vit B2, whose intake was significantly lower for the ER+ and PR+ compared to the ER- and PR- subtypes respectively (p < 0.001), and for Vit B12, with a lower value (p < 0.001) for the HER2+ compared to the HER2- subtype (Table 5).

Discussion
This work represents the first study of the main anthropometric and nutritional characteristics of Moroccan women with BC. According to our results, about 38% of the patients were obese. This value is higher than the 29% reported for Moroccan women 18. An increase in body weight in women with BC was previously reported 19. Based on BMI, % fat and waist-to-hip ratio, the participants in our study mainly suffered from abdominal obesity. The results also showed that overweight and obesity, particularly abdominal obesity, affect postmenopausal women more than premenopausal women. Some studies noted weight gain during neo-adjuvant therapy in 50-96% of women with BC 20,21. This excess weight can be a factor of poor prognosis for patients. In fact, obese women are more likely to have large tumors, advanced disease at diagnosis and high rates of metastasis, and can develop resistance to endocrine therapy 22,23. Obesity was also associated with a significant increase in the risk of death from all causes and a marginally significant risk of mortality from BC 19. In this regard, Flanagan et al. 24 reported that an increase of 1 kg/m² in BMI would imply a 3% increase in the probability of recurrence of BC, and that women who were obese at the time of BC diagnosis had a 1.6 times higher risk of recurrence than women with a normal BMI. Other authors noted the existence of an increased risk of developing a second BC and a poor prognosis associated with obesity and/or weight gain 25,26. Obesity and overweight can also affect the effectiveness of antineoplastic treatments, increase their side effects and complicate management due to related comorbidities such as hypertension, hyperlipidemia and diabetes 27. Indeed, fat mass, in particular the visceral fat responsible for abdominal obesity, is metabolically active 21. It secretes several substances such as adipokines, growth factors and inflammatory cytokines. These molecules are involved in cell survival or apoptosis, angiogenesis, migration and proliferation 21, which allows them to play an important role in the occurrence, development, recurrence, metastasis and mortality from BC 28,29. In general, the nutritional intakes recorded by our study were very unbalanced. Energy intakes were very high in the majority of patients, especially those in post-menopause. Their carbohydrate intakes, especially simple sugars, were very high. These factors, combined with a sedentary lifestyle, explain the overweight and obesity recorded, especially among postmenopausal women. Excessive intakes can promote tumor growth 30. Likewise, high intakes of saturated fat increase the risk of mortality due to BC, but also due to associated comorbidities 31. Food intake of vitamins A, D and E was low in the majority of our patients. This finding is hardly reassuring for the patients. Indeed, large intakes of β-carotene (provitamin A) before BC diagnosis were significantly associated with an improvement in overall survival 32. We also observed significantly lower intakes of Vit A in women with positive markers of the ER, PR and HER2 subtypes. These results are in agreement with those of Cui et al. 33, who report an inverse association between the risk of BC and dietary carotenoids in menopausal women with ER+ and PR+ tumors.
Similarly, the vitamin D deficiency observed in our study is thought to be one of the risk and mortality factors associated with BC 34. Vitamin D is involved in the differentiation, proliferation and apoptosis of epithelial cells. A normal serum 25-hydroxy-vitamin D level (> 30 ng/mL) at diagnosis was significantly correlated with an improvement in BC-specific survival at least after 3 years of follow-up. Vitamin E also has pro-apoptotic, antiproliferative and angiogenesis-inhibitory activities 38,39. It could also be associated with antineoplastic therapies to fight against metastases and improve immune and anti-inflammatory functions. In fact, adequate vitamin E intake was associated with a decreased risk of recurrence of BC and of overall mortality 38,41,42. Vitamin B9 and B12 intakes were low in 62% and 52% of the women interviewed, respectively. This deficiency can have negative consequences for women with BC, as folates play an important role in DNA synthesis, methylation and repair. However, supplementation of the diet with folates must take into account that, in high doses, this vitamin constitutes a risk factor for the development of cancers 46. Vitamin B12 can have a positive effect in BC. Its use before and during chemotherapy allowed a significant increase in BC survival. In addition, various forms of Vit B12 showed anti-tumor activity. Thus, methylcobalamin slowed tumor growth and induced apoptosis in carcinoma cells in mice, although growth promoters such as androgens had been used 51,52. Both 5'-deoxyadenosylcobalamin and methylcobalamin have cytotoxic properties 53. Methylcobalamin, in addition to its action on tumor growth, increased survival time in mice 54. Another factor compromising the patients' prognosis is the low intake of vitamin C. Indeed, several studies supported the fact that an adequate intake of vitamin C was associated with a reduction in the risk of recurrence and/or mortality in patients with BC 55,56. High doses of vitamin C induce apoptosis, reduce cell proliferation and the number of invading cancer cells, and prevent metastasis 57. These effects are even more marked in cases of aggressive BC, as is the case with the triple-negative subtype 58. Our results also showed that the Ca, Mg, Zn and Se intakes were below the required values. Even if the relationship between Ca intake and BC is not yet well established, this mineral may be involved in breast carcinogenesis through its important role in the regulation of cell proliferation, differentiation and apoptosis 59,60. In fact, a high calcium intake decreases breast carcinogenesis and the uncontrolled proliferation of epithelial cells induced by fat in the breast and/or by a chemical carcinogen in rodents 61,62. On the other hand, low intakes of magnesium could compromise patient survival. Indeed, a deficiency in Mg can alter certain biological functions in women with BC, in particular those linked to cell proliferation and signaling, and DNA synthesis and repair 63,64. However, any additional intake of Mg must take into account that of Ca, since the latter behaves as a competitor of Mg, particularly in terms of intestinal absorption 65. Deficiencies in Zn and Se, associated with those of vitamins C and E, can lead to an increase in cellular oxidative stress, which would lead to DNA damage 33. In fact, the development of BC is accompanied by oxidative stress, which increases with the progression of the disease, and the levels of antioxidant defenses decrease during antineoplastic treatments 66,67.
In addition to their antioxidant role, vitamin E, vitamin C and Se selectively induce apoptosis in cancer cells 66,68,69. Furthermore, Zn and organic Se inhibit tumor growth and provide more protection against BC metastasis 70,71.

Conclusion
The nutritional profile of Moroccan women with BC showed many imbalances. On the one hand, the intakes of energy, free sugars and saturated fatty acids were high. On the other hand, vitamins A, D, E, B9 and B12 and the trace elements Zn and Se showed a significant deficit. Improving nutritional quality would help fight against the occurrence of breast cancer and help patients recover better following antineoplastic treatments.

Acknowledgement
We would like to thank all the respondents who participated in this work.
Conflict of interest: None.
Funding source: None.
Authors' contributions: Authors contributed equally to this work.
package io.webby.netty.marshal;
import com.google.common.io.CharStreams;
import com.google.inject.Inject;
import io.webby.common.InjectorHelper;
import jodd.json.JsonParser;
import jodd.json.JsonSerializer;
import org.jetbrains.annotations.NotNull;
import java.io.*;
import java.nio.charset.Charset;
public record JoddJsonMarshaller(@NotNull JsonSerializer serializer, @NotNull Charset charset) implements Json, Marshaller {
@Inject
public JoddJsonMarshaller(@NotNull InjectorHelper helper, @NotNull Charset charset) {
this(helper.getOrDefault(JsonSerializer.class, JoddJsonMarshaller::defaultJoddSerializer), charset);
}
@Override
public @NotNull Marshaller withCustomCharset(@NotNull Charset charset) {
return new JoddJsonMarshaller(serializer, charset);
}
@Override
public void writeChars(@NotNull Writer writer, @NotNull Object instance) throws IOException {
serializer.serialize(instance, writer);
}
@Override
public @NotNull String writeString(@NotNull Object instance) {
return serializer.serialize(instance);
}
@Override
public <T> @NotNull T readChars(@NotNull Reader reader, @NotNull Class<T> klass) throws IOException {
return readString(CharStreams.toString(reader), klass);
}
@Override
public <T> @NotNull T readString(@NotNull String str, @NotNull Class<T> klass) {
return JsonParser.create().parse(str, klass);
}
private static @NotNull JsonSerializer defaultJoddSerializer() {
return JsonSerializer.create()
.deep(true);
}
}
|
A group said it had been planning 'rapid response' protests in case of a major development that could threaten the Robert Mueller Russia probe.
Protests in at least 900 cities nationwide, aimed at protecting Special Counsel Robert Mueller's investigation into Russian interference in the 2016 presidential election, are planned for Thursday. The mobilization comes after President Donald Trump fired Attorney General Jeff Sessions Wednesday, replacing him with a man who has previously criticized the probe and offered suggestions on how to slow it down.
The group Nobody Is Above The Law said on its website that the protests are planned in cities across the U.S. at 5 p.m. local time Thursday, and added that Trump "crossed a red line."
Sessions resigned Wednesday -- indicating he did so at Trump's request -- and the President appointed Matthew Whitaker, Sessions' chief of staff, as the Acting Attorney General. The move superseded the regular order of succession, which would have placed Deputy Attorney General Rod Rosenstein in the Acting AG role.
Rosenstein has been overseeing the Mueller probe after Sessions recused himself from the investigation, but a Department of Justice spokeswoman said Whitaker would be "in charge of all matters under the purview" of the DOJ.
Critics worry Whitaker may be unlikely or unwilling to defend the investigation, given his history of partisanship and loyalty to Trump. Nobody Is Above The Law said it wants Whitaker to recuse himself from supervising the investigation.
"Trump putting himself above the law is a threat to our democracy, and we’ve got to get Congress to stop him," the group said on its website.
A Google map screenshot from the Nobody Is Above The Law website showing hundreds of planned protests for Nov. 8, 2018. The protests are meant to raise awareness to protect the investigation of Special Counsel Robert Mueller.
3. Preventing the investigation from being conducted freely by firing Rosenstein or repealing regulations establishing the office.
4. If Mueller's findings show significant wrongdoing by Trump or if the findings are hidden from the public.
How many people will show up remains to be seen. The group also urged people to pressure Congress to protect the Mueller investigation.
The change of leadership at the DOJ came one day after Democrats won control of the House of Representatives, meaning Trump will face more oversight than he has under the Republican-controlled Congress. |
PWE-194 We've got to the caecum, now what will we do with the polyps? Introduction: Recent work, especially in the national Bowel Cancer Screening Programme (BCSP), has focussed on adenoma detection and removal as a marker of quality of colonoscopy. It is vital that this quality assurance is applied to all patients undergoing colonoscopy and that we move away from caecal intubation rate (CIR) as the main marker of a successful colonoscopy. We aimed to review practice in terms of adenoma detection and removal technique among all NHS colonoscopists in a busy district general hospital. Methods: Procedural data were retrospectively collected from Endosoft reporting software for all colonoscopies performed in a 6-month period. BCSP lists were excluded. The reports were reviewed and data collected including operator, size of list, extent of procedure, and details of polyps found (size, location, description, whether removed or biopsied, method of removal and whether a tattoo was used). In addition, the completeness of the report was recorded. Where polyps were removed, the histology result was also recorded. Results: 472 procedures were performed by 18 operators: three trainees, two nurse endoscopists, and 13 consultants (eight gastroenterology, five surgical). 159 procedures identified polyps (246 polyps in total), with a unit polyp detection rate of 33.7%. Individual polyp detection rates varied between 14.7% and 58.8%. Histology showed a unit adenoma detection rate (ADR) of 21%. Eight cancers and one polyp cancer were detected. Documentation of polyp location was good (240/246) but size and description were less well documented (171 and 185 out of 246, respectively). 211 polyps were removed, 31 were left in situ, and in 4 this was unclear. 26 polyps removed were ≥10 mm, of which nine with a snare and 16 by EMR (one unknown). Smaller polyps were removed by a variety of methods (Abstract PWE-194 Table 1). Abstract PWE-194 Table 1, Polypectomy methods (polyp size: cold biopsy / hot biopsy / snare / EMR / not removed / unknown): ≤3 mm: 10 / 0 / 23 / 14 / 6 / 0; 4-5 mm: 2 / 3 / 18 / 18 / 3 / 1; 6-9 mm: 0 / 2 / 18 / 7 / 1 / 2. Conclusion: ADR in this unit is comparable to elsewhere in the UK, but not as high as within the BCSP, although this represents a different patient population. Documentation of these polyps varied greatly and could be improved. Detection rate and removal methods varied widely between endoscopists. This prompted the creation of an aide-memoire poster (see Abstract PWE-194 Figure 1) to be displayed in the endoscopy room, advising on documentation and highlighting the current guidance for management of polyps. Teaching was also undertaken at dedicated polypectomy afternoons, with a view to re-assessing polyp management at a later date, using ADR as a quality marker. Abstract PWE-194 Figure 1. Competing interests: None declared.
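As a quick check of the headline figures, the detection and removal rates quoted above are simple proportions; the short sketch below recomputes them from the counts given in the abstract.

```python
procedures = 472          # total colonoscopies in the 6-month audit
with_polyps = 159         # procedures in which at least one polyp was found
polyps_total = 246
polyps_removed = 211

pdr = with_polyps / procedures            # unit polyp detection rate
removal_rate = polyps_removed / polyps_total

print(f"Polyp detection rate: {pdr:.1%}")        # ~33.7%, as reported
print(f"Polyps removed:       {removal_rate:.1%}")
```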
The first batch of the Chronic Pain CBIS Transdermal Patch will soon be released, offering a cannabinoid-based alternative to people around the globe who self-medicate for fibromyalgia and other chronic pain conditions, Cannabis Science has announced.
In November 2016, the company announced the development of two new medications for pain relief in people with fibromyalgia, neuropathy, diabetic nerve pain, back strains, and other pain-related conditions. These pain relievers correspond to transdermal adhesive patches that deliver a certain dose of medication into the bloodstream by absorption through the patient’s skin.
The first CBIS Transdermal Patch contains high-potency cannabinoid (CBD) extract, the second major cannabinoid in marijuana after tetrahydrocannabinol (THC). CBD has all the anti-inflammatory and pain-relieving properties without any psychoactive effects, and research has demonstrated that it can be an effective treatment for inflammatory pain. Some studies have even demonstrated that CBD might outperform traditional pain medication in some cases.
“The CBIS medicated adhesive transdermal patch is placed on the skin to deliver a specific dose of cannabinoid medications into the bloodstream,” Allen Herman, chief medical officer of Cannabis Science, said in a press release. “Self-medicating patients have been reporting promoted healing effects to specific injured areas of the body experiencing chronic pain.”
Herman said the transdermal patch has an advantage over other types of medication delivery, including oral, topical, intravenous, and intramuscular, because it can “provide a controlled release of the medication into the patient, usually through a porous membrane covering a reservoir of medication or body heat that melts thin layers of high potency cannabinoid formulations embedded in the adhesive; the medication slowly enters the bloodstream at the chronic pain spot and then penetrates the central nervous system, delivering the pain relief reported by self-medicating patients.”
Cannabis Science plans to continue to grow its distribution network. The company announced that it will add Kush Factory in Los Angeles, California, as CBIS’ newest Spotlight Dispensary for the month of July 2017. As Spotlight Dispensary for the month, Kush Factory will be the first to carry the Chronic Pain CBIS Transdermal Patches. |
// Run executes the scheduler's tasks.
func (s *Scheduler) Run(ctx context.Context) error {
if s.dirty {
if err := s.sort(); err != nil {
return err
}
s.dirty = false
}
for _, set := range s.tasks {
if err := s.runner.Run(ctx, set); err != nil {
return err
}
}
return nil
} |
/*
* Copyright (c) 2020-2022 <NAME> <EMAIL>
* zlib License, see LICENSE file.
*/
#ifndef BF_WAVE_GENERATOR_H
#define BF_WAVE_GENERATOR_H
#include "bn_math.h"
#include "bn_span.h"
#include "bn_fixed.h"
namespace bf
{
class wave_generator
{
public:
[[nodiscard]] constexpr int speed() const
{
return _speed;
}
constexpr void set_speed(int speed)
{
BN_ASSERT(speed >= 0, "Invalid speed: ", speed);
_speed = speed;
}
[[nodiscard]] constexpr int amplitude() const
{
return _amplitude;
}
constexpr void set_amplitude(int amplitude)
{
BN_ASSERT(amplitude >= 1 && amplitude <= 4, "Invalid amplitude: ", amplitude);
_amplitude = amplitude;
}
constexpr void generate(bn::span<bn::fixed> values) const
{
switch(_amplitude)
{
case 1:
_generate_impl<1>(_speed, values);
break;
case 2:
_generate_impl<2>(_speed, values);
break;
case 3:
_generate_impl<3>(_speed, values);
break;
case 4:
_generate_impl<4>(_speed, values);
break;
default:
BN_ERROR("Invalid amplitude: ", _amplitude);
break;
}
}
private:
int _speed = 1024;
int _amplitude = 4;
template<int Amplitude>
constexpr static void _generate_impl(int speed, bn::span<bn::fixed>& values)
{
int a = (4096 / (1 << Amplitude));
int b = (1 << (Amplitude - 1));
bn::fixed* values_data = values.data();
for(int index = 0, limit = values.size(); index < limit; ++index)
{
int lut_angle = int(uint16_t(index * speed)) >> 5;
int sin = bn::lut_sin(lut_angle).data();
values_data[index] = (sin / a) - b;
}
}
};
}
#endif
|
import unittest
import nlpaug.augmenter.word as naw
class TestStopWords(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.stopwords = ['a', 'an', 'the']
def test_delete(self):
text = 'The quick brown fox jumps over lazy dog'
self.assertLess(0, len(text))
aug = naw.StopWordsAug(stopwords=['fox'])
augmented_text = aug.augment(text)
self.assertNotEqual(text, augmented_text)
self.assertTrue('fox' not in augmented_text)
# Test case sensitive = False
aug = naw.StopWordsAug(stopwords=['the'], case_sensitive=False)
augmented_text = aug.augment(text)
self.assertNotEqual(text, augmented_text)
# Test case sensitive = True
aug = naw.StopWordsAug(stopwords=['the'], case_sensitive=True)
augmented_text = aug.augment(text)
self.assertEqual(text, augmented_text)
|
// Returns a page that displays the services in a cluster
func NewServicesPage() *ClusterDetailsPage {
servicesTable := tview.NewTable()
servicesTable.
SetBorders(true).
SetBorder(true).
SetTitle(" 📋 ECS Services ")
servicesTableInfo := &ui.TableInfo{
Table: servicesTable,
Alignment: []int{ui.L, ui.L, ui.L, ui.L, ui.L, ui.L, ui.R, ui.R, ui.R},
Expansions: []int{1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1},
Selectable: true,
}
ui.AddTableConfigData(servicesTableInfo, 0, [][]string{
{"#", "Name ▾", "TaskDef", "Images", "Status", "Deployed", "Tasks"},
}, tcell.ColorYellow)
return &ClusterDetailsPage{
"Services",
servicesTableInfo,
servicesPageRenderer(servicesTableInfo),
}
} |
Criminal Investigation - Art or a Science There is a trend among teachers and practitioners of criminal investigation to consider quantitative methods of research to be far more valid and efficacious than qualitative methods. The purpose of the paper is to persuade the reader that criminal investigation is more art than science, and quantitative research should merely be ancillary to qualitative. More articles and books are needed that are not limited in scope to the quantifiable, but are based upon or address the subject of qualitative methodology. |
The Role of PTIP in Maintaining Embryonic Stem Cell Pluripotency Pax transactivation domaininteracting protein (PTIP) is a ubiquitously expressed, nuclear protein that is part of a histone H3K4 methyltransferase complex and is essential for embryonic development. Methylation of H3K4 is an epigenetic mark found on many critical developmental regulatory genes in embryonic stem (ES) cells and, together with H3K27 methylation, constitutes a bivalent epigenetic signature. To address the function of PTIP in ES cells, we generated ES cell lines from a floxed ptip allele and deleted PTIP function with Cre recombinase. The ptip−/− ES cell lines exhibited a high degree of spontaneous differentiation to trophectoderm and a loss of pluripotency. Reduced levels of Oct4 expression and H3K4 methylation were observed. Upon differentiation, ptip−/− embryoid bodies showed reduced levels of marker gene expression for all three primary germ layers. These results suggest that the maintenance of H3K4 methylation is essential and requires PTIP function during the in vitro propagation of pluripotent ES cells. STEM CELLS 2009;27:15161523 |
export interface IQuiz {
author: string;
_id: string;
question: string;
answer: string;
topic: string;
created: Date;
}
export interface IQuizForm {
author?: string;
question: string;
answer: string;
topic: string;
}
export interface IQuizFn extends IQuiz {
quizDelete: () => void;
showAnswer: string;
revealAnswer: () => void;
}
export interface IQuizDelete {
deleteQuiz: () => void;
}
export interface IModal {
show: boolean;
handleClose: () => void;
handleOpen: () => void;
addQuiz: (data: IQuiz) => void;
}
|
The effect of electronic dental analgesia during sonic scaling. The aim of the present study was to investigate the effects of electronic dental analgesia (EDA) during sonic scaling. The clinical trial included 30 healthy adult subjects and was conducted as a randomised single-blind split-mouth design. The applied procedure consisted of periodontal scaling by means of a sonic scaler, while using the EDA device either in an active or placebo state. Rather similar results were obtained for the subjective pain rating in both the active and the placebo trials. When patients rated their discomfort on a scale 0-4 from no pain to very severe pain, the mean (s.d.) score for both the EDA and the placebo was 1.2 (0.6). The subjective pain estimate was positively correlated to the electrical current intensity provided. This implied that with a stronger pain experience, patients tried to administer more anaesthesia by turning the dial of the control box to an increased intensity of the electrical current. This remained insufficient to eliminate pain sensation. It was concluded that application of electronic dental anaesthesia in periodontal treatment remains questionable. |
///<reference path="../../../node_modules/@types/meteor-accounts-phone/index.d.ts"/>
import { Injectable } from '@angular/core';
@Injectable()
export class PhoneService {
verify(phoneNumber: string): Promise<void> {
console.log(phoneNumber);
return new Promise<void>((resolve, reject) => {
Accounts.requestPhoneVerification(phoneNumber, (e: Error) => {
if (e) {
return reject(e);
}
resolve();
});
});
}
loginWithUsername(username :string, password : string) {
console.log(username, password);
return new Promise<void>((resolve, reject) => {
Meteor.loginWithPassword(username, password, (e: Error) => {
if (e) {
return reject(e);
}
resolve();
});
});
}
signup(objectblock) {
console.log(objectblock.username, objectblock.fname);
return new Promise<any>((resolve, reject) => {
Meteor.call('signup', objectblock.username, objectblock.password, objectblock.email, objectblock.fname, objectblock.lname, objectblock.imsi,
(error, result) => {
if(error) reject(error);
if(result) resolve(result);
});
});
}
login(phoneNumber: string, code: string): Promise<void> {
console.log(phoneNumber);
return new Promise<void>((resolve, reject) => {
Accounts.verifyPhone(phoneNumber, code, (e: Error) => {
if (e) {
return reject(e);
}
resolve();
});
});
}
logout(): Promise<void> {
return new Promise<void>((resolve, reject) => {
Meteor.logout((e: Error) => {
if (e) {
return reject(e);
}
resolve();
});
});
}
}
|
Letters to the Editor Although it is true that the rate of gastric emptying varies with the volume of its contents, it also varies with a number of other factors. The volume difference between preterm and term neonates in our study was dictated by practicality and in an attempt to reflect actual nutritional management of neonates. The feeding of 10 ml/kg to term babies might have resulted in almost complete gastric emptying by 30 minutes, but on the other hand we did not feel justified in exposing a small premature neonate to a bolus of 20 ml/kg. |
Linking peace with reconciliation Purpose The purpose of this paper is to explain Japans role in the peace process on the Korean Peninsula that began in early 2018. Design/methodology/approach This paper emphasizes the historical context of international politics in Northeast Asia, rather than power politics or geopolitics. The paper reaffirms the significance of the ongoing peace process on the Korean Peninsula by considering a synthesis of three joint declarations published in 1998, 2000 and 2002 between the Republic of Korea (ROK) and Japan, the ROK and Democratic Peoples Republic of Korea (DPRK), and between the DPRK and Japan. Findings The normalization of diplomatic relations between DPRK and Japan, along with reaffirmation of the joint declaration between the ROK and Japan, and the Panmunjeom Declaration, would be a base for denuclearizing Northeast Asia. Originality/value In Northeast Asia, historical reconciliation among the two Koreas and Japan and peace-building between the two parties on the Peninsula are closely linked. Moreover, the three bilateral relationships among these three parties are also the basis for creating a new multilateral security order in Northeast Asia. |
package agent
import (
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"time"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/stretchr/testify/require"
)
func TestDiscoveryChainRead(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), "")
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
newTarget := func(service, serviceSubset, namespace, datacenter string) *structs.DiscoveryTarget {
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, datacenter)
t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul")
t.Name = t.SNI
return t
}
for _, method := range []string{"GET", "POST"} {
require.True(t, t.Run(method+": error on no service name", func(t *testing.T) {
var (
req *http.Request
err error
)
if method == "GET" {
req, err = http.NewRequest("GET", "/v1/discovery-chain/", nil)
} else {
apiReq := &discoveryChainReadRequest{}
req, err = http.NewRequest("POST", "/v1/discovery-chain/", jsonReader(apiReq))
}
require.NoError(t, err)
resp := httptest.NewRecorder()
_, err = a.srv.DiscoveryChainRead(resp, req)
require.Error(t, err)
_, ok := err.(BadRequestError)
require.True(t, ok)
}))
require.True(t, t.Run(method+": read default chain", func(t *testing.T) {
var (
req *http.Request
err error
)
if method == "GET" {
req, err = http.NewRequest("GET", "/v1/discovery-chain/web", nil)
} else {
apiReq := &discoveryChainReadRequest{}
req, err = http.NewRequest("POST", "/v1/discovery-chain/web", jsonReader(apiReq))
}
require.NoError(t, err)
resp := httptest.NewRecorder()
obj, err := a.srv.DiscoveryChainRead(resp, req)
require.NoError(t, err)
value := obj.(discoveryChainReadResponse)
expect := &structs.CompiledDiscoveryChain{
ServiceName: "web",
Namespace: "default",
Datacenter: "dc1",
Protocol: "tcp",
StartNode: "resolver:web.default.dc1",
Nodes: map[string]*structs.DiscoveryGraphNode{
"resolver:web.default.dc1": &structs.DiscoveryGraphNode{
Type: structs.DiscoveryGraphNodeTypeResolver,
Name: "web.default.dc1",
Resolver: &structs.DiscoveryResolver{
Default: true,
ConnectTimeout: 5 * time.Second,
Target: "web.default.dc1",
},
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.dc1": newTarget("web", "", "default", "dc1"),
},
}
require.Equal(t, expect, value.Chain)
}))
require.True(t, t.Run(method+": read default chain; evaluate in dc2", func(t *testing.T) {
var (
req *http.Request
err error
)
if method == "GET" {
req, err = http.NewRequest("GET", "/v1/discovery-chain/web?compile-dc=dc2", nil)
} else {
apiReq := &discoveryChainReadRequest{}
req, err = http.NewRequest("POST", "/v1/discovery-chain/web?compile-dc=dc2", jsonReader(apiReq))
}
require.NoError(t, err)
resp := httptest.NewRecorder()
obj, err := a.srv.DiscoveryChainRead(resp, req)
require.NoError(t, err)
value := obj.(discoveryChainReadResponse)
expect := &structs.CompiledDiscoveryChain{
ServiceName: "web",
Namespace: "default",
Datacenter: "dc2",
Protocol: "tcp",
StartNode: "resolver:web.default.dc2",
Nodes: map[string]*structs.DiscoveryGraphNode{
"resolver:web.default.dc2": &structs.DiscoveryGraphNode{
Type: structs.DiscoveryGraphNodeTypeResolver,
Name: "web.default.dc2",
Resolver: &structs.DiscoveryResolver{
Default: true,
ConnectTimeout: 5 * time.Second,
Target: "web.default.dc2",
},
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.dc2": newTarget("web", "", "default", "dc2"),
},
}
require.Equal(t, expect, value.Chain)
}))
require.True(t, t.Run(method+": read default chain with cache", func(t *testing.T) {
var (
req *http.Request
err error
)
if method == "GET" {
req, err = http.NewRequest("GET", "/v1/discovery-chain/web?cached", nil)
} else {
apiReq := &discoveryChainReadRequest{}
req, err = http.NewRequest("POST", "/v1/discovery-chain/web?cached", jsonReader(apiReq))
}
require.NoError(t, err)
resp := httptest.NewRecorder()
obj, err := a.srv.DiscoveryChainRead(resp, req)
require.NoError(t, err)
// The GET request primes the cache so the POST is a hit.
if method == "GET" {
// Should be a cache miss
require.Equal(t, "MISS", resp.Header().Get("X-Cache"))
} else {
// Should be a cache HIT now!
require.Equal(t, "HIT", resp.Header().Get("X-Cache"))
}
value := obj.(discoveryChainReadResponse)
expect := &structs.CompiledDiscoveryChain{
ServiceName: "web",
Namespace: "default",
Datacenter: "dc1",
Protocol: "tcp",
StartNode: "resolver:web.default.dc1",
Nodes: map[string]*structs.DiscoveryGraphNode{
"resolver:web.default.dc1": &structs.DiscoveryGraphNode{
Type: structs.DiscoveryGraphNodeTypeResolver,
Name: "web.default.dc1",
Resolver: &structs.DiscoveryResolver{
Default: true,
ConnectTimeout: 5 * time.Second,
Target: "web.default.dc1",
},
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.dc1": newTarget("web", "", "default", "dc1"),
},
}
require.Equal(t, expect, value.Chain)
}))
}
{ // Now create one config entry.
out := false
require.NoError(t, a.RPC("ConfigEntry.Apply", &structs.ConfigEntryRequest{
Datacenter: "dc1",
Entry: &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "web",
ConnectTimeout: 33 * time.Second,
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Datacenters: []string{"dc2"},
},
},
},
}, &out))
require.True(t, out)
}
// Ensure background refresh works
require.True(t, t.Run("GET: read modified chain", func(t *testing.T) {
retry.Run(t, func(r *retry.R) {
req, err := http.NewRequest("GET", "/v1/discovery-chain/web?cached", nil)
r.Check(err)
resp := httptest.NewRecorder()
obj, err := a.srv.DiscoveryChainRead(resp, req)
r.Check(err)
// Should be a cache hit! The data should've updated in the cache
// in the background so this should've been fetched directly from
// the cache.
if resp.Header().Get("X-Cache") != "HIT" {
r.Fatalf("should be a cache hit")
}
value := obj.(discoveryChainReadResponse)
expect := &structs.CompiledDiscoveryChain{
ServiceName: "web",
Namespace: "default",
Datacenter: "dc1",
Protocol: "tcp",
StartNode: "resolver:web.default.dc1",
Nodes: map[string]*structs.DiscoveryGraphNode{
"resolver:web.default.dc1": &structs.DiscoveryGraphNode{
Type: structs.DiscoveryGraphNodeTypeResolver,
Name: "web.default.dc1",
Resolver: &structs.DiscoveryResolver{
ConnectTimeout: 33 * time.Second,
Target: "web.default.dc1",
Failover: &structs.DiscoveryFailover{
Targets: []string{"web.default.dc2"},
},
},
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.dc1": newTarget("web", "", "default", "dc1"),
"web.default.dc2": newTarget("web", "", "default", "dc2"),
},
}
if !reflect.DeepEqual(expect, value.Chain) {
r.Fatalf("should be equal: expected=%+v, got=%+v", expect, value.Chain)
}
})
}))
// TODO(namespaces): add a test
expectTarget_DC2 := newTarget("web", "", "default", "dc2")
expectTarget_DC2.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeLocal,
}
expectModifiedWithOverrides := &structs.CompiledDiscoveryChain{
ServiceName: "web",
Namespace: "default",
Datacenter: "dc1",
Protocol: "grpc",
CustomizationHash: "98809527",
StartNode: "resolver:web.default.dc1",
Nodes: map[string]*structs.DiscoveryGraphNode{
"resolver:web.default.dc1": &structs.DiscoveryGraphNode{
Type: structs.DiscoveryGraphNodeTypeResolver,
Name: "web.default.dc1",
Resolver: &structs.DiscoveryResolver{
ConnectTimeout: 22 * time.Second,
Target: "web.default.dc1",
Failover: &structs.DiscoveryFailover{
Targets: []string{"web.default.dc2"},
},
},
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.dc1": newTarget("web", "", "default", "dc1"),
expectTarget_DC2.ID: expectTarget_DC2,
},
}
require.True(t, t.Run("POST: read modified chain with overrides (camel case)", func(t *testing.T) {
body := ` {
"OverrideMeshGateway": {
"Mode": "local"
},
"OverrideProtocol": "grpc",
"OverrideConnectTimeout": "22s"
} `
req, err := http.NewRequest("POST", "/v1/discovery-chain/web", strings.NewReader(body))
require.NoError(t, err)
resp := httptest.NewRecorder()
obj, err := a.srv.DiscoveryChainRead(resp, req)
require.NoError(t, err)
value := obj.(discoveryChainReadResponse)
require.Equal(t, expectModifiedWithOverrides, value.Chain)
}))
require.True(t, t.Run("POST: read modified chain with overrides (snake case)", func(t *testing.T) {
body := ` {
"override_mesh_gateway": {
"mode": "local"
},
"override_protocol": "grpc",
"override_connect_timeout": "22s"
} `
req, err := http.NewRequest("POST", "/v1/discovery-chain/web", strings.NewReader(body))
require.NoError(t, err)
resp := httptest.NewRecorder()
obj, err := a.srv.DiscoveryChainRead(resp, req)
require.NoError(t, err)
value := obj.(discoveryChainReadResponse)
require.Equal(t, expectModifiedWithOverrides, value.Chain)
}))
}
|
Computer tomography for nondestructive testing in the automotive industry The application of computer tomography (CT) for non-destructive testing is of continuing interest to research and industry alike, as economic pressure is ever increasing on production processes. Three concurring goals drive the development of CT, namely: It has to be fast, cheap and precise. With a fast CT-system, the technique can not only be used for error analysis and precision measurements, but also for the application as a standard tool in the production line for the complete quality control of parts. At the Robert Bosch corporate research centre in Stuttgart, Germany, we have set up a CT-system, that allows us to conduct experiments towards these goals and to test and develop the latest software for the reconstruction of x-ray images. One of our main challenges is to use CT for reverse engineering processes and to create computer assisted design (CAD) models from measured data. For this application often a coordinate measurement machine (CMM) is used that gathers a cloud of data points by optical inspection. However, for many parts the inside of the object is relevant. Here CT has the unique advantage of delivering volumetric data. Once the process of the generation of a cloud of data points can be achieved with high precision, standard reverse engineering CAD software can be used to determine the dimensions of the interior structure of an object. This paper describes the use of CT for non-destructive testing at Robert Bosch GmbH, the accuracy limits for the measurement of volumetric data and the classification and analysis of material defects. Furthermore, it highlights the ongoing research to make CT fast, exact and cheap, and to enable its utilisation for 100% testing of parts at the end of a production line. |
Higher command and staff course staff ride paper: Who should bear primary responsibility for the culmination of Patton's US Third Army on the Moselle in 1944? Are there lessons for contemporary campaign planning? In the month since becoming operational, General George S. Patton's US Third Army had swept hundreds of miles across France in pursuit of the retreating German Army in a remarkable example of Blitzkrieg operations. In September 1944 it ran out of fuel, just 30 miles short of the Moselle. With German resistance crumbling across his whole front, Eisenhower gave the logistic priority to Field Marshal Sir Bernard Montgomery's advance to the Ruhr. Fully aware of his stretched lines of communication and the limited transport available, Patton refused to accept his circumstances and urged his army forward, in the hope that Eisenhower would be forced to give him more fuel. But Montgomery's advance to the Ruhr was too important to be jeopardized by Patton's antics and culmination became inevitable for Third Army.
A new Justice League trailer dropped and it was… well, it was still pretty awful but slightly less. There’s been some obvious tweaks from Joss Whedon now that Snyder has left the helm. Not crazy earth shattering changes, but enough to where you can see a slight change of tone and color grading.
Oh, the comic. That’s right. Today’s comic is based off that trailer, particularly the end where Alfred is talking to some mysterious stranger that is TOTALLY NOT SUPERMAN YOU GUYS. Alfred is doing his thing and the viewer is supposed to go, “WHO CAN IT BE?!” Anyway, a pal of mine made a remark about it being the T-Rex in Batman’s cave and I was laughing silly. You see, a stomp happened and some water shook and in the batcave is- well you get the point. So, yeah. Funny joke, Everyone laughs or something. Aquabro jumping through that building looked like PS3 graphics.
Special thanks to Steven Perrell for inspiring today’s business.
If you enjoy these comics and our podcast, why not take the next step and join our PATREON? By becoming a Patron you’ll have access to early comics, behind the scenes stuff, our weekly pinups and what have you. There’s also some cool merch and perks too. |
package me.qinmian.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface ExcelRowCell {
String value() default "";
/** Whether this is a single cell; false means the cell is merged.
* @return
*/
boolean isSingle() default false;
boolean autoCol() default false;
int startRow() default -1;
int endRow() default -1;
int startCol() default -1;
int endCol() default -1 ;
short rowHightInPoint() default 25;
ExportCellStyle cellStyle() default @ExportCellStyle();
}
|
from django.contrib.auth import logout
from django.contrib.auth.mixins import UserPassesTestMixin
from django.contrib.auth.views import redirect_to_login
from battles.models import Battle
class TrainerIsNotOpponentMixin(UserPassesTestMixin):
permission_denied_message = "User, you've already selected your team."
def test_func(self):
user = self.request.user
battle_id = self.request.GET.get('id')
battle = Battle.objects.get(id=battle_id)
if (battle.trainer_opponent != user or battle.status == 'SETTLED'):
return False
return True
def handle_no_permission(self):
if self.request.user.is_authenticated:
logout(self.request)
return redirect_to_login(
self.request.get_full_path(), self.get_login_url(), self.get_redirect_field_name())
|
import React, { ChangeEvent } from "react";
import { Input } from "@govuk-jsx/input";
import { i18n } from "../../i18n";
interface Props {
errors: any;
handleTitleInputBlur: (event: ChangeEvent<HTMLInputElement>) => void;
title: string;
}
export const FormDetailsTitle = (props: Props) => {
const { title, errors, handleTitleInputBlur } = props;
return (
<Input
id="form-title"
name="title"
label={{
className: "govuk-label--s",
children: [i18n("Title")],
}}
onChange={handleTitleInputBlur}
defaultValue={title}
errorMessage={
errors?.title ? { children: errors.title.children } : undefined
}
/>
);
};
|
Comparing Patient and Provider Perceptions of Engagement and Care in Chronic Diseases The continuing professional development (CPD) community recognizes that optimal health care outcomes depend on coproduction, defined by the partnerships that patients and providers form in processes such as setting goals, making treatment decisions, and assessing care quality and outcomes. To develop programs that effectively support provider and patient coproduction, CPD professionals must first identify relevant gaps and needs. Generally, our approach to gap analysis and needs assessment for coproduction involves conducting survey studies to compare patients' and providers' disease-specific treatment goals, relative knowledge and educational needs, perceptions, barriers, and behaviors. This approach addresses the current lack of comprehensive patient-provider survey studies on various chronic diseases in the literature. Through survey studies approved by independent institutional review boards, we sought to assess and compare patient and provider perceptions regarding hepatitis C and chronic obstructive pulmonary disease (COPD).
package redis
import (
"fmt"
"strings"
)
//Splits
const _splitNamespace = "SPLITIO.split.%s"
const _splitsTillNamespace = "SPLITIO.splits.till"
//Segments
const _segmentsRegisteredNamespace = "SPLITIO.segments.registered"
const _segmentTillNamespace = "SPLITIO.segment.%s.till"
const _segmentNamespace = "SPLITIO.segment.%s"
//Impressions
//SPLITIO/{sdk-language-version}/{instance-id}/impressions.{featureName}
const _impressionsNamespace = "SPLITIO/%s/%s/impressions.%s"
//Metrics
//SPLITIO/{sdk-language-version}/{instance-id}/latency.{metricName}.bucket.{bucketNumber}
const _metricsLatencyNamespace = "SPLITIO/%s/%s/latency.%s.bucket.%s"
//SPLITIO/{sdk-language-version}/{instance-id}/count.{metricName}
const _metricsCounterNamespace = "SPLITIO/%s/%s/count.%s"
//SPLITIO/{sdk-language-version}/{instance-id}/gauge.{metricName}
const _metricsGaugesNamespace = "SPLITIO/%s/%s/gauge.%s"
//Events
const _eventsListNamespace = "SPLITIO.events"
//Impressions
const _impressionsQueueNamespace = "SPLITIO.impressions"
type prefixAdapter struct {
prefix string
}
func (p prefixAdapter) setPrefixPattern(pattern string) string {
if p.prefix != "" {
return strings.Join([]string{p.prefix, pattern}, ".")
}
return pattern
}
func (p prefixAdapter) splitNamespace(name string) string {
return fmt.Sprintf(p.setPrefixPattern(_splitNamespace), name)
}
func (p prefixAdapter) splitsTillNamespace() string {
return fmt.Sprint(p.setPrefixPattern(_splitsTillNamespace))
}
func (p prefixAdapter) segmentsRegisteredNamespace() string {
return fmt.Sprint(p.setPrefixPattern(_segmentsRegisteredNamespace))
}
func (p prefixAdapter) segmentTillNamespace(name string) string {
return fmt.Sprintf(p.setPrefixPattern(_segmentTillNamespace), name)
}
func (p prefixAdapter) segmentNamespace(name string) string {
return fmt.Sprintf(p.setPrefixPattern(_segmentNamespace), name)
}
func (p prefixAdapter) impressionsNamespace(languageAndVersion string, instanceID string, featureName string) string {
return fmt.Sprintf(p.setPrefixPattern(_impressionsNamespace), languageAndVersion, instanceID, featureName)
}
func (p prefixAdapter) metricsLatencyNamespace(languageAndVersion string, instanceID string, metricName string, bucketNumber string) string {
return fmt.Sprintf(p.setPrefixPattern(_metricsLatencyNamespace), languageAndVersion, instanceID, metricName, bucketNumber)
}
func (p prefixAdapter) metricsCounterNamespace(languageAndVersion string, instanceID string, metricName string) string {
return fmt.Sprintf(p.setPrefixPattern(_metricsCounterNamespace), languageAndVersion, instanceID, metricName)
}
func (p prefixAdapter) metricsGaugeNamespace(languageAndVersion string, instanceID string, metricName string) string {
return fmt.Sprintf(p.setPrefixPattern(_metricsGaugesNamespace), languageAndVersion, instanceID, metricName)
}
func (p prefixAdapter) eventsListNamespace() string {
return fmt.Sprint(p.setPrefixPattern(_eventsListNamespace))
}
func (p prefixAdapter) impressionsQueueNamespace() string {
return fmt.Sprint(p.setPrefixPattern(_impressionsQueueNamespace))
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .lib.testrail import APIError
class User:
__module__ = "testrail_yak"
def __init__(self, api):
self.client = api
def get(self, user_id: int):
"""Get a TestRail user by user_id.
:param user_id: user ID of the user we want to grab
:return: response from TestRail API containing the user
"""
try:
result = self.client.send_get(f"get_user/{user_id}")
except APIError as error:
print(error)
raise UserException
else:
return result
def get_by_email(self, email_addr: str):
"""Get a TestRail user by email.
:param email_addr: email address of the user we want to grab
:return: response from TestRail API containing the user
"""
try:
result = self.client.send_get(f"get_user_by_email&email={email_addr}")
except APIError as error:
print(error)
raise UserException
else:
return result
def get_all(self):
"""Get a list of TestRail users.
:return: response from TestRail API containing the user collection
"""
try:
result = self.client.send_get("get_users")
except APIError as error:
print(error)
raise UserException
else:
return result
class UserException(Exception):
pass
|
High Rates of Treatment Stage Migration For Early Hepatocellular Carcinoma and Its Association With Adverse Impact On Outcomes: A Multicenter Study

Background & Aims: The rate of contraindications to percutaneous ablation (PA) for inoperable early HCC, and subsequent outcomes, is not well described. We investigated the prevalence and outcomes of inoperable early HCC patients with contraindications to PA, resulting in treatment stage migration (TSM). Methods: BCLC 0/A patients diagnosed between September 2013 and September 2019 across five hospitals were identified. The primary endpoint was the proportion of BCLC 0/A HCCs with contraindications to PA. Secondary endpoints included overall survival (OS), local tumour control (LTC) and recurrence-free survival (RFS). The causal effects of PA versus TSM were assessed using a potential outcome means (POM) framework in which the average treatment effects (ATE) of PA were estimated after accounting for potential selection bias and confounding. Results: 245 patients with inoperable BCLC 0/A HCC were identified. 140 (57%) had contraindications to PA and received TSM therapy, 105 (43%) received PA. The main contraindication to PA was difficult tumour location (47%). Patients who received TSM therapy had lower median OS (2.1 versus 5.3 years), LTC (1.0 versus 4.8 years), and RFS (0.8 versus 2.7 years); p < 0.001 respectively, compared to PA. The ATE for PA versus TSM yielded an additional 1.02 years (p = 0.048), 2.87 years (p < 0.001) and 1.77 years (p < 0.001) for OS, LTC and RFS respectively. 3-year LTC after PA was suboptimal (63%). Our study highlights high rates of contraindication to PA in early HCCs, resulting in TSM and poorer outcomes. The local recurrence rate after PA was also high. Both findings support exploration of alternative ablative options for early HCCs.

Introduction
Liver cancer is the fourth leading cause of cancer-related death globally. Hepatocellular carcinoma (HCC) is the most common type of primary liver malignancy, accounting for up to 80% of all cases worldwide, as well as in Australia. Over the last four decades, the HCC incidence rate in Australia has increased by almost eight-fold, from 1.37 per 100 000 in 1982 to 8.60 per 100 000 in 2019, which represents the fastest rising incidence of any cancer in Australia and therefore a significant challenge for the healthcare system. Due to increased uptake of ultrasound surveillance in at-risk individuals, HCC is being more frequently diagnosed at an earlier, curable stage in some jurisdictions. The Barcelona Clinic Liver Cancer (BCLC) classification is a widely accepted HCC staging system for prognosis assessment and treatment allocation. In patients diagnosed with very early/early-stage HCC (BCLC stage 0/A), curative surgical therapies such as liver transplantation or resection are recommended. Unfortunately, as little as 30% of patients are candidates for surgery at diagnosis. Percutaneous ablation (PA) is the current standard curative therapy for early HCC patients ineligible for surgical therapy. However, when PA therapy cannot be given to patients with early stage HCC, patients commonly advance to non-curative treatments (treatment stage migration). The rate of contraindication to PA and subsequent patient outcomes, for receiving non-curative therapies based on the treatment stage migration (TSM) concept, is not well described. Several retrospective studies have reported high rates of contraindication to PA (34-43%) in early HCC patients who were ineligible for surgery.
Emerging data supports the effectiveness of alternative ablative options such as stereotactic body radiation therapy (SBRT), however, its incorporation into consensus guidelines is variable. The current EASL HCC management guidelines cite a lack of robust evidence to support its use. Therefore, the aim of this work was to perform a retrospective multicenter study to evaluate (i) the proportion of inoperable early-stage HCC referred for PA but ineligible due to contraindications; (ii) the clinical impacts on survival in patients who had contraindications to PA and experienced TSM; and (iii) the local tumour control following PA in a large, real-world cohort. To estimate the causal treatment effects using this observational dataset, we used a potential outcome mean approach, with inverse probability of treatment weights used to correct for missing data on the potential outcomes and selection bias.

Study population
All patients diagnosed with HCC between September 2013 to September 2019 and managed at five tertiary hospitals across South Australia and Northern Territory were identified retrospectively from relevant hospital-based electronic and paper medical records. HCC was diagnosed based on typical radiological findings using multiphasic contrast-enhanced CT or MRI, and/or pathological confirmation according to the EASL criteria. Inclusion criteria were: BCLC stage 0 or A HCC, ineligible for surgical therapy (transplantation/resection) as decided by the HCC multidisciplinary meeting, received either PA or TSM therapy including transarterial chemoembolization (TACE), selective internal radiation therapy (SIRT), stereotactic body radiotherapy (SBRT), systemic therapy or best supportive care as the initial treatment following HCC multidisciplinary meeting recommendation, minimal follow up ≥3 months. Exclusion criteria were: incomplete/missing critical data such as BCLC stage of HCC and treatment date, received combination therapies such as TACE + ablation as the initial treatment, received first treatment TACE or ablation as bridging therapy for liver transplant performed within 6 months. Treatment allocation was via two HCC multidisciplinary teams associated with two major tertiary hospitals in South Australia. Each team contained hepatologists, hepatobiliary and transplant surgeons, liver specialized interventional radiologists, medical and radiation oncologists, and liver cancer nurses. These teams received all referrals across South Australia and Northern Territory, a population of approximately two million people. PA techniques included: radiofrequency ablation, microwave ablation or percutaneous ethanol injection. The rate and contraindications to PA were assessed in early HCC patients who were deemed ineligible for surgical therapy. The technique of TACE procedure performed was conventional TACE (cTACE), using epirubicin, followed by embolization with gelfoam or polyvinyl alcohol particles.

Study endpoints
The primary endpoint of our study was the proportion of BCLC 0/A HCC with contraindications to PA. Secondary endpoints included overall survival (OS), local tumour control (LTC) and recurrence-free survival (RFS) between the PA and TSM groups. Overall survival was defined as the time from the date of first treatment to the date of death or last follow up if alive. Local tumour control was defined as absence of tumour progression of the treated target lesion/s on CT/MRI after the first treatment.
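The endpoint definitions above (time from first treatment to death, progression or recurrence, censored at last follow-up) translate directly into duration/event pairs for survival analysis. The following Python sketch shows one way to derive them with pandas; the column names and dates are hypothetical and are not taken from the study dataset.

```python
import pandas as pd

# Hypothetical per-patient records; column names are illustrative only.
df = pd.DataFrame({
    "first_treatment": pd.to_datetime(["2014-03-01", "2015-07-15", "2016-01-10"]),
    "death_date":      pd.to_datetime([None, "2017-02-01", None]),
    "last_follow_up":  pd.to_datetime(["2019-06-30", "2017-02-01", "2019-09-01"]),
})

# Overall survival: time from first treatment to death, censored at last follow-up.
event_date = df["death_date"].fillna(df["last_follow_up"])
df["os_years"] = (event_date - df["first_treatment"]).dt.days / 365.25
df["os_event"] = df["death_date"].notna().astype(int)   # 1 = died, 0 = censored

print(df[["os_years", "os_event"]])
```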
Recurrence-free survival was calculated from the date of first treatment to the date of tumour recurrence or last follow up/death date if no recurrence was detected, in patients who achieved complete tumour response after first treatment. Patients who received best supportive care or SBRT as TSM therapy were excluded from the analyses of LTC and RFS. Patients who received SBRT were excluded from these analyses due to low numbers in the study and emerging evidence suggesting this is potentially a curative therapy. Each tumour nodule treated was assessed on follow up imaging to determine LTC rate. Response Evaluation Criteria in Solid Tumours version 1.1 (RECIST 1.1) criteria was used to assess tumour response post treatment based on the radiological assessment with multiphasic contrast-enhanced CT or MRI. Complete tumour response (CR) was defined as the disappearance of all target lesion/s in the axial imaging plane and/or absence of arterial phase hyperenhancement at the first radiological assessment after treatment.

Statistical methods
Continuous variables were summarised using median ± interquartile ranges (IQR) or mean ± standard deviations (SD) and compared by Student's t-test or Mann-Whitney U test. Categorical variables were summarized as frequencies (percentages) and compared using Chi-squared or Fisher's exact test as appropriate. The unadjusted estimated probability of survival (OS, RFS) and LTC was described using the Kaplan-Meier (KM) method and compared between treatment groups using the log-rank test. We assessed the causal average treatment effect (ATE) for PA versus TSM therapy using a Potential Outcome Mean (POM) survival analysis. POM is a 2-stage approach to estimating causal treatment effects, with estimation of the probability of receiving a particular treatment using a logit regression model in the first step, followed by estimation of the mean time to outcome using a Weibull censoring model. The probabilities of treatment are used as inverse probability of treatment weights (IPTW) in the survival model to create a pseudo-population in which there is balance in treatment probability between the 2 treatment groups. As such the observed associations can be treated as causal treatment effects. This approach to analysis allows estimation of both the average time to the outcome if a single treatment were used for the whole study population, as well as the average treatment effect (ATE), being the difference in mean time to outcome if all subjects were treated with PA versus TSM therapy. Model covariates for both the treatment and censoring models included age, gender, cirrhosis, Child-Pugh score, AFP, MELD score, number of tumours, tumour size, and alcohol aetiology. Results are reported as mean (95% CI) time to outcome (POM) and mean (95% CI) average treatment effect (ATE). POM and ATE estimates were obtained using version 16 of Stata's "stteffects" command (StataCorp, USA). A 2-tailed Type 1 error rate of alpha = 0.05 was used for significance testing. Ethical approval and data collection proportion of BCLC stage 0 (47% versus 9%), lower median alpha-fetoprotein (AFP) (6 µg/L versus 7.5 µg/L) and lower median MELD score (9 versus 10).
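The Kaplan-Meier/log-rank comparison and the IPTW-weighted treatment-effect estimate described above were produced in SPSS and Stata; purely as an illustration of the same workflow, here is a minimal Python sketch using lifelines and scikit-learn on hypothetical data. The variable names, covariates and numbers are placeholders, and the simplified weighting below omits the Weibull censoring model used in the paper.

```python
import numpy as np
import pandas as pd
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(1)
n = 245  # same order of magnitude as the study cohort; all values are synthetic

df = pd.DataFrame({
    "pa":        rng.integers(0, 2, n),      # 1 = percutaneous ablation, 0 = TSM
    "age":       rng.normal(67, 9, n),
    "tumour_cm": rng.normal(2.5, 0.8, n),
    "os_years":  rng.exponential(3.0, n),
    "died":      rng.integers(0, 2, n),
})

# Unadjusted Kaplan-Meier curves and log-rank test, as in the Methods.
km = KaplanMeierFitter()
for label, grp in df.groupby("pa"):
    km.fit(grp["os_years"], grp["died"], label=f"pa={label}")
lr = logrank_test(df.loc[df.pa == 1, "os_years"], df.loc[df.pa == 0, "os_years"],
                  df.loc[df.pa == 1, "died"], df.loc[df.pa == 0, "died"])

# Stage 1 of the POM/IPTW idea: propensity of receiving PA from a logit model.
ps = LogisticRegression().fit(df[["age", "tumour_cm"]], df["pa"]).predict_proba(
    df[["age", "tumour_cm"]])[:, 1]
weights = np.where(df["pa"] == 1, 1 / ps, 1 / (1 - ps))

# Crude weighted difference in observed survival time (illustrative stand-in for the ATE).
ate_naive = (np.average(df.loc[df.pa == 1, "os_years"], weights=weights[df.pa == 1])
             - np.average(df.loc[df.pa == 0, "os_years"], weights=weights[df.pa == 0]))
print(f"log-rank p={lr.p_value:.3f}, weighted mean difference={ate_naive:.2f} years")
```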
Failed to receive eligible percutaneous ablation treatment 1 1% Bi-lobar distribution of tumours 7 5% Reason not speci ed 3 2% numbers exceed 140 as some patients had additional second contraindications Of the 105 PA treatments, the most common modality used was microwave ablation (97 treatments), followed by ethanol ablation (7 treatments) and radiofrequency ablation (1 treatment). TACE was the most common TSM therapy (116 patients; 82.9%). Other TSM therapies included SIRT (2 patients), SBRT (3 patients), systemic therapy (1 patients) and best supportive care (8 patients). SIR-Spheres® Y-90 resin microspheres was the standard radioembolisation technique performed at our centres. Sorafenib was the systemic therapy given in all cases. 9 patients referred for TACE and 1 patient referred for PA had unsuccessful procedure attempt or procedure postponed due to acute illness, and subsequently received best supportive care due to contraindications. Overall survival (OS) Of the 245 study patients, 107 died within the follow up period, with the median OS of 3.5 years for the whole study cohort (95% CI 2.33-4.45). Patients who received TSM therapy as the initial treatment had signi cantly shorter median OS when compared with the ablation group (2.1 years vs 5.3 years), log-rank 2 = 18.85, p < 0.001) (Fig. 2). The survival rates at 1, 3, and 5 years were also shorter in the TSM group (Table 3). log-rank 2 = 35.38, p < 0.001) (Fig. 3). The LTC rates at 1, 3, and 5 years were also signi cantly lower in tumours treated with TSM therapy ( Table 3). The 3 years LTC rates for PA versus TSM therapy were 63.1% versus 18.8%, respectively (p < 0.001). Recurrence-free survival (RFS) Patients who received TSM therapy as the initial treatment had signi cantly lower RFS when compared with the ablation group (0.8 vs 2.7 years; log-rank 2 = 34.84, p < 0.001) (Fig. 4). The RFS rates at 1, 3, and 5 years were also signi cantly lower in the TSM group (Table 3). Potential Outcome Mean (POM) and Average Treatment Effect (ATE) The results for the potential outcome mean survival time and average treatment effects for each outcome are shown in Table 4. The potential mean outcome time for the whole study population, were it treated using TSM therapy for OS, LTC and RFS, were 1.77, 0.89 and 0.86 years respectively. There was a signi cant average treatment effect for PA versus TSM therapy for each outcome, with an additional 1.02 years (p = 0.048), 2.87 years (p < 0.001) and 1.77 years (p < 0.001) for OS, LTC and RFS respectively, if the whole population were treated with PA versus TSM therapy (Table 4). Discussion Although a TSM strategy for treating HCC in early-stage, potentially curable HCC patients is likely to be common in real-world practice, its frequency and impact on outcomes is not well described. Our study reported a high rate of contraindications to PA (57.1%) in early-stage HCC patients who were ineligible for surgery. This is supported by other studies reporting similarly high rate of contraindications to PA (34-43%), which further validates that our study ndings are likely to re ect real-world practice. The two main contraindications to PA were di cult tumour location (47%) and large tumour size (> 3cm) (46%) in our study cohort. Our high rate of contraindications demonstrates the frequent technical limitations of PA therapy for early stage HCC in real-world practice, which is underappreciated in our view. A subsequent problem related to this high rate of contraindications to PA is TSM to non-curative therapy. 
In this cohort, 57% of inoperable patients with early-stage HCC had TSM therapies. To the best of our knowledge, our study is the rst to compare important oncological outcomes in this speci c subgroup of surgically inoperable early-stage HCC patients who received the curative therapy (PA) versus patients who received non-curative TSM therapies as the initial treatment. The outcomes in those who received TSM were signi cantly poorer with lower OS, LTC and RFS. Although patients in the TSM group had a number of adverse clinical and tumour characteristics, the TSM group remained an independently associated variable for poor outcome (OS, LTC, RFS) when adjusted for other relevant clinical factors. Another concerning nding from our study was the relatively high local recurrence rate for small HCC tumours treated with PA. Although PA is considered a curative therapy, we found that patients treated with PA had high rates of local recurrence at 1, 3, and 5 years (14%, 37% and 55% respectively). These high local recurrence rates could not be explained by poor selection as all tumours treated were ≤3 cm in maximal diameter, the standard accepted indication for PA treatment. Moreover, all interventional radiologists involved in the PA treatments for our study cohort were liver specialized and experienced with this technique. Although early randomised studies have reported better outcomes for local recurrence, we believe the local recurrence rates for PA reported in our study are more re ective of the real-world practice and are supported by a number of recent studies reporting similarly high local recurrence rates (23-54%) within 3 years. The explanation for the high local recurrence after PA is unclear but likely relates to technical factors such as suboptimal tumour visibility under USS guidance during PA, challenging tumour location/ subphrenic region, leading to incomplete tumour ablation, or varying de nitions of local failure of tumour control. Taken together, these study ndings demonstrate vulnerabilities associated with current HCC treatment algorithms using PA for surgically inoperable early-stage HCC, when implemented in real-world settings. Patients who are not eligible for surgical therapies are effectively placed in "double jeopardy" when referred for PA. The rst risk they face is not being eligible for PA with subsequent migration of stage and treatment to non-curative therapies associated with poorer LTC and survival. The second risk they face is from the frequent failure after PA to provide effective LTC. The purpose of this paper is to highlight the limitations of PA within current algorithms in clinical practice, which in our view do not appear to be su ciently recognized. These limitations suggest the need for randomized controlled trials of alternative, potentially curative treatments for inoperable patients with early-stage HCC. In our view, a leading candidate for such trials is SBRT. Recent non-randomized studies and systemic review investigating e cacy of SBRT in BCLC 0/A patients have suggested excellent LTC rates (> 90%) up to 3 years and equivalent OS in comparison to PA. In addition, SBRT offers a number of other potential advantages including its ability to treat lesions in di cult locations (close to diaphragm, vessels and biliary structures) and lesions > 5 cm, and its non-invasive nature with no tumour seeding risk, and delivery in an outpatient setting. There are several limitations of our study. 
Firstly, its retrospective and non-randomized design limits our ability to exclude selection bias, with differences in baseline clinical characteristics potentially explaining the poorer outcomes in the TSM group. To overcome this potential bias, we used a POM approach which allowed for estimation of marginal treatment effects rather the typical estimation of conditional treatment effects using a standard Cox regression approach. However, the potential for selection bias remains a possibility, to the extent that unobserved cofounders were not included in the model for treatment assignment. Despite this limitation, the ranges of the ATE's were all within the range of biological plausibility. A further limitation was likely heterogeneity in the technical aspects of PA and TACE delivery and radiology reporting by multiple care providers across different centres. Nevertheless, this heterogeneity is re ective of real-world practice in many healthcare centres. Major study strengths include a large patient cohort and multicentre design. A further strength is that treatment allocation was by HCC multidisciplinary teams and according to current BCLC treatment algorithms. Conclusions This real world, multicenter study con rmed that there was a high rate of contraindications to PA, associated with treatment stage migration and poorer outcomes for these patients. Despite being considered a "curative" therapy, PA was also associated with a high rate of local recurrence. These ndings support the need for randomized controlled trials comparing outcomes between PA and alternative ablative therapies such as stereotactic radiotherapy, as primary therapy for inoperable, earlystage HCC patients. Declarations Con ict of interest statement: nothing to declare Financial support statement: nothing to declare Availability of data and material: All data generated or analysed during this study are included in this published article; the datasets generated during and/or analysed during the current study are available from the corresponding author on reasonable request. Consent for publication: Not applicable; all data reported did not include patient's identifying details. |
# Copyright (C) 2014 Sony Mobile Communications Inc.
# All rights, including trade secret rights, reserved.
import vcsjob
import tests.testjob
import tests.logger
from tests.common import StdOutRedirector
import sys
import traceback
def all_testjob():
redirector = StdOutRedirector()
redirector.redirect()
res = True
res &= tests.testjob.t1()
res &= tests.testjob.t2()
res &= tests.testjob.t3()
res &= tests.testjob.t4()
res &= tests.testjob.t5()
res &= tests.testjob.t6()
res &= tests.testjob.t7()
redirector.reset()
return res
def all_testjob_android():
redirector = StdOutRedirector()
redirector.redirect()
res = True
try:
res &= tests.testjob_android.t01(redirector)
res &= tests.testjob_android.t02()
res &= tests.testjob_android.t03()
res &= tests.testjob_android.t04()
res &= tests.testjob_android.t05()
res &= tests.testjob_android.t06()
res &= tests.testjob_android.t07()
res &= tests.testjob_android.t08()
res &= tests.testjob_android.t09()
res &= tests.testjob_android.t10(redirector)
res &= tests.testjob_android.t11(redirector)
res &= tests.testjob_android.t12()
res &= tests.testjob_android.t13(redirector)
res &= tests.testjob_android.t14(redirector)
res &= tests.testjob_android.t15(redirector)
res &= tests.testjob_android.t16(redirector)
res &= tests.testjob_android.t17()
res &= tests.testjob_android.t18(redirector)
res &= tests.testjob_android.t19(redirector)
except Exception, e:
sys.stderr.writelines("%s\n" % str(e))
_, _, _tb = sys.exc_info()
stack_trace = traceback.extract_tb(_tb)
print('Stack trace')
for stack_item in stack_trace:
print(' %s' % str(stack_item))
std_out=redirector.get_content()
sys.stderr.writelines("Final std out:\n%s\n" % std_out)
res = False
finally:
redirector.reset()
return res
def all_logger():
redirector = StdOutRedirector()
redirector.redirect()
res = True
res &= tests.logger.t1()
res &= tests.logger.t2()
res &= tests.logger.t3()
res &= tests.logger.t4()
redirector.reset()
return res
|
<reponame>DYongLi/DizPay<filename>demos/java/diz-pay/src/main/java/diz/pay/common/exception/PayException.java
package diz.pay.common.exception;
import diz.pay.common.JsonResult;
import diz.pay.common.enums.ResultEnums;
public class PayException extends RuntimeException {
private static final long serialVersionUID = 1L;
private Integer code;
public Integer getCode() {
return code;
}
public void setCode(int code) {
this.code = code;
}
public PayException(String message, Throwable cause) {
super(message, cause);
}
public PayException(int code, String message, Throwable cause) {
super(message, cause);
this.code = code;
}
public PayException(String message) {
super(message);
}
public PayException(int code, String message) {
super(message);
this.code = code;
}
public PayException(JsonResult jsonResult) {
super(jsonResult.getMsg());
this.code = jsonResult.getCode();
}
public PayException(ResultEnums resultEnums) {
super(resultEnums.getMsg());
this.code = resultEnums.getCode();
}
public static PayException newInstance(String message) {
return new PayException(message);
}
public static PayException newInstance(int code, String message) {
return new PayException(code, message);
}
public static PayException newInstance(String message, Throwable cause) {
return new PayException(message, cause);
}
public static PayException newInstance(int code, String message, Throwable cause) {
return new PayException(code, message, cause);
}
public static PayException newInstance(ResultEnums result) {
return new PayException(result.getCode(), result.getMsg());
}
public static PayException newInstance(ResultEnums result, Throwable cause) {
return new PayException(result.getCode(), result.getMsg(), cause);
}
}
|
Minnesota Intercollegiate Athletic Conference
History
On March 15, 1920, a formal constitution was adopted and the Minnesota Intercollegiate Athletic Conference with founding members Carleton College, Gustavus Adolphus College, Hamline University, Macalester College, Saint John's University, St. Olaf College, and the College of St. Thomas (now University of St. Thomas).
Concordia College joined the MIAC in 1921, Augsburg University in 1924, and Saint Mary's University in 1926. Carleton dropped membership in 1925, rejoining in 1983. St. Olaf left in 1950, returning in 1975. The University of Minnesota Duluth was a member of the MIAC from 1950 to 1975. Bethel University joined in 1978. The MIAC initiated women's competition in the 1981–82 season. Two all-women's schools subsequently joined the conference, St. Catherine University in 1983 and the College of St. Benedict in 1985.
The conference did not play sports from the fall 1943 to the spring of 1945 due to World War II. Saint Mary's discontinued its football program in 1955. Macalester football left the conference in 2002, but still retains its MIAC membership in other sports. St. Catherine and St. Benedict, being both women's colleges, also do not sponsor football. Together with Saint John's, one of only a handful of men's colleges, St. Benedict forms a joint academic institution, known commonly by the initialism CSB/SJU.
From 1947 to 2003 the MIAC had a strong men's wrestling program, which was discontinued following the 2002–03 season. The strongest teams over the history of the conference were Augsburg with 31 team championships, and Saint John's with 14 team championships. The MIAC teams and individual wrestlers demonstrated a strong national and Olympic presence in the 1970s and beyond.
On May 22, 2019, the MIAC involuntarily removed the University of St. Thomas from membership effective at the end of spring 2021. |
// HTTPStatus returns the http status code associated with the error
func (c code) HTTPStatus() int {
switch c {
case codeAuth:
return http.StatusBadRequest
case codeInternal:
return http.StatusInternalServerError
case codeResourceNotFound:
return http.StatusNotFound
case codeMethodNotAllowed:
return http.StatusMethodNotAllowed
case codeResourceDuplicate:
return http.StatusConflict
case codeBadData, codeDecodeJSON:
return http.StatusBadRequest
default:
panic(stderr.New("unknown http status code for this error"))
}
} |
Over the years, enhancements have been made to rearview mirrors for vehicles by using lights in conjunction with such mirrors to provide a number of lighted auxiliary features. For example, rearview mirrors can incorporate lighted auxiliary features including, but not limited to, turn signals, blind spot detection displays (“BSDDs”), hazard warning lights, brake lights, or parking assist lights. Rearview mirror enhancements such as these have safety benefits, and are desirable to vehicle drivers for a variety of reasons. But there are a number of challenges to designing an optimal, efficient, and cost effective lighting assembly for such applications.
The challenges presented in this field generally relate to the small space constraints within the housing of the rearview mirror and the functionality of the mirror itself and the lighted auxiliary features. Specifically, lighting assemblies used for such features must be sized to fit behind the mirror, but within the mirror housing. The lighting assemblies must also be small and limit mirror vibration. A variety of light sources can be used, but light-emitting diodes (“LEDs”) are a common light source because they are small and generate large amounts of light, with lower energy consumption and heat generation, relative to their size. Any type of lights, however, generate heat and use energy. Thus, it is desirable to use as few lights or LEDs as possible to avoid excess heat and energy use. One additional aspect of designing lighted displays for rearview mirrors is that the visible light emitted from lighted auxiliary features should be sufficiently outside of or inside of the vehicle operator's line of sight during regular vehicle operation to avoid interfering with or to augment safe operation of the vehicle.
Early lighted auxiliary features for rearview mirrors were accomplished by using a simple “tilted LED” design, such as that disclosed in U.S. Pat. No. 6,257,746. In an embodiment of the tilted LED design, the lighting assembly is comprised of lights, such as LEDs, mounted on a substrate. The lights are positioned in an oblique orientation relative to the mirror, that is, they are “titled” away from the vehicle operator's line of sight and are not pointing directly through the mirror. The tilted LED design also uses a light diverting substrate which substantially prevents the lighting assembly from being visible when the lights are unlit. In this design, one LED or light is required for each aperture through which light passes out of the mirror. Although this design is still currently used, the lighting assembly in this design uses multiple LEDs and therefore a substantial amount of energy and also takes up a relatively substantial space.
As an alternative to the tilted LED design, optic assemblies have also been used to provide the lighting assembly for lighted auxiliary features in rearview mirrors. One early type of optic assembly is disclosed in U.S. Pat. No. 6,045,243, which is an example of the “Fresnel and deviator” design. The Fresnel and deviator optic design uses two or more optical elements to substantially converge and redirect light from light sources through the mirror in a way that does not interfere with the line of sight of the vehicle driver. In this design, the light emitting portion of the light source is either positioned facing the mirror or obliquely thereto. Light from the light source is first substantially collected and converged by a lens which may have refracting portions, reflective portions, or both. The collected light rays are then diverted by an optical element or elements (i.e., the “deviator”) by a certain amount, for example, 20-40 degrees from a line positioned normal to the front surface of the mirror, so that the light which is ultimately utilized by the lighted auxiliary feature and ultimately passed through the mirror does not interfere with the lines of sight of the vehicle operator. The Fresnel and deviator design may use a variety of different light sources, ranging from LEDs to light bulbs, but this design requires a relatively large amount of light and therefore uses a large amount of energy, emits a relatively large amount of heat and uses a relatively large amount of space.
Another type of optic assembly used is the “Paralocs” design that is disclosed in U.S. Pat. No. 6,076,948, and other patents. “Paralocs” is an abbreviation for Parabolic Array of LEDs on a Cut-Out Substrate. In the Paralocs design, the light sources used, typically LEDs, generally face away from the mirror, unlike the Fresnel and deviator design in which the light sources face the mirror. In the Paralocs design, light from the light source is substantially converged and directed at the same time with the use of a parabolic-shaped reflector. This design usually uses one reflector for each light source. Planar redirecting facets have also been used to increase efficiency and uniformity on Paralocs optics. This type of faceting redirects light that has already been significantly converged by another part of the optic. This allows the optic designer to use light from the far side of the LED, which would ordinarily be unused. However, the Paralocs design still requires one LED or light source per aperture through which light is emitted, and one reflector facet per LED or light source, and therefore still has some of the disadvantages of the earlier optical techniques.
Another type of optic used is referred to as a “Half Optic,” and is described in U.S. Pat. No. 7,273,307. An example of this type of optic uses a small reflector to direct and converge light from a light source through an aperture. The light source in the Half Optic design faces the mirror and is also positioned quite close to the mirror. This design has special utility when trying to direct light at angles very close to the mirror surface, but has some of the same disadvantages regarding the number of LEDs or light sources required, and permits less sophisticated control over the direction of the light rays through the aperture.
While the aforementioned designs provide ways to accomplish a number of lighted auxiliary features in rearview mirrors, the aspects of size, cost, and efficiency of the light assemblies used still have not been fully optimized. As such, there remains a need for a lighting assembly that will reduce the cost and size of lighted auxiliary features in rearview mirrors. The optic assembly for mirrors of the present invention addresses many of these problems. When utilizing the techniques described herein, the optic assembly of the present invention will allow using as little as one LED for lighted auxiliary displays that previously required 4 to 9 LEDs. It will also allow a significant reduction in assembly size, as well as a significant reduction in power consumption and generation of heat.
It will be understood by those skilled in the art that one or more aspects of this invention can meet certain objectives, while one or more other aspects can lead to certain other objectives. Other objects, features, benefits and advantages of the present invention will be apparent in this summary and descriptions of the disclosed embodiments, and will be readily apparent to those skilled in the art. Such objects, features, benefits and advantages will be apparent from the above as taken in conjunction with the accompanying figures and all reasonable inferences to be drawn therefrom. |
/**
* @author Alexey Kiselyov
* Date: 25.08.11
*/
public class Profiler {
private static final Logger log = LoggerFactory.getLogger(Profiler.class);
public static final String POLL_INTERVAL = "pollInterval";
public static final long DEFAULT_POLL_INTERVAL = 2000L;
private SamplingProfiler samplingProfiler;
public Profiler() {
}
public void startPolling() throws InterruptedException {
this.samplingProfiler.startPolling();
}
public void stopPolling() {
this.samplingProfiler.stopPolling();
}
private void setPollingInterval(long pollingInterval) {
this.samplingProfiler.setPollingInterval(pollingInterval);
}
public void manageRuntimeGraphsCollection(ManageHotSpotMethodsFromSuT action, Map<String, Object> parameters)
throws Exception {
switch (action) {
case START_POLLING:
setPollingInterval(parameters.containsKey(POLL_INTERVAL) ?
Long.parseLong(parameters.get(POLL_INTERVAL).toString()) :
DEFAULT_POLL_INTERVAL);
startPolling();
break;
case STOP_POLLING:
stopPolling();
break;
}
}
public SamplingProfiler getSamplingProfiler() {
return this.samplingProfiler;
}
@Required
public void setSamplingProfiler(SamplingProfiler samplingProfiler) {
this.samplingProfiler = samplingProfiler;
}
} |
One-Dimensional Optimization for ProportionalResonant Controller Design Against the Change in Source Impedance and Solar Irradiation in PV Systems The variation in source impedance and solar irradiation effects on photovoltaic (PV) system control performance is investigated. A proportional-resonant (PR) controller in a stationary frame in place of a proportional-integral controller in a synchronous frame was adopted to modulate a single-phase grid-tied inverter for PV systems. Although the PR controllers have gained some momentum lately due to advantages such as instantaneous tracking capability and low-cost computational resources, the tracking performance may decline due to changes in source impedance and solar irradiation where the conventional PR design rule is void. To adapt PR controllers over diverse operating conditions without incurring excessive tracking error, a one-dimensional optimization (ODO) algorithm that dictates the quality of the tracking performance is proposed to search for an appropriate factor between the real part of dominant and nondominant eigenvalues for the PV systems. The control gains of the PR controller can be then altered in light of the optimal through a simple algebraic conversion. The experimental results confirm the performance of the proposed strategy. |
Hydrolysis of nicosulfuron under acidic environment caused by oxalate secretion of a novel Penicillium oxalicum strain YC-WM1 A novel Penicillium oxalicum strain YC-WM1, isolated from activated sludge, was found to be capable of completely degrading 100mg/L of nicosulfuron within six days when incubated in GSM at 33°C. Nicosulfuron degradation rates were affected by GSM initial pH, nicosulfuron initial concentration, glucose initial concentration, and carbon source. After inoculation, the medium pH was decreased from 7.0 to 4.5 within one day and remained at around 3.5 during the next few days, in which nicosulfuron degraded quickly. Besides, 100mg/L of nicosulfuron were completely degraded in GSM medium at pH of 3.5 without incubation after 4 days. So, nicosulfuron degradation by YC-WM1 may be acidolysis. Based on HPLC analysis, GSM medium acidification was due to oxalate accumulation instead of lactic acid and oxalate, which was influenced by different carbon sources and had no relationship to nicosulfuron initial concentration. Furthermore, nicosulfuron broke into aminopyrimidine and pyridylsulfonamide as final products and could not be used as nitrogen source and mycelium didnt increase in GSM medium. Metabolomics results further showed that nicosulfuron degradation was not detected in intracellular. Therefore, oxalate secretion in GSM medium by strain YC-WM1 led to nicosulfuron acidolysis. mass spectrometry (LC-MS) have been applied to microbe, plant, and animal metabolism analysis 26. GC-MS has been widely used in the fields of functional genomics, metabolomics, and compounds identification because of its high accuracy and good repeatability 27,28. The objective of this study was to isolate a new microorganism species capable of efficiently degrading nicosulfuron. Here, we described the isolation, identification and degradation characteristics of Penicillium oxalicum sp. YC-WM1, which secreted oxalate and decreased the environmental pH values leading to nicosulfuron acidolysis. In addition, Penicillium oxalicum sp. YC-WM1 exhibited the highest nicosulfuron degradation efficiency compared to the other reported strains. Results Isolation and identification of YC-WM1. Through the enrichment culture method, a nicosulfuron-degrading fungus was isolated and named strain YC-WM1. YC-WM1 could degrade 100% of the initial nicosulfuron (100 mg/L) in GSM medium within three days (33 °C, inoculum biomass amount, 1.0 g dry wt/L). YC-WM1 was an obligate aerobe when grown on GSM plates. Colonies of YC-WM1 were green with smooth, dense, and powder-like textured surface and its conidia were spherical. A 561 bp ITS fragment of YC-WM1 was sequenced and phylogenetic analysis ( Fig. 1, ITS fragment was offered in supplementary file) was performed based on the ITS sequences which revealed that YC-WM1 was closely related to Penicillium oxalicum (100%). According to its morphological characteristics and phylogenetic analysis, YC-WM1 was identified as a Penicillium oxalicum strain. So far, this is the first time the Penicillium oxalicum species has been reported to be capable of degrading nicosulfuron. Degradation of nicosulfuron by YC-WM1. The degradation ability of YC-WM1 were studied in GSM medium with nicosulfuron as the sole nitrogen source at different temperatures (18, 23, 28, 33, and 37 °C), pH values (ranging from 5 to 8), initial concentrations of nicosulfuron (100, 50, 25, and 10 mg/L), and carbon sources (glucose, starch, sucrose, lactose, and glycerinum). As shown in Fig. 
2A, nicosulfuron couldn't be detected after 3 days and the degradation rate came to 100% when the temperature was 33 °C and 37 °C ( Fig. 2A). So, we chose 33 °C as the optimal temperature for further analysis. Degradation of nicosulfuron under different initial pH conditions was explored to analyze its stability. The results (Fig. 2B) showed that when the initial pH values were higher than 7.0, degradation rates of nicosulfuron gradually decreased as the pH values increased. When the initial pH values were 5.0-7.0, the degradation rates of nicosulfuron were all 100% after 120 hours, indicating that YC-WM1 can adapt to neutral and weakly acidic environment (Fig. 2B). Under pH 7.0, YC-WM1 was incubated in GSM medium at 33 °C for 6 days with nicosulfuron as the sole nitrogen source at concentrations of 10, 25, 50, and 100 mg/L. For any group, the degradation rates of nicosulfuron were all 100% after 6 days, and it took about 2, 3, 4, and 6 days for different groups to degradate the nicosulfuron, which indicated that YC-WM1 possessed high potential values of application. The more nicosulfuron added, the more time it would take for complete degradation (Fig. 2C). The effect of initial concentrations of glucose was then studied on the performance of nicosulfuron degradation by YC-WM1. YC-WM1 was cultured under 33 °C and pH 7.0 for 6 days in GSM medium with different concentrations of glucose ranging from 0.25 g/L to 5 g/L while 100 mg/L of nicosulfuron was added. We found that when the initial glucose concentrations were 5, 1, and 0.5 g/L, 100 mg/L of nicosulfuron were totally degraded after 6 days. However, the degradation rate was only 19% when the glucose concentration was 0.25 g/L. Nicosulfuron was hardly degraded without glucose in the GSM medium (Fig. 2D). Our study showed that the nicosulfuron degradability of YC-WM1 is significantly affected by glucose content in the GSM medium. To analyze the importance of carbon sources to nicosulfuron degradation by YC-WM1, YC-WM1 was cultured in the GSM medium with different carbon sources, including glucose, sucrose, starch, lactose, and glycerinum. 100 mg/L nicosulfuron was supplemented. We found that the degradation rates were 100%, 98%, and 97% when the carbon sources were glucose, sucrose, and starch, respectively. However, nicosulfuron in GSM medium were hardly degraded when the carbon sources were lactose and glycerinum (Fig. 2E). Our study showed that the nicosulfuron degradability of YC-WM1 is significantly affected by carbon source. The nicosulfuron degradation rates by strain YC-WM1 varied along with the change of pH values in the GSM medium. The dynamic change of pH values in the GSM medium was monitored during the course of nicosulfuron degradation by YC-WM1. Our results showed that the pH values declined from 7 to 3.5 after inoculation for 2 days and little difference was detected afterwards. As shown in Fig. 3A, the dynamic change of pH values in GSM medium was correlated with the initial pH values. When the initial pH values were 5, 6, and 7, it dropped below 4 while 4.8 and 5.1 with initial pH values of 8 and 9, respectively. But, no significant difference was observed among groups with different initial concentrations of nicosulfuron as mentioned above (Fig. 3B). This indicated that dynamic change of pH values in GSM medium had nothing to do with nicosulfuron initial concentrations. An interesting finding in Fig. 3C was that the dynamic change of pH values was closely related with initial concentrations of glucose. 
When the initial concentrations of glucose were 5, 1, and 0.5 g/L, pH values were decreased to 3.4, 3.5, and 3.5 after 2 days, respectively. Simultaneously, pH values were much higher at any given time when little or no glucose was added into the GSM medium. Next, we manually adjusted the pH values of the solution to 7 every 24 hours, nicosulfuron was not degraded by YC-WM1 after incubation for 5 days. It was proposed that the degradation of nicosulfuron was caused by hydrolysis under acidic conditions due to metabolism of glucose by strain YC-WM1. In other words, the degradation of nicosulfuron was actually caused by the combined effects of microorganisms on acid secretion and chemical hydrolysis. The dynamic change of pH values was found to be closely related to carbon sources of the GSM medium (Fig. 3D). The pH values of GSM medium were decreased to 2.57 and 2.63 after 2 days when the carbon sources were glucose and starch, respectively. When the carbon sources was sucrose, the GMS medium pH values dropped to 3.74 and 2.75 after 2 and 3 days, respectively, at a relatively slower rate. The variation of pH values could be hardly observed when the carbon sources were glycerinum and lactose. Glycerinum and lactose weren't common carbon sources for bacteria and fungi on research of pesticide degradation. The hydrolysis products of starch are glucose while sucrose equally break into glucose and fructose. Before starch and sucrose were used as direct carbon source, it took some steps for starch and sucrose being decomposed into monosaccharide (glucose and fructose), and then the monosaccharide participated in glycolysis. However, these steps are enzymatic reactions which are very fast. Moreover, one molecule of sucrose and starch could be deposed into two and many monosaccharide molecules, respectively. So, the decrease rates of pH values were not obviously different when starch and sucrose were chosen as carbon sources compared to glucose. Starch-Iodine Color Reaction showed that starch was exhausted in two days (Fig. S4). Then we found that 100 mg/L of nicosulfuron were completely degraded in 4 days when added into GSM medium at pH of 3.5 without inoculation, which was consistent with the results stated above. So nicosulfuron is unstable under acid environment (especially under pH 5) and the mechanism of nicosulfuron degradation by YC-WM1 may be acidolysis. Relationship between mycelia growth and nicosulfuron degradation. The effect of different carbons and different initial concentrations of nicosulfuron on mycelia growth were studied. However, little variation on dry weight of mycelia were observed during the whole incubation while the pH values were decreased dramatically and nicosulfuron were degraded rapidly (Figs S5 and S6). Therefore, nicosulfuron degradation by YC-WM1 was due to acidolysis rather than enzyme catalysis. Hypothesis of mechanisms on nicosulfuron degradation by strain YC-WM1. To define the possible mechanism of nicosulfuron degradation by strain YC-WM1. The HPLC was applied to confirm the cleavage of the sulfonylurea group. We observed that the peak area designated for nicosulfuron was decreasing over time. Aminopyrimidine and pyridysulfonamide 29 were detected in the GSM medium after nicosulfuron was degraded by YC-WM1 (Figs S1 and S2). To check whether aminopyrimidine and pyridylsulfonamide could be metabolized by YC-WM1, the corresponding chemical standard substances were added into GSM and MSM inoculated with YC-WM1. 
We found that the concentrations of these two substances didn't decrease after being inoculated for 5 days. So, nicosulfuron turned into aminopyrimidine and pyridylsulfonamide as final products and could not be used as nitrogen source, which prevent YC-WM1 from proliferation. In the meantime, oxalate were accumulated in the GSM medium before and after nicosulfuron degradation by YC-WM1 (Fig. S3). There was a strong correlation between carbons source and oxalate yield 48 h after inoculation. In detail, when the carbon sources were glucose, starch, and sucrose, the concentration of oxalate in the GSM medium reached to 814, 908, and 1093 mg/L, respectively. The pH values in the GSM medium were around 3.5. However, the concentration of oxalate were only 154 and 55 mg/L with the carbon sources of lactose and glycerinum and the pH values were above 6 ( Fig. 3E). No relationship was found between nicosulfuron initial concentrations and oxalate production on the second day. The pH values in the GSM medium were all around 3.5 (Fig. 3F). In addition, some other organic acids, such as lactic acid and acetic acid, were not detected in GSM medium described above (Fig. S3). Therefore, the decrease of pH values in the GSM medium by strain YC-WM1 was due to oxalate accumulation, which led to nicosulfuron acidolysis. Metabolic analysis of strain YC-WM1. The GC-TOF/MS TIC chromatograms of strain YC-WM1 with or without nicosulfuron exposure were shown in Fig. 4. There were various differences in the shape and quantity of peaks between different treatments, with unique peaks in each biofluid. 251 peaks were detected in the strain YC-WM1 and 249 annotated metabolites were identified by GC-MS. 249 annotated metabolites contained about 34 amino acids, 96 organic acids, 42 carbohydrates, 16 alcohols, 13 amines, 10 fatty acids, 46 various organic class compounds, and 1 inorganic compound. Compared to the control groups (CGs), concentrations of 67 metabolites increased in the nicosulfuron treated groups (TGs), while 93 decreased (p < 0.05). The similarity and diversity of samples with different treatments were determined by the method of principal component analysis (PCA) where parallel samples were clustered (Fig. 5). The results showed that metabolites were noticeably separated between the TGs and CGs. As shown in Fig. 6, 30 metabolism pathways (p < 0.05, the pathway impact values were higher than 0.1), including 160 metabolites with significant difference between the TGs and CGs, were enriched. However, only 20 of them were up-regulated and the other 10 were down-regulated, as shown in Table 1. The pathways mentioned above were cross-linked together. As for TCA cycle (KEGG pathway map 00020), nicosulfuron treatment significantly enhanced the production of L-malic acid, citric acid, oxalacetic acid, acidalpha-ketoglutaric acid, fumaric acid, and succinic acid (P < 0.05). Fumaric and L-malic acids were important intermediates in energy generation. The increase of intermediates in glycolysis (KEGG pathway map 00010) might indicate that energy related metabolism was enhanced in response to nicosulfuron treatment. The obvious increase of fructose levels in TGs would have a great impact on regulation of the glycolytic metabolism and even the entire glycolysis pathway. The enhanced energy metabolism was considered as a typical adaptive response under nicosulfuron stress, which is a ubiquitous mechanism existing among animals, plants, and microbe. 
Therefore, the enhancement of energy metabolism might contribute to nicosulfuron-induced resistant response. Besides, some organic substances, including 6-phosphogluconic acid, D-glyceric acid, trehalose, sucrose, gluconic acid, ribose-5-phosphate, gluconic acid, D-ribose, and 6-phosphogluconic acid related to pentose phosphate pathway (KEGG pathway map 00030), were down-regulated in the TGs (P < 0.05). In this study, although pyruvate metabolism (KEGG pathway map00620) pathway was up-regulated in TGs, pyruvate and its downstream molecules were not detected. Malic acid, oxaloacetic acid, and citrate, with higher concentrations in TGs, actually did not participate or had little involvement in pyruvate metabolism. On the contrary, nicosulfuron, aminopyrimidine, and pyridylsulfonamide were not detected among the various metabolites, which further proved that nicosulfuron hydrolysis occurred in the extracellular circumstance of the fungal cells. Propanoate metabolism (KEGG pathway map00640) was promoted in TGs while the concentration of oxalate was of no significant difference between TGs and CGs. Higher concentrations of phenylethylamine in TGs indicated that nicosulfuron may act as a stressor for YC-WM 30. Glycerophospholipids, as glycerol-based phospholipids, were main components of biological membranes and play an important role in the generation of both extracellular and intracellular signals. Glycerophospholipid metabolism was suppressed in TGs, which may guarantee the process of cellular transport. Discussion Microbes play an important role in degradation of herbicides in natural environment. Numerous microbial groups that can degrade or inactivate these chemicals have been identified. Degradation efficiency of tribenuron methyl (TBM) by Pseudomonas sp. strain NyZ42 was about 80% with initial concentration of 200 mg/L within 4 days 31. Some other microbes, capable of degrading metsulfuron-methy, have been isolated 32,33. A group of microbes that can degrade chlorimuron-ethyl have also been identified 32,34,35. In addition, Serratia marcescens N80 with nicosulfuron as the sole nitrogen source, could degradate 93.6% nicosulfuron with initial concentration of 10 mg/L in 96 hours. Serratia marcescens N80 also had degradation function on some other sulfonylurea herbicides, including ethametsulfuron, tribenuron-methyl, metsulfuron-methyl, chlorimuron-ethyl, and rimsulfuron 19. In our study, a nicosulfuron-hydrolyzing strain YC-WM1 was isolated from nicosulfuron contaminated sludge and identified as Penicillium oxalicum. At present, degradating mechanism of sulfonylureas is still not very clear. Two patterns of sulfonylureas degradation have been reported, pure biological process 5 and microbial acidohydrolysis 22. As for biological degradation pattern, only three relevant proteins purified from Bacillus subtilis YB1 with the capacity of nicosulfuron degradation were identified, namely manganese ABC transporter, vegetative catalase 1, and acetoin dehydrogenase E1 21. According to some reports, the degradation of sulfonylureas herbicides was accounted for acidolysis 5,22,36. Sulfonylurea herbicides are unstable under acidic conditions due to low isoelectric point. For instance, strain BW30 was capable of converting glucose (or other carbon compounds in soil) into short-chain fatty acids, including oxalic and lactic acids. These short-chain fatty acids then attacked the sulfonylurea bridge and finally resulted in the breakdown of TBM molecules 37. 
In our study, glucose metabolism was essential to nicosulfuron degradation by Penicillium oxalicum YC-WM1. Firstly, we measured the amount of oxalate in the GSM culture inoculated with YC-WM1 for 3 days and accorded the pH value. The oxalate concentration was 832 mg/L and the pH value was 2.57. Then, we added equal amount of oxalate to the GSM medium without inoculation of YC-WM1 and detected its pH value. It was interesting that the pH value was very close to 2.57. So, the decrease of pH values during the process of nicosulfuron degradation by Penicillium oxalicum YC-WM1 was due to the accumulation of oxalate produced by glucose metabolism. As nicosulfuron is unstable under acidic conditions and its degradation rates by YC-WM1 were closely related to pH variations and initial concentrations of glucose, so the degrading mechanism of nicosulfuron by YC-WM1 belonged to microbial acidolysis. Lately, Alcaligenes faecalis ZWS11 was recorded with the character of degrading nicosulfuron. However, Alcaligenes faecalis ZWS11 did not utilize nicosulfuron as carbon sources or nitrogen source because the GSM medium they described contained carbo sources (glucose) and nitrogen source (NH 4 NO 3 ) 2. So, the degradation mechanism of nicosulfuron by ZWS11 should be further comfirmed. Metabolites trigger cellular reactions in different pathways that mediate and perform multiple cellular functions. Metabolite profiling changes in biological functions or phenotypes are common in response to genetic or environmental stimulation 29,38,39. GC-TOF/MS-based metabonomics were used in conjunction with multivariate statistics to examine the metabolite alteration of Penicillium oxalicum YC-WM1 induced by nicosulfuron exposure. PLS-DA and OPLS-DA analysis of metabolism showed great differences between the TGs and CGs, which indicated that nicosulfuron exposure affected the metabolism of Penicillium oxalicum YC-WM1 to a certain extent. We not only compared the differential levels of metabolites between TGs and CGs, but also pinpointed the corresponding pathways 40,41. Based on metabolic pathway analysis, 20 significant metabolic pathways have been identified from the TGs compared to the CGs. The pentose phosphate pathway, a biochemical pathway parallel to glycolysis, can generate NADPH and 5-carbon sugars and involves oxidation of glucose. However, its primary role is anabolic metabolism rather than catabolic action (such as fatty-acid synthesis and assimilation of inorganic nitrogen) 11,39,42. The reasons for down-regulation of the pentose phosphate pathway may be due to the lack of N source and the enhanced energy metabolic activity under nicosulfuron stress. Oxalic acid, a metabolic end product for plants and fungi, has three chemical natures: proton and electron source and a strong metal chelator 26. In GSM medium inoculated with Penicillium oxalicum YC-WM1, large amounts of oxalate were accumulated resulting in decrease of pH values, which led to acidolysis of nicosulfuron. In fungi, the biosynthesis of oxalate occurs in cell via the glyoxylate cycle or the tricarboxylic acid cycle 43. The substances related to glyoxylate cycle and tricarboxylic acid cycle were all detected except glyoxylate in intracellular, including citrate, isocitrate, succinate, fumarate, malate, oxaloacetate. At the final step, oxalate is produced either by oxidation of glyoxylate to oxalate or cleavage of oxaloacetate to oxalate and acetate 44. Upon these findings, we proposed a biological adaptation model (Fig. 7). 
Amount of oxalate was generated and secreted into the environment which led to the acidolysis of nicosulfuron since nicosulfuron is unstable under acidic conditions. Enriched pathway Hits Impact Change Enrichment, isolation, and screening of nicosulfuron-degrading strains. Nicosulfuron-contaminated sludge was collected from Shandong Huayang Science and Technology Co., Ltd., a pesticide manufacturer in Shandong, China. Ten milliliters of activated sludge were transferred into 250 mL Erlenmeyer flasks containing 100 mL of sterilized enrichment culture with 100 mg/L nicosulfuron. The enrichment culture was incubated in a rotary shaker (100 rpm) at 30 °C in the dark 45. After 7 days, cultures were serially diluted and plated on GSM agar plates supplemented with 600 mg/L nicosulfuron. The plates were incubated at 30 °C, 100 rpm for 5 days, and a single fungal colony was selected and pure colonies were obtained by restreaking for three times. The ability of isolates to degrade nicosulfuron was determined by HPLC, using an Agilent 1200 system equipped with a C 18 column and operated with gradient elution of a solvent mixture (1 ml/min) of acetonitrile (20%) and distilled water containing 0.05% acetic acid (80%). Identification and characterization of nicosulfuron-degrading strains. The isolate with the capacity of nicosulfuron degradation was selected for further analysis, while the characterization and identification were based on its morphological and internal transcribed spacer (ITS) sequence analysis. The ITS sequence was amplified by PCR with the universal primers: ITS1 (5-TCCGTAGGTGAACCTGCGG-3) and ITS4 (5-TCCTCCGCTTATTGATATGC-3 Identification of metabolites in GSM culture. After being incubated for 2 days in 100 mL liquid medium containing 100 mg/L nicosulfuron, several metabolites, such as aminopyrimidine, pyridyl sulfonamide, lactic acid and so on, were detected by an Agilent 1200 HPLC. Metabonomics analysis of YC-WM1 related with nicosulfuron degradation. Extraction of metabolites in fungal YC-WM1. YC-MW1 was inoculated in GSM liquid medium containing 100 mg/L nicosulfuron for 2.5 days when 50% of the initial nicosulfuron was hydrolyzed. The mixture was transferred to 50 mL tubes and centrifuged to pellet cells at 6000 rpm. The supernatant was discarded; the pellet was washed thrice and resuspended in PBS. A certain amount of yellow basket-shaped mycelium samples was placed into 2 mL tubes. 50 L of L-2-Chlorophenylalanine (0.1 mg/mL stock solution in ddH 2 O), as an internal standard, and 0.4 mL of the extraction. To homogenize the samples, steels balls were placed into the sample tubes then homogenized in ball mill for 5 minutes at 70 Hz. The samples were then centrifuged at 12000 rpm for 15 minutes at 4 °C. 0.35 mL of the supernatant were transferred into a fresh 2 mL GC/MS glass vial. Derivation of metabolites extracted from fungal YC-WM1. The samples were dried in a vacuum concentrator at low temperature. 80 L of methoxyamination reagent (20 mg/mL in pyridine) was added to the samples and kept for 2 hours at 37 °C. 0.1 mL of BSTFA regent (1% TMCS, v/v) was then added to the sample aliquots and kept for 1 hour at 70 °C. The samples were left to cool to room temperature and were later used for GC-MS analysis. Detection of metabolites extracted from fungal YC-WM1. GC/TOF MS analysis was performed using an Agilent 7890 gas chromatograph system coupled with a Pegasus HT time-of-flight mass spectrometer. 
The system utilized a DB-5MScapillary column coated with 5% diphenyl cross-linked with 95% dimethylpolysiloxane (30 m 250 m inner diameter, 0.25 m film thickness, J&W Scientific, Folsom, CA, USA). 1 L aliquot of the analyte was injected in splitless mode. Helium was used as the carrier gas. At rate of 1 ml/min through the column and the front inlet purge flow was 3 ml/min. The initial temperature was kept at 50 °C for 1 minute, then raised to 330 °C at a rate of 10 °C/min, then kept for 5 minutes at 330 °C. The injection, transfer line, and ion source temperatures were 280, 280, and 220 °C, respectively. The energy was −70 eV in electron impact mode. The mass spectrometry data were acquired in full-scan mode with the m/z range of 85-600 at a rate of 20 spectra/s after a solvent delay of 360 s. Data analysis. In this study, missing values of raw data were filled up using half of the minimum values across samples, and 248 peaks were detected. So, 248 metabolites were left through interquartile range denoising method. In addition, internal standard normalization method was employed in this data analysis. The resulting three-dimensional data involving the peak number, sample name, and normalized peak area were fed to SIMCA-P 11.5 software (Umetrics, Umea, Sweden) for principal component analysis (PCA), partial least squares discriminant analysis (PLS-DA) and orthogonal projections to latent structures-discriminant analysis (OPLS). PCA analysis showed the distribution of the original data. In order to obtain a higher level of group separation and get a better understanding of variables responsible for classification, a PLS-DA analysis was applied. Afterwards, the parameters for the classification from the software were R2Y = 0.976 and Q2Y = 0.773 which were stable and good to fitness and prediction. A 7-fold cross-validation was used to estimate the robustness and the predictive ability of our model. Such permutation test was proceeded in order to further validate the model. The R2 and Q2 intercepted values were after 200 permutations. The low value of Q2 intercept indicate the robustness of the models, and thus show a low risk of over fitting and reliable. Based on OPLS analysis, a loading plot was constructed, which showed the contribution of variables to difference between the experimental group and the control group. It also showed the significant variables which were situated far from the origin but the loading plot was complex because of many variables. To refine this analysis, the first principal component of variable importance projection (VIP) was obtained. VIP values exceeding 1.0 were first selected as changed metabolites. In step 2, the remaining variables were then assessed by Student's T test (T-test, P < 0.05), and variables were discarded between two groups. In addition, commercial databases, including KEGG (http://www.genome.jp/kegg/) and NIST (http://www.nist.gov/index.html) were utilized to search for metabolites. The heat map was established by R project. Metaboanalyst (www.metaboanalyst.ca/) was used for pathway construction by employing the rice metabolic pathway databases as reference for global test algorithm. |
package com.griffiths.hugh.declarative_knitting.core.model.stitches;
public class Loop {
private final int colour;
public Loop(int colour) {
this.colour = colour;
}
public int getColour() {
return colour;
}
}
|
/*
* Copyright (C) 2021 Vaticle
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import {Numeric} from "../../api/answer/Numeric";
import {Numeric as NumericProto} from "typedb-protocol/common/answer_pb";
import {ErrorMessage} from "../../common/errors/ErrorMessage";
import {TypeDBClientError} from "../../common/errors/TypeDBClientError";
import ILLEGAL_CAST = ErrorMessage.Internal.ILLEGAL_CAST;
import BAD_ANSWER_TYPE = ErrorMessage.Query.BAD_ANSWER_TYPE;
export class NumericImpl implements Numeric {
private readonly _value?: number;
constructor(value?: number) {
this._value = value;
}
asNumber(): number {
if (this.isNumber()) return this._value;
else throw new TypeDBClientError(ILLEGAL_CAST.message("NaN", "number"));
}
isNaN(): boolean {
return !this.isNumber();
}
isNumber(): boolean {
return this._value != null;
}
public toString() {
return this.isNumber() ? `${this.asNumber()}` : 'NaN';
}
}
export namespace NumericImpl {
export function of(numericProto: NumericProto) {
switch (numericProto.getValueCase()) {
case NumericProto.ValueCase.NAN:
return new NumericImpl(null);
case NumericProto.ValueCase.DOUBLE_VALUE:
return new NumericImpl(numericProto.getDoubleValue());
case NumericProto.ValueCase.LONG_VALUE:
return new NumericImpl(numericProto.getLongValue());
case NumericProto.ValueCase.VALUE_NOT_SET:
default:
throw new TypeDBClientError(BAD_ANSWER_TYPE.message(numericProto.getValueCase()));
}
}
}
|
Real-time implementation of multiple feedback loop control for a permanent magnet synchronous motor drive Permanent magnet synchronous motor drives are widely used in high performance applications. In these applications, the drive speed should fellow accurately a certain command trajectory and recover from sudden load disturbances very quickly. The systematic design procedures for both speed and current controllers have been presented. A speed controller using a synchronous reference frame PI regulator is employed as an outer loop. Moreover, two synchronous frame PI regulators are employed as inner loops to control the direct and quadrature axis current components of the motor. The complete drive has been implemented in real-time using digital signal processor (DSP) board DS-1102. The performance of the drive has been investigated by simulation as well as experimental results. The results have proved the superior performance and robustness of the proposed controllers. |
Effect of Inorganic Salts on Pore Structure of Cement Paste Inorganic salts are important admixtures usually used in cold weather concrete. As research basic of influence of salts on concrete durability, effects of inorganic salts on pore structure of cement paste were studied in this paper, and possible implications of concrete property with pore structure was also analyzed. Pore structure of paste added CaCl2, NaCl, Na2SO4, NaNO2, Ca(NO3)2 and Ca(NO2)2 curing for 3 days and 28 days were tested through mercury intrusion porosimetry (MIP). The results showed that no matter 0.3 or 0.5 water-cement ratio, the pores whose diameter <50nm in paste with salts increased at 3 days, which was harmful for the control of concrete shrinkage and cracking at early age. Adding Ca (NO3)2 increased coarse pores (>200nm) of paste at 3 days, but these coarse pores turned into fine pores and reduced significantly at 28 days. Adding NaCl and Na2SO4 into cement paste raised coarse pores with size>1000nm at 3 days and 28days, which were harmful for the pore structure. |
import os
import shutil
import time
from django import forms
from django.conf import settings
from django.core.cache import cache
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from posts.models import Group, Post, User
from yatube.settings import BASE_DIR
@override_settings(MEDIA_ROOT=os.path.join(BASE_DIR, 'temp_folder'))
class PostsPagesTests(TestCase):
@classmethod
def setUpClass(cls):
"""Создадим две записи, для одной из записей создадим группу
еще одну группу создадим но оставим пустой для проверки"""
super().setUpClass()
small_gif = (
b'\x47\x49\x46\x38\x39\x61\x02\x00'
b'\x01\x00\x80\x00\x00\x00\x00\x00'
b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
b'\x00\x00\x00\x2C\x00\x00\x00\x00'
b'\x02\x00\x01\x00\x00\x02\x02\x0C'
b'\x0A\x00\x3B'
)
cls.uploaded = SimpleUploadedFile(
name='small.gif',
content=small_gif,
content_type='image/gif'
)
cls.empty_group = Group.objects.create(
title='Группа без постов',
slug='test_empty',
description='Пустая тестовая группа',
)
cls.filled_group = Group.objects.create(
title='Группа с постами',
slug='test_filled',
description='Тестовая группа c одним постом',
)
cls.test_post_with_gr = Post.objects.create(
text='Текст первого поста который относится к группе с постами',
author=User.objects.create(username='group_filled_username'),
group=cls.filled_group,
image=cls.uploaded,
)
time.sleep(0.1)
cls.user_author = User.objects.create_user(
username='group_empty_username'
)
cls.test_post_without_gr = Post.objects.create(
text='Текст второго поста у которого нет группы',
author=cls.user_author,
image=cls.uploaded,
)
cls.edit_post_id = cls.test_post_without_gr.id
@classmethod
def tearDownClass(cls):
shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
super().tearDownClass()
def setUp(self):
self.guest_client = Client()
self.authorized_client = Client()
self.authorized_client.force_login(PostsPagesTests.user_author)
    def test_post_pages_use_correct_template(self):
"""URL-адрес использует соответствующий шаблон."""
templates_page_names = {
'index.html': reverse('posts:index'),
'new_post.html': reverse('posts:new_post'),
'group.html': (
reverse(
'posts:group_posts',
kwargs={'slug': f'{PostsPagesTests.empty_group.slug}'}
)
),
}
for template, reverse_name in templates_page_names.items():
with self.subTest(template=template):
response = self.authorized_client.get(reverse_name)
self.assertTemplateUsed(response, template)
def test_index_page_list_is_2(self):
""" Удостоверимся, что на главную страницу передаётся
ожидаемое количество объектов"""
response = self.authorized_client.get(reverse('posts:index'))
self.assertEqual(len(response.context['page']), 2)
def test_group_filled_slug_page_list_is_1(self):
""" Удостоверимся, что на страницу группы с одним постом передаётся
ожидаемое количество объектов"""
response = self.authorized_client.get(
reverse(
'posts:group_posts',
kwargs={'slug': f'{PostsPagesTests.filled_group.slug}'}
)
)
self.assertEqual(len(response.context['page']), 1)
def test_group_filled_slug_page_list_is_0(self):
""" Проверим, что пост не попал в группу,
для которой не был предназначен"""
response = self.authorized_client.get(
reverse(
'posts:group_posts',
kwargs={'slug': f'{PostsPagesTests.empty_group.slug}'}
)
)
self.assertEqual(len(response.context['page']), 0)
def test_index_page_show_correct_context(self):
"""Шаблон index сформирован с правильным контекстом."""
response = self.authorized_client.get(reverse('posts:index'))
first_object = response.context['page'][0]
post_text_0 = first_object.text
post_image_0 = first_object.image
self.assertEqual(
post_text_0,
f'{PostsPagesTests.test_post_without_gr.text}'
)
self.assertTrue(post_image_0 is not None)
def test_group_page_show_correct_context(self):
"""Шаблон group сформирован с правильным контекстом."""
response = self.authorized_client.get(
reverse(
'posts:group_posts',
kwargs={'slug': f'{PostsPagesTests.filled_group.slug}'}
)
)
first_object = response.context['page'][0]
post_text_0 = first_object.text
post_image_0 = first_object.image
self.assertEqual(
post_text_0,
f'{PostsPagesTests.test_post_with_gr.text}'
)
self.assertEqual(
response.context['group'].title,
f'{PostsPagesTests.filled_group.title}'
)
self.assertEqual(
response.context['group'].description,
f'{PostsPagesTests.filled_group.description}'
)
self.assertEqual(
response.context['group'].slug,
f'{PostsPagesTests.filled_group.slug}'
)
self.assertTrue(post_image_0 is not None)
def test_new_post_show_correct_context(self):
"""Шаблон new_post сформирован с правильным контекстом."""
response = self.authorized_client.get(reverse('posts:new_post'))
form_fields = {
'group': forms.fields.ChoiceField,
'text': forms.fields.CharField,
}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_field = response.context['form'].fields[value]
self.assertIsInstance(form_field, expected)
def test_profile_show_correct_context(self):
"""Страница профиля сформирован с правильным контекстом."""
response = self.authorized_client.get(
reverse(
'posts:profile',
kwargs={'username': f'{PostsPagesTests.user_author.username}'}
)
)
first_object = response.context['page'][0]
post_text_0 = first_object.text
post_image_0 = first_object.image
self.assertEqual(
post_text_0,
f'{PostsPagesTests.test_post_without_gr.text}'
)
self.assertTrue(post_image_0 is not None)
def test_post_show_correct_context(self):
"""Страница поста сформирована с правильным контекстом."""
response = self.authorized_client.get(
reverse(
'posts:post',
kwargs={
'username': f'{PostsPagesTests.user_author.username}',
'post_id': PostsPagesTests.edit_post_id,
}
)
)
first_object = response.context['post']
post_text = first_object.text
post_image = first_object.image
self.assertEqual(
post_text,
f'{PostsPagesTests.test_post_without_gr.text}'
)
self.assertTrue(post_image is not None)
def test_post_edit_show_correct_context(self):
"""Страница редактирования поста сформирована
с правильным контекстом."""
response = self.authorized_client.get(
reverse(
'posts:post_edit',
kwargs={
'username': f'{PostsPagesTests.user_author.username}',
'post_id': PostsPagesTests.edit_post_id,
}
)
)
form_fields = {
'group': forms.fields.ChoiceField,
'text': forms.fields.CharField,
}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_field = response.context['form'].fields[value]
self.assertIsInstance(form_field, expected)
class IndexPaginatorTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.author = User.objects.create(username='test_user')
for i in reversed(range(1, 14)):
Post.objects.create(
text=f'Тестовый текст {i}го поста',
author=cls.author,
)
time.sleep(0.1)
def setUp(self):
self.guest_client = Client()
def test_first_page_contains_ten_records(self):
""" На главной странице 10 постов """
response = self.guest_client.get(reverse('posts:index'))
self.assertEqual(len(response.context['page']), 10)
def test_second_page_contains_three_records(self):
""" На второй странице должно быть три поста. """
response = self.guest_client.get(reverse('posts:index') + '?page=2')
self.assertEqual(len(response.context.get('page')), 3)
def test_index_page_show_correct_posts(self):
""" Посты выводятся в правильном порядке"""
response = self.guest_client.get(reverse('posts:index'))
first_object = response.context['page'][0]
post_text_0 = first_object.text
self.assertEqual(
post_text_0,
'Тестовый текст 1го поста'
)
class CacheIndexTest(TestCase):
def setUp(self):
self.guest_client = Client()
def test_cache_index_page(self):
"""Проверим что вывод главной страницы кэшируется корректно"""
Post.objects.create(
text='Тестовый текст первого поста',
author=User.objects.create(
username='test_username1'
)
)
first_response = self.guest_client.get(reverse('posts:index'))
Post.objects.create(
text='Тестовый текст второго поста',
author=User.objects.create(
username='test_username2'
)
)
second_response = self.guest_client.get(reverse('posts:index'))
first_response_content = first_response.content
second_response_content = second_response.content
self.assertEqual(
first_response_content,
second_response_content
)
cache.clear()
third_response = self.guest_client.get(reverse('posts:index'))
third_response_content = third_response.content
self.assertNotEqual(
first_response_content,
third_response_content
)
class FollowTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.follower_user = User.objects.create_user(
username='follower_user'
)
cls.followed_user = User.objects.create_user(
username='followed_user'
)
def setUp(self):
self.authorized_follower_client = Client()
self.authorized_followed_client = Client()
self.authorized_follower_client.force_login(
FollowTests.follower_user
)
self.authorized_followed_client.force_login(
FollowTests.followed_user
)
def test_follow_works_properly(self):
"""Проверим, что авторизованный пользователь может
подписываться на других пользователей"""
follower_number_initial = (FollowTests.followed_user.
following.all().count())
self.authorized_follower_client.get(
reverse(
'posts:profile_follow',
kwargs={
'username': f'{FollowTests.followed_user.username}',
}
)
)
follower_number_follow = (FollowTests.followed_user.
following.all().count())
self.assertNotEqual(
follower_number_initial,
follower_number_follow
)
def test_unfollow_works_properly(self):
"""Проверим, что авторизованный пользователь может
удалять пользователей из подписок"""
self.authorized_follower_client.get(
reverse(
'posts:profile_follow',
kwargs={
'username': f'{FollowTests.followed_user.username}',
}
)
)
follower_number_initial = (FollowTests.followed_user.
following.all().count())
self.authorized_follower_client.get(
reverse(
'posts:profile_unfollow',
kwargs={
'username': f'{FollowTests.followed_user.username}',
}
)
)
follower_number_unfollow = (FollowTests.followed_user.
following.all().count())
self.assertNotEqual(
follower_number_initial,
follower_number_unfollow
)
def test_new_posts_appear_in_follower_page(self):
"""Новая запись пользователя появляется в ленте тех, кто
на него подписан и не появляется в ленте тех, кто не подписан"""
self.authorized_follower_client.get(
reverse(
'posts:profile_follow',
kwargs={
'username': f'{FollowTests.followed_user.username}',
}
)
)
follower_intial_response = self.authorized_follower_client.get(
reverse('posts:follow_index')
)
followed_intial_response = self.authorized_followed_client.get(
reverse('posts:follow_index')
)
follower_initial_post_quantity = len(follower_intial_response.
context['page'])
followed_initial_post_quantity = len(followed_intial_response.
context['page'])
Post.objects.create(
text='Тестовый текст',
author=FollowTests.followed_user
)
follower_second_response = self.authorized_follower_client.get(
reverse('posts:follow_index')
)
followed_second_response = self.authorized_followed_client.get(
reverse('posts:follow_index')
)
follower_second_post_quantity = len(follower_second_response.
context['page'])
followed_second_post_quantity = len(followed_second_response.
context['page'])
self.assertNotEqual(
follower_initial_post_quantity,
follower_second_post_quantity
)
self.assertEqual(
followed_initial_post_quantity,
followed_second_post_quantity
)
|
Animal, Magnetism, Theatricality in Ibsen's The Wild Duck This article contends that it is necessary to go beyond the reductionism of certain investments in Ibsen criticism (either feminism or theater history) to perceive that in The Wild Duck it is through his treatment of the animal that Ibsen is able to address, with elegant economy, both an ethics of alterity and aesthetic concerns with theatricality and mediation. Ibsen achieves this through an exploration of the ways that different forms of mediation (theatrical, technological, intersubjective, and occult) all pivot about the same aesthetic questions that plague discussions of animality. In so doing, The Wild Duck also critiques period notions of "the human."
Tandem Mass Spectrometry Has a Larger Analytical Range than Fluorescence Assays of Lysosomal Enzymes: Application to Newborn Screening and Diagnosis of Mucopolysaccharidoses Types II, IVA, and VI. BACKGROUND There is interest in newborn screening and diagnosis of lysosomal storage diseases because of the development of treatment options that improve clinical outcome. Assays of lysosomal enzymes with high analytical range (ratio of assay response from the enzymatic reaction divided by the assay response due to nonenzymatic processes) are desirable because they are predicted to lead to a lower rate of false positives in population screening and to more accurate diagnoses. METHODS We designed new tandem mass spectrometry (MS/MS) assays that give the largest analytical ranges reported to date for the use of dried blood spots (DBS) for detection of mucopolysaccharidoses type II (MPS-II), MPS-IVA, and MPS-VI. For comparison, we carried out fluorometric assays of 6 lysosomal enzymes using 4-methylumbelliferyl (4MU)-substrate conjugates. RESULTS The MS/MS assays for MPS-II, -IVA, and -VI displayed analytical ranges that are 1-2 orders of magnitude higher than those for the corresponding fluorometric assays. The relatively small analytical ranges of the 4MU assays are due to the intrinsic fluorescence of the 4MU substrates, which cause high background in the assay response. CONCLUSIONS These highly reproducible MS/MS assays for MPS-II, -IVA, and -VI can support multiplex newborn screening of these lysosomal storage diseases. MS/MS assays of lysosomal enzymes outperform 4MU fluorometric assays in terms of analytical range. Ongoing pilot studies will allow us to gauge the impact of the increased analytical range on newborn screening performance. |
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2014 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_CORE_TAKER_H_INCLUDED
#define RIPPLE_CORE_TAKER_H_INCLUDED
#include <ripple/module/app/book/Amounts.h>
#include <ripple/module/app/book/Quality.h>
#include <ripple/module/app/book/Offer.h>
#include <ripple/module/app/book/Types.h>
#include <beast/streams/debug_ostream.h>
#include <beast/utility/noexcept.h>
#include <functional>
namespace ripple {
namespace core {
/** State for the active party during order book or payment operations. */
class Taker
{
public:
struct Options
{
Options() = delete;
explicit
Options (std::uint32_t tx_flags)
: sell (tx_flags & tfSell)
, passive (tx_flags & tfPassive)
, fill_or_kill (tx_flags & tfFillOrKill)
, immediate_or_cancel (tx_flags & tfImmediateOrCancel)
{
}
bool const sell;
bool const passive;
bool const fill_or_kill;
bool const immediate_or_cancel;
};
private:
std::reference_wrapper <LedgerView> m_view;
Account m_account;
Options m_options;
Quality m_quality;
Quality m_threshold;
// The original in and out quantities.
Amounts const m_amount;
// The amounts still left over for us to try and take.
Amounts m_remain;
Amounts
flow (Amounts amount, Offer const& offer, Account const& taker);
TER
fill (Offer const& offer, Amounts const& amount);
TER
fill (Offer const& leg1, Amounts const& amount1,
Offer const& leg2, Amounts const& amount2);
void
consume (Offer const& offer, Amounts const& consumed) const;
public:
Taker (LedgerView& view, Account const& account,
Amounts const& amount, Options const& options);
LedgerView&
view () const noexcept
{
return m_view;
}
/** Returns the amount remaining on the offer.
This is the amount at which the offer should be placed. It may either
be for the full amount when there were no crossing offers, or for zero
when the offer fully crossed, or any amount in between.
It is always at the original offer quality (m_quality)
*/
Amounts
remaining_offer () const;
/** Returns the account identifier of the taker. */
Account const&
account () const noexcept
{
return m_account;
}
/** Returns `true` if the quality does not meet the taker's requirements. */
bool
reject (Quality const& quality) const noexcept
{
return quality < m_threshold;
}
/** Returns `true` if order crossing should not continue.
Order processing is stopped if the taker's order quantities have
been reached, or if the taker has run out of input funds.
*/
bool
done () const;
/** Perform direct crossing through given offer.
@return tesSUCCESS on success, error code otherwise.
*/
TER
cross (Offer const& offer);
/** Perform bridged crossing through given offers.
@return tesSUCCESS on success, error code otherwise.
*/
TER
cross (Offer const& leg1, Offer const& leg2);
};
inline
std::ostream&
operator<< (std::ostream& os, Taker const& taker)
{
return os << taker.account();
}
}
}
#endif
|
More mighty than the waves of the sea: toilers, tariffs, and the income tax movement, 1880-1913 In the early decades of the twentieth century, the progressive income tax gradually came to dominate the U.S. system of public finance. The move to a direct and progressive tax regime had its roots in the social movements of the time period. This article examines organized labor's attitudes towards taxation at the turn of the century. It explores the pivotal role that members of the Knights of Labor and the American Federation of Labor played in facilitating this historic shift in U.S. tax policy. By examining what national leaders and rank-and-file members of organized labor said and did about taxation, this article contends that the American labor movement played a significant - though halting - part in helping establish a fairer and more effective, modern system of taxation.
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ZomatoItem(scrapy.Item):
# define the fields for your item here like:
category = scrapy.Field()
names = scrapy.Field()
rating = scrapy.Field()
reviews = scrapy.Field()
location = scrapy.Field()
cost = scrapy.Field()
cuisine = scrapy.Field()
featured = scrapy.Field()
urls = scrapy.Field()
class ZlocItem(scrapy.Item):
location = scrapy.Field()
names = scrapy.Field() |
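
# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical spider showing how the ZomatoItem fields defined above might
# be populated. The start URL and CSS selectors are assumptions made purely
# for illustration; a real spider would normally live in its own module.
class ZomatoDemoSpider(scrapy.Spider):
    name = 'zomato_demo'
    start_urls = ['https://www.zomato.com/some-city/restaurants']  # assumed URL

    def parse(self, response):
        for card in response.css('div.search-result'):  # assumed selector
            item = ZomatoItem()
            item['names'] = card.css('a.result-title::text').get()
            item['rating'] = card.css('div.rating::text').get()
            item['urls'] = response.urljoin(card.css('a::attr(href)').get() or '')
            yield item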
<filename>eden/cli/tabulate.py
#!/usr/bin/env python3
#
# Copyright (c) 2004-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from typing import Dict, List, Optional
def tabulate(
headers: List[str],
rows: List[Dict[str, str]],
header_labels: Optional[Dict[str, str]] = None,
) -> str:
""" Tabulate some data so that it renders reasonably.
rows - is a list of data that is to be rendered
headers - is a list of the dictionary keys of the row data to
be rendered and specifies the order of the fields.
header_labels - an optional mapping from dictionary key to a
more human friendly label for that key.
A missing mapping is defaulted to the uppercased
version of the key
Returns a string holding the tabulated result
"""
col_widths = {}
def label(name) -> str:
label = (header_labels or {}).get(name, "")
if label:
return label
return str(name.upper())
def field(obj, name) -> str:
return str(obj.get(name, ""))
for name in headers:
col_widths[name] = len(label(name))
for row in rows:
for name in headers:
col_widths[name] = max(len(field(row, name)), col_widths[name])
format_string = ""
for col_width in col_widths.values():
if format_string:
format_string += " "
format_string += "{:<%d}" % col_width
output = format_string.format(*[label(name) for name in headers])
for row in rows:
output += "\n"
output += format_string.format(*[field(row, name) for name in headers])
return output
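
# Illustrative usage sketch (hypothetical data, not part of the original file):
# shows how headers, rows and header_labels interact. Column widths adapt to
# the longest cell, and missing header_labels entries fall back to upper-case.
if __name__ == "__main__":
    demo_rows = [
        {"name": "fbsource", "state": "RUNNING", "mounts": "3"},
        {"name": "www", "state": "STOPPED", "mounts": "1"},
    ]
    print(
        tabulate(
            headers=["name", "state", "mounts"],
            rows=demo_rows,
            header_labels={"mounts": "MOUNT COUNT"},
        )
    )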
|
A Columbia University report commissioned by the American Petroleum Institute in 1982 cautioned that global warming "can have serious consequences for man's comfort and survival." It is the latest indication that the oil industry learned of the possible threat it posed to the climate far earlier than previously known.
The report, "Climate Models and CO2 Warming, A Selective Review and Summary," was written by Alan Oppenheim and William L. Donn of Columbia's Lamont-Doherty Geological Observatory for API's Climate and Energy task force, said James J. Nelson, the task force's former director. From 1979 to 1983, API and the nation's largest oil companies convened the task force to monitor and share climate research, including their in-house efforts. Exxon ran the most ambitious of the corporate programs, but other oil companies had their own projects, smaller than Exxon's and focused largely on climate modeling.
The task force commissioned the report to better understand the models being produced in the nascent field of climate science, Nelson said.
"There was discussion in the committee about all the noise and information" around carbon dioxide, Nelson said. "There were all sorts of numbers being thrown around. We were not trying to find a model to hang our hats on. It was more, 'If you see this model, this is how it's built and these are its strengths and weaknesses.'"
Obtained from a university library by the Union of Concerned Scientists and made available to InsideClimate News, the report described in detail five models used at the time by climate scientists. They ranged from simple to complex: the radiation balance model, energy balance, radiative-convective, thermodynamic and general circulation model. A table showed the predictions each model generated of the average increase in global temperature if atmospheric concentrations of CO2 doubled compared to pre-industrial times, from .6 degrees C per hemisphere under the thermodynamic model to 2 to 3.5 degrees C globally under the general circulation model. The poles were expected to undergo even greater jumps in temperature.
The report did not focus on the forces behind the increase in CO2 concentrations, but it linked the phenomenon plainly to fossil fuel use. Atmospheric CO2, it said, "is expected to double some time in the next century. Just when depends on the particular estimate of the level of increasing energy use per year and the mix of carbon based fuels."
Like many studies at the time, the report stressed the models' inherent uncertainties. "All models are still sufficiently unrealistic that a definitive evaluation of the problem requires continued effort," the authors wrote in the summary.
Still, the report concluded that the models pointed to hikes in global average temperatures as CO2 concentrations rose. "They all predict some kind of increase in temperature within a global mean range of 4 degrees C," the report stated. "The consensus is that high latitudes will be heated more than the equator and the land areas more than the oceans."
The consensus turned out to accurately predict how global warming has proceeded since then and matches what current models are still predicting for the future.
The consequences for humanity were serious, the authors wrote, "since patterns of aridity and rainfall can change, the height of the sea level can increase considerably and the world food supply can be affected."
The authors concluded that "optimum forecasting of climate changes is a necessity for any realistic long term planning by government and industry."
When it commissioned the report, Nelson said, the API task force did not provide any guidance on which models to use and did not meddle with the assessment. Committee members received periodic updates about the report's progress, he said. The final document "was well received by the committee," Nelson recalled. "We didn't change it at all. Copies were sent to all the member companies since they paid for it."
Nelson said the general feeling of the task force members about climate models echoed the report's findings that models were not yet realistic enough to evaluate global warming definitively.
Nelson said he suggested the Columbia study in part because of his own skepticism of atmospheric modeling. A former Air Force pilot, he had sometimes found himself in hairy situations because of inaccurate weather forecasting based on modeling.
"Everybody kept talking about the doubling of CO2 in the atmosphere, and we wanted to know where the numbers came from, what kind of assumptions were in the models as definitely as possible," he said, "because I had the experience from a long time before that if you put garbage in, you get garbage out."
Task force members also wanted to understand the modeling because they worried that the predictions could lead to what they believed were unnecessary regulations. "Where we were also coming from, we felt we didn't want the EPA throwing a lot of new rules at this until we knew more precisely what would be the most effective methods of solving the problem," Nelson said.
The report accurately described climate physics and the models used in the early 1980s, said Anthony Del Genio, a NASA atmospheric scientist and expert on the general circulation model and climate feedbacks, who recently read the Columbia document.
But it also reflected "biases prevalent in the academic community at the time" that simpler models were better than the general circulation one, Del Genio said in an email. Further, the report's "failure to critically evaluate the models, some of it justified by the limited knowledge at that time but some of it a failure to think critically about the simple models, is its greatest weakness."
Del Genio also questioned why API commissioned such a paper when the National Academy of Sciences had issued a definitive assessment of climate models in 1979, known as the Charney report.
More telling is what API did with the information once they read their own report. "API could have used that knowledge to invest in developing solutions to climate change," said Peter Frumhoff, director of science and policy for UCS.
Instead, a year after the task force circulated the report to API's members, the organization disbanded the committee and shifted its work on climate change from the environment directorate to its lobbying arm.
The industry's lobbying effort over the years sought to emphasize the uncertainties surrounding global warming, even as the models improved and the scientific consensus around man-made climate change grew stronger. Throughout the 1990s, for instance, it joined Exxon and other fossil fuel interests in the Global Climate Coalition (GCC), whose objective was to derail international efforts to curb greenhouse emissions by questioning climate science. In 1998, API coordinated a multi-million dollar campaign to convince the public and policymakers that the Kyoto Protocol was based on tenuous science.
The groups declared victory when President George W. Bush pulled the U.S. out of the Kyoto Protocol in 2001.
A June 2001 briefing memorandum records a top State Department official thanking the GCC because Bush "rejected the Kyoto Protocol in part, based on input from you." |
/**
 * Generates the JavaScript code for the interactive map and returns it as a string.
*/
public String generateJS() {
List<InteractiveMapContainer> containers = new ArrayList<InteractiveMapContainer>();
List<InteractiveMapArea> areas = new ArrayList<InteractiveMapArea>();
fillContainersAndAreas(tooltipDescriptor.getRoot(), null, containers,
areas);
CSSDeclarationBlock containerFrameCss = null;
if (frameColor != null) {
containerFrameCss = new CSSDeclarationBlock(POSITION, "absolute")
.setBorder("3px", "solid",
ColorUtils.toHtmlString(frameColor));
}
CSSDeclarationBlock[] css = new CSSDeclarationBlock[] { TOOLTIP_DIV,
CSSMananger.TOOL_TIP_CAPTION, TOOLTIP_CELL_EVEN,
TOOLTIP_CELL_ODD, AREA_HIGHLIGHT_CSS, containerFrameCss };
return BaseJSModule
.installInteractiveMap(imageId, css, CollectionUtils.toArray(
displayList.getKeyList(), String.class),
CollectionUtils.toArray(containers,
InteractiveMapContainer.class), CollectionUtils
.toArray(areas, InteractiveMapArea.class));
} |
There is a conventional light fixture which is used after being attached to a power track arranged on a ceiling or the like. Such a light fixture includes a lamp and a down-transformer section serving as a device for lighting the lamp. However, since the down-transformer section is not small enough to be stored in the power track, it is stored in an outer envelope arranged independently of the lamp and placed under the power track. Therefore, the lamp and the outer envelope disadvantageously overhang under the power track in appearance. Two methods are known for suppressing this overhang of the outer envelope: reducing the size of the circuit of the down-transformer section, and devising the structure of the printed circuit board on which the electronic parts of the down-transformer section are mounted so as to reduce the down-transformer section in size.
In the two solving methods, a light fixture including the down-transformer section which is reduced in size to have preferable appearance when attached to the power track is proposed in JP11-111040A. However, this light fixture uses, as a lamp, an incandescent lamp lighted by the down-transformer section, a halogen lamp, or the like. On the other hand, a light fixture using a high-pressure discharge lamp such as an HID (High Intensity Discharged) lamp has a problem in which the circuit cannot be reduced much in size, since the number of parts is large due to a complex circuit configuration of the device for lighting the high-pressure discharge lamp (as described later) and temperatures of the electronic parts increase.
FIG. 28 shows a basic circuit of a conventional high-pressure discharge lamp lighting device. The device includes a rectifier circuit 92 containing a step-up chopper, a power control circuit 97 containing a step-down chopper, a polarity inversion circuit 93 containing a full bridge circuit, an high-pressure pulse generation circuit Ig, a control circuit 96 for performing drive control of a switching element Q95 for the step-up chopper, and a control circuit 98 for performing drive control for a switching element Q96 for the step-down chopper. Each circuit will be described below.
The rectifier circuit 92 includes a so-called step-up chopper circuit containing an inductor L93, a diode D95, a capacitor C95, and a switching element Q95 such as a MOSFET (Metal Oxide Silicon Field Effect Transistor) and a bridge rectifier DB. The bridge rectifier DB fully rectifies an AC voltage from a commercial AC power supply AC to generate a pulsating-flow voltage. The step-up chopper circuit converts the pulsating-flow voltage generated by the bridge rectifier DB into a DC voltage to output the DC voltage.
The power control circuit 97 contains the switching element Q96 such as a MOSFET turning on/off at several 10 KHz, a diode D96, an inductor L94, and a capacitor C96, an output current of which is of chopping-wave shape. A voltage depending on an output current output from the power control circuit 97 is induced at the secondary winding of the inductor L94. The induced voltage is input to the control circuit 98 through a series-connected resistor R94. The control circuit 98 performs zero-cross switching control of the switching element Q96 on the basis of the voltage input from the secondary winding of the inductor L94. The capacitor C96 is used to remove harmonic components from the output current of a primary winding of the inductor L94.
The polarity inversion circuit 93 converts a DC voltage from the power control circuit 97 into a rectangular wave AC voltage having a low frequency of several 100 Hz through a full-bridge circuit including switching elements Q91 to Q94 such as MOSFETs and supplies the rectangular wave AC voltage to a high-pressure discharge lamp DL.
The high-pressure pulse generation circuit Ig is for generating a high-pressure pulse to cause dielectric breakdown between the electrodes of the high-pressure discharge lamp DL, and is used to start the high-pressure discharge lamp DL. After the high-pressure discharge lamp DL is started, the operation of the high-pressure pulse generation circuit Ig is halted.
The light fixture including the high-pressure discharge lamp DL lighted by the lighting device is disclosed in JP14-75045A. It is desired that the circuit of the lighting device is changed to be smaller so as to reduce the light fixture in size.
On the other hand, as another solving method, a method of devising the structure of a printed circuit board on which electronic parts are mounted to reduce an electronic circuit module in size is proposed in JP5-327161A. This has a structure in which an auxiliary circuit board is arranged perpendicularly to a main circuit board. The auxiliary circuit board has one pair of substrate support sections projecting from both the longitudinal ends to the main circuit board side, and the main circuit board has fixing holes into which the substrate support sections of the auxiliary circuit board penetrate. When the auxiliary circuit board is mounted on the main circuit board, the substrate support sections are inserted into the fixing holes, so that the auxiliary circuit board can be held perpendicularly to the main circuit board. In this manner, the electronic parts to be mounted on the printed board can be three-dimensionally mounted on the main circuit board and the auxiliary circuit board, and therefore the electronic circuit module can be reduced in size.
However, in this mounting structure, a plurality of terminal pads aligned along the long side of the auxiliary circuit board and a plurality of pads aligned on the upper surface of the main circuit board are soldered on the parts surface side of the main circuit board. For this reason, for example, in case that the soldered surface of the main circuit board is dipped into a solder tank while the auxiliary circuit board is mounted on the parts surface of the main circuit board to solder the part leads of the main circuit board, the auxiliary circuit board cannot be connected to the main circuit board simultaneously with the parts. As a matter of course, even in this mounting structure, if all the parts on the main circuit board are mounted on the upper surface of the main circuit board simultaneously with the auxiliary circuit board, soldering of the parts on the main circuit board and soldering of the auxiliary circuit board and the main circuit board can be simultaneously performed.
However, as indicated in the reference, when surface mounting is done, reflow soldering is performed, and the heating involved in reflow soldering causes another problem, namely positioning errors of the parts mounted on the auxiliary circuit board. In order to prevent such positioning errors, an additional member which holds the parts on the auxiliary circuit board is required, which disadvantageously increases the cost. The substrate support sections of the auxiliary circuit board are also easily broken, and if they are broken, the whole substrate cannot be used. Furthermore, since the auxiliary circuit board is mounted on the upper surface (the surface on which electronic parts are mounted) of the main circuit board, the terminal pads of the auxiliary circuit board are located above the upper surface of the main circuit board. Therefore, the electronic parts to be mounted on the auxiliary circuit board are placed at positions above the terminal pads and distant from the upper surface of the main circuit board, so that the height of the projection of the auxiliary circuit board from the upper surface of the main circuit board is difficult to make small. For this reason, the electronic circuit module on which the electronic parts are mounted cannot be easily downsized.
The present invention has been made in consideration of the above problems, and has as its object to provide a compact high-pressure discharge lamp lighting device and a light fixture equipped with the high-pressure discharge lamp lighting device. |
The present invention relates generally to new and novel improvements in stencils having enhanced wear-resistance, and to methods of manufacturing the same. More particularly, the present invention relates to stencils which may be used, in one advantageous application, for printing solder paste onto the contact pads of printed circuit boards. Stencils in accordance with the present invention are also useful in other applications, including printing processes where the stencils are subjected to mechanical pressure.
Printed circuit boards designed to receive surface-mounted components are often provided with a plurality of contact pads on their surface. Surface-mounted components are mounted by way of connector leads on the individual surface-mounted component's body. In order to mechanically and electrically connect the connector leads of the components to the contact pads of the printed wiring board, the contact pads are typically provided with a layer of solder paste prior to placement of the connector leads thereon. Once the surface-mounted component has been positioned on the appropriate contact pads, the solder paste is melted and subsequently solidifies to form a mechanical and electrical connection between the connector leads of the surface-mounted component and the contact pads.
Metal stencils are often employed to apply the solder paste onto the contact pads. Such stencils typically have a plurality of apertures which are positioned in the stencil in a predetermined pattern to correspond with the pattern of contact pads on a particular printed circuit board.
In use, such stencils are positioned on the surface of the printed circuit boards having the contact pads upon which solder paste is to be applied. The apertures in the stencil are then aligned over the contact pads onto which solder paste is to be applied. Solder paste is then urged mechanically by, for example, a squeegee, across the upper surface of the stencil and through the apertures in the stencil. Islands of solder paste are thereby accurately located on the appropriate contact pads.
These types of stencils are commonly employed in high-volume production processes. However, one disadvantage of many known stencils is that the stencil quality deteriorates quickly due to damage caused by the mechanical pressure which is applied by the squeegee as it passes over the stencil surface and solder paste is urged into the apertures, and subsequently onto the contact pads. In particular, the edges at the junction between the upper surface of the stencil and the side walls of the apertures become rounded and generally damaged, thus leading to a reduction in the quality and definition of the resulting solder paste pattern. Thus, the useful service life of such known stencils is disadvantageously restricted.
Accordingly, an object of the present invention is to provide stencils having enhanced wear-resistance which are resistant to damage and capable of extended service life, and manufacturing methods for making the same.
Another object of the present invention is the provision of stencils having enhanced wear-resistance which can be used for applying solder paste onto contact pads of printed circuit boards and which are resistant to damage and capable of extended service life, and manufacturing methods for making the same.
These and other objects of the present invention are attained by the provision of a stencil having a metal body having first and second major surfaces, and at least one aperture passing through the body from the first major surface to the second major surface. In one preferred embodiment of the present invention, the side wall surfaces of the apertures and at least one of the first major surface and second major surface has a thin diamond coating thereon. Preferably, the diamond coating is applied as a thin film by a low pressure synthesis process.
It should be understood that the term diamond as used in the present description and claims refers to a transparent and hard synthetic carbon material having properties which resemble natural diamonds. Such materials are commonly known in the art as synthetic diamonds or diamondlike carbon.
Other objects, advantages and novel features of the present invention will become apparent in the following detailed description of the invention when considered in conjunction with the accompanying drawings. |
// make
#include "fftx3.hpp"
#include <array>
#include <cstdio>
#include <cassert>
using namespace fftx;
int main(int argc, char* argv[])
{
tracing=true;
const int n =8;
const int ns =3;
const int nd =5;
box_t<3> sbox({{0,0,0}}, {{ns-1, ns-1, ns-1}});
box_t<3> dbox({{n-nd,n-nd,n-nd}}, {{n-1,n-1,n-1}});
box_t<3> rdomain({{0,0,0}}, {{n-1, n-1, n-1}});
box_t<3> freq({{0,0,0}}, {{(n-1)/2+1, n-1, n-1}});
std::array<array_t<3, double>,2> realIntermediates {rdomain, rdomain};
std::array<array_t<3,std::complex<double>>,2> intermediates {freq, freq};
array_t<3,double> input(sbox);
array_t<3,double> output(dbox);
array_t<3,double> symbol(freq);
setInputs(input);
setOutputs(output);
openScalarDAG();
zeroEmbedBox(realIntermediates[0], input);
PRDFT(rdomain.extents(), intermediates[0], realIntermediates[0]);
kernel(symbol, intermediates[1], intermediates[0]);
IPRDFT(rdomain.extents(), realIntermediates[1], intermediates[1]);
extractBox(output, realIntermediates[1]);
closeScalarDAG(intermediates, "hockney");
}
|
Experimental and Modeling Evaluation of Dimethoxymethane as an Additive for High-Pressure Acetylene Oxidation The high-pressure oxidation of acetylene-dimethoxymethane (C2H2-DMM) mixtures in a tubular flow reactor has been analyzed from both experimental and modeling perspectives. In addition to pressure (20, 40, and 60 bar), the influence of the oxygen availability (by modifying the air excess ratio, λ) and the presence of DMM (two different concentrations have been tested, 70 and 280 ppm, for a given concentration of C2H2 of 700 ppm) have also been analyzed. The chemical kinetic mechanism, progressively built by our research group in the last years, has been updated with recent theoretical calculations for DMM and validated against the present results and literature data. Results indicate that, under fuel-lean conditions, adding DMM enhances C2H2 reactivity by increased radical production through DMM chain branching pathways, more evident for the higher concentration of DMM. H-abstraction reactions with OH radicals as the main abstracting species to form dimethoxymethyl (CH3OCHOCH3) and methoxymethoxymethyl (CH3OCH2OCH2) radicals are the main DMM consumption routes, with the first one being slightly favored. There is a competition between β-scission and O2-addition reactions in the consumption of both radicals that depends on the oxygen availability. As the O2 concentration in the reactant mixture is increased, the O2-addition reactions become more relevant. The effect of the addition of several oxygenates, such as ethanol, dimethyl ether (DME), or DMM, on C2H2 high-pressure oxidation has been compared. Results indicate that ethanol has almost no effect, whereas the addition of an ether, DME or DMM, shifts the conversion of C2H2 to lower temperatures.
Model performance for experimental data sets found in the literature:
a. Dimethoxymethane oxidation in a jet-stirred reactor (JSR). Experiments reported by Vermeire et al. [23], performed in a quartz jet-stirred reactor, have also been used to validate the kinetic mechanism. Four different equivalence ratios have been investigated: φ = ∞ (pyrolysis), φ = 2, φ = 1, and φ = 0.25. Simulations have been performed with the continuous stirred-tank reactor module of the Chemkin-Pro software package [38].
b. Dimethoxymethane oxidation in an atmospheric-pressure tubular-flow reactor. The kinetic mechanism has been validated with experiments reported by Marrodán et al. [21], performed in a tubular-flow reactor at atmospheric pressure from pyrolysis to fuel-lean conditions, i.e. the air excess ratio was varied from λ = 0 to λ = 35. Simulations have been performed with the plug-flow reactor module of the Chemkin-Pro software package [38].
c. Dimethoxymethane oxidation in a high-pressure tubular-flow reactor. The kinetic mechanism has also been validated with experiments reported by Marrodán et al. [20], performed in a tubular-flow reactor at high pressure (20-60 bar). The air excess ratio was varied from λ = 0.7 to λ = 20. Simulations have been performed with the plug-flow reactor module of the Chemkin-Pro software package [38]. (Figure captions: experimental results (symbols) reported by Marrodán et al. [20] and modeling calculations (lines) obtained with the present mechanism are compared for different air excess ratios, λ = 0.7, λ = 1 and λ = 20.)
d. Ignition delay times of DMM. Ignition delay times reported by Li et al. [26], measured in a shock tube at pressures of 1 and 4 atm for equivalence ratios of 0.5, 1 and 2, have also been used to validate the kinetic mechanism. Simulations have been performed with the closed homogeneous reactor module of the Chemkin-Pro software package [38]. Results of the comparison of model calculations and experimental data are shown in Figures S19 and S20. (Figure captions: experimental results (symbols) reported by Li et al. [26] and modeling calculations (lines) obtained with the present mechanism are compared for equivalence ratios of 0.5, 1 and 2 at 1 atm (Figure S19) and 4 atm (Figure S20).)
e. Acetylene oxidation in a high-pressure tubular-flow reactor. Experiments reported by Giménez et al. [39], performed in a tubular-flow reactor, have also been used to validate the kinetic mechanism. Two different air excess ratios (λ = 0.99 and λ = 19.4) have been tested at two pressures, 59.6 and 49.6 bar, respectively. Simulations have been performed with the closed homogeneous reactor module of the Chemkin-Pro software package [38] by fixing the gas residence time inside the reactor. Results of the comparison of model calculations and experimental data are shown in Figure S21. (Figure S21 caption: acetylene concentration as a function of temperature during its oxidation at high pressure (59.6 and 49.6 bar); experimental results (symbols) reported by Giménez et al. [39] and modeling calculations (lines) obtained with the present mechanism are compared for air excess ratios λ = 0.99 and λ = 19.4.)
Zebra Technologies Corporation, a provider of rugged mobile computers, barcode scanners and barcode printers enhanced with software and services to enable real-time enterprise visibility, has introduced the TC20, a rugged, value-driven mobile computer designed to meet the specific needs of small- and medium-sized businesses (SMBs).
The TC20 is a light, durable and long-lasting mobile device developed for indoor use for SMBs in sectors such as retail and hospitality, where balancing device functionality and cost is essential. Zebra has leveraged decades of enterprise innovation in the TC20 to revolutionise the SMB market where the use of outdated pen and paper systems and fragile, underpowered consumer devices is widespread. With the rugged and lightweight TC20, SMBs can now enjoy business-ready functions such as a built-in scanner, longer battery life and better connectivity in a no compromise mobile computer that reflects the design and familiar user interface of a consumer device.
The TC20 comes in affordable models designed for 1D and 2D barcode scanning, and pairing it with the Zebra® RFD2000 UHF RFID sled easily adds UHF RFID tag reading, writing and locationing capabilities, making it ideal for retail store environments. Small businesses can now benefit from integrated enterprise quality barcode and RFID tag reading, eliminating the need for unreliable, slower and power-intensive smartphone cameras.
The TC20 is designed to withstand dusty environments and accidental drops, reducing costly repairs while ensuring longer and more productive use.
With Zebra OneCare™, the LifeGuard™ for Android™ solution provides extended security updates over a longer lifespan of the TC20, ensuring the security of the operating system at all times.
Easy to deploy without time-intensive staff training, the TC20’s bright 4.3-inch screen and Android interface provides a familiar, intuitive experience that’s easy to use out of the box. Available in all-touch or “touch and keyboard” form factors, the TC20 allows staff more time to focus on customers.
Designed as an enterprise line of business smartphone, the TC20 is powered by Mobility DNA™ and its unique ecosystem of software such as Datawedge, StageNow and Mobility Extensions (Mx) which helps streamline deployment management and troubleshooting.
Using Zebra’s Workforce Connect Push-To-Talk Express, staff can perform voice communications over wireless LAN to communicate one-on-one or in groups, allowing them to coordinate and react to changing needs in real time and improve customer service.
A wide range of accessories, such as a snap-on power pack to support continuous use, hand straps, holsters and a snap-on trigger handle, allows staff to use the TC20 for a wide range of tasks with reduced strain and improved efficiency. |
Effect of combination of edible oils on blood pressure, lipid profile, lipid peroxidative markers, antioxidant status, and electrolytes in patients with hypertension on nifedipine treatment. OBJECTIVE To determine the effect of a combination of edible oils on blood pressure, anthropometric parameters, lipid profile, lipid peroxidative markers, antioxidant status and electrolytes in patients with hypertension taking the drug nifedipine. METHODS In this study, patients were separated into 4 groups: normal (n=14), hypertensive patients (n=38), and 38 patients under medication with nifedipine who were divided into 2 groups, nifedipine control (n=12) and nifedipine + oil combination (sesame + sunflower oil) (n=26). The sesame and sunflower oil combination was supplied to patients, who were instructed to use it as the only oil source for 45 days. Blood pressure and anthropometric parameters were measured at baseline and after 45 days. Lipid peroxidative markers, enzymatic and non-enzymatic antioxidants, lipid profile and electrolytes in blood were also measured. The study took place at Rajah Muthiah Medical College and Hospital, Annamalai University, Annamalainagar, India between January 2005 and December 2008. RESULTS Patients who consumed nifedipine and the oil-mix showed significantly decreased blood pressure, lipid peroxidative markers, lipid profile excluding high density lipoprotein cholesterol (HDL-C), sodium and chloride, and increased enzymatic and non-enzymatic antioxidants, HDL-C and potassium levels when compared to hypertensive patients treated with nifedipine alone. CONCLUSION Nifedipine and the oil-mix provided good protection over blood pressure and lipid peroxidation, and brought enzymatic and non-enzymatic antioxidants, lipid profile, and electrolytes towards normalcy in hypertensive patients.
The world’s ‘steering committee’ often refers to the G20 and in less than five months, China will be firmly behind the wheel. At recent G20 summits and meetings in Beijing and Shanghai, government officials, academics and business representatives were asking two questions: what should China do with its G20 presidency and what sort of leader will it be?
China’s G20 presidency has been a long time coming. China sees the G20 as the world’s ‘foremost international economic cooperative mechanism’, unique for giving developing countries an equal voice at the table. Now is China’s time to step up to the plate. One thing was clear from G20 meetings in Beijing and Shanghai: there is no shortage of ideas and risks for China’s upcoming presidency. These tend to fall into three categories: growth, governance and leadership.
On growth, there is an increasing concern that the G20 will fail to deliver on its promise to lift G20 GDP by 2 percent by 2018, with forecasts downgraded five times since the October 2013 baseline. Many countries are also struggling to implement key reforms that underpin the IMF and OECD growth modelling. They include a large increase in public investment in Germany and immigration reform in the United States.
New reforms will be required to fill these gaps, but they might be harder to come by in 2016. China will inherit a much less accommodating political environment than when announcing the growth goal during Australia’s presidency. At that time, a string of elections, stimulus programs and reform priorities at the domestic level all fed directly into the G20 process.
By contrast, the US presidential election might mean a lame duck US at the 2016 summit. China’s leadership, too, could face delay until after it agrees its next five-year plan in mid-2016, its ambition possibly reduced by recent turmoil in Chinese stock markets. Officials and academics are also wary of macroeconomic risks in Europe and the impacts of higher interest rates in the United States.
To achieve the 2 percent goal, China will need to tailor the G20’s agenda to the global growth challenge. For infrastructure, this means focusing less on just public investment and more on leveraging private sector investment. For employment, this means focusing less on small-scale employment programs and more on lifting workforce participation, particularly among women. For competition reforms, it means a greater focus on liberalising product markets, which so far represent the bulk of the 2 percent growth goal but only 16 percent of commitments. And for trade, it means a stronger focus on ‘behind the border’ (non-tariff barriers) reforms to better integrate global value chains.
Events in Europe and the United States will put macroeconomic coordination back on the G20 agenda. Discussions in Beijing suggest the focus will be on tasking the IMF and OECD to produce analysis understand better the spillovers from monetary and fiscal policies and the costs and benefits of different coordination options.
But this will need to be combined with reforms to G20 mechanisms, which have thus far failed to deliver on the G20’s rhetoric of ‘strong, sustainable and balanced growth’. The G20 relies almost solely on peer pressure to secure ambitious commitments. The G20’s peer review process of each member’s commitments is largely isolated from finance deputies, sherpas, ministers and leaders — the very people who are best placed to negotiate more ambitious commitments. Better integrating this process and having greater engagement with external experts and the public will be critical to delivering ambitious structural reforms.
On governance, China’s G20 presidency will present an awkward contradiction: the country chairing the world’s steering committee and driving global growth remains grossly under-represented in key global institutions. Hopefully, this contradiction can be leveraged to achieve more progress on global governance reform. IMF reform will be a key issue for China’s presidency, particularly having the renminbi included in the IMF’s Special Drawing Rights basket and progressing IMF quota reform given that, as highlighted by Tristram Sainsbury from the Lowy Institute, US$369 billion of the IMF’s funding will expire in 2016 and 2017.
Discussions on global governance have moved well beyond just the IMF. There is a strong focus on reforming global energy institutions, notably the International Energy Agency, to include major energy consumers like China to boost funding and bolster energy security. The G20’s agreed ‘principles on energy governance collaboration’ provide the starting point built by Turkey in 2015. G20 governance itself has also been the source of attention, particularly the ‘zombie-like’ idea of a secretariat, which — no matter how many times it is killed-off — seems to keep getting back up.
The question hanging over all these discussions is what sort of G20 leader China will be. Much of the public discussion has thus far focused on Chinese or regional initiatives like the One Belt One Road initiative, the Asian Infrastructure Investment Bank (AIIB) and the Regional Comprehensive Economic Partnership (RCEP). These initiatives have great potential. The AIIB, the World Bank’s Global Infrastructure Facility and the G20’s Global Infrastructure Initiative could all better integrate around a list of bankable projects for leveraging private investment in infrastructure. In addition, RCEP could help better regionalise Asia’s global production networks and reduce the complex overlap among Asian free trade agreements.
If China uses its G20 presidency to strategically position itself and regional institutions over others, it will quickly exhaust its political capital and irreparably damage its presidency. Instead, the focus must be on working collaboratively on issues that are not only key to the global economy but also ones on which China can demonstrate strong leadership. Undertaking bold structural reforms to boost growth and modernising global governance tick both these boxes. These are the areas of focus that will deliver a successful G20 presidency for China. Regardless of its final agenda, China’s presidency is already one of the most hotly anticipated in some time.
Cortical surface complexity in frontal and temporal areas varies across subgroups of schizophrenia

Schizophrenia is assumed to be a neurodevelopmental disorder, which might involve disturbed development of the cerebral cortex, especially in frontal and medial temporal areas. Based on a novel spherical harmonics approach to measuring complexity of cortical folding, we applied a measure based on fractal dimension (FD) to investigate the heterogeneity of regional cortical surface abnormalities across subgroups of schizophrenia defined by symptom profiles. A sample of 87 patients with DSM-IV schizophrenia was divided into three subgroups (based on symptom profiles) with predominantly negative (n = 31), disorganized (n = 23), and paranoid (n = 33) symptoms, and each was compared to 108 matched healthy controls. While global FD measures were reduced in the right hemisphere of the negative and paranoid subgroups, regional analysis revealed marked heterogeneity of regional FD alterations. The negative subgroup showed the most prominent reductions in left anterior cingulate, superior frontal, frontopolar, as well as right superior frontal and superior parietal cortices. The disorganized subgroup showed reductions in bilateral ventrolateral/orbitofrontal cortices, and several increases in the left hemisphere, including inferior parietal, middle temporal, and midcingulate areas. The paranoid subgroup showed only a few changes, including decreases in the right superior parietal and left fusiform region, and an increase in the left posterior cingulate cortex. Our findings suggest regional heterogeneity of cortical folding complexity, which might be related to biological subgroups of schizophrenia with differing degrees of altered cortical developmental pathology. Hum Brain Mapp 35:1691–1699, 2014. © 2013 Wiley Periodicals, Inc.
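As a generic illustration of the fractal dimension concept the abstract relies on, a minimal box-counting sketch in Python follows. This is only a toy under stated assumptions: the study's actual FD measure is derived from spherical harmonic reconstructions of the cortical surface, and the function names, box sizes, and test mask below are illustrative, not taken from the paper.

import numpy as np

def box_counting_dimension(mask, box_sizes):
    """Estimate a fractal dimension for a 2D binary mask as the slope of
    log(box count) versus log(1/box size), fitted by least squares."""
    counts = []
    h, w = mask.shape
    for size in box_sizes:
        # Count grid boxes of the given size that contain at least one set pixel.
        n = 0
        for i in range(0, h, size):
            for j in range(0, w, size):
                if mask[i:i + size, j:j + size].any():
                    n += 1
        counts.append(n)
    slope, _intercept = np.polyfit(np.log(1.0 / np.asarray(box_sizes, float)),
                                   np.log(counts), 1)
    return slope

# Sanity check on a filled square: the estimate should be very close to 2.
mask = np.zeros((256, 256), dtype=bool)
mask[64:192, 64:192] = True
print(box_counting_dimension(mask, box_sizes=[2, 4, 8, 16, 32]))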
/*
* Copyright (C) by Argonne National Laboratory
* See COPYRIGHT in top-level directory
*/
#ifndef SHM_COLL_H_INCLUDED
#define SHM_COLL_H_INCLUDED
#include <shm.h>
#include "../posix/shm_inline.h"
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_barrier(MPIR_Comm * comm, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_BARRIER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_BARRIER);
ret = MPIDI_POSIX_mpi_barrier(comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_BARRIER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_bcast(void *buffer, int count, MPI_Datatype datatype,
int root, MPIR_Comm * comm,
MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_BCAST);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_BCAST);
ret = MPIDI_POSIX_mpi_bcast(buffer, count, datatype, root, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_BCAST);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_allreduce(const void *sendbuf, void *recvbuf,
int count, MPI_Datatype datatype, MPI_Op op,
MPIR_Comm * comm, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_ALLREDUCE);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_ALLREDUCE);
ret = MPIDI_POSIX_mpi_allreduce(sendbuf, recvbuf, count, datatype, op, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_ALLREDUCE);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_allgather(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
int recvcount, MPI_Datatype recvtype,
MPIR_Comm * comm, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_ALLGATHER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_ALLGATHER);
ret = MPIDI_POSIX_mpi_allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
recvtype, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_ALLGATHER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_allgatherv(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
const int *recvcounts, const int *displs,
MPI_Datatype recvtype, MPIR_Comm * comm,
MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_ALLGATHERV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_ALLGATHERV);
ret = MPIDI_POSIX_mpi_allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts,
displs, recvtype, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_ALLGATHERV);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_scatter(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
int recvcount, MPI_Datatype recvtype, int root,
MPIR_Comm * comm, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_SCATTER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_SCATTER);
ret = MPIDI_POSIX_mpi_scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount,
recvtype, root, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_SCATTER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_scatterv(const void *sendbuf, const int *sendcounts,
const int *displs, MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype, int root,
MPIR_Comm * comm_ptr, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_SCATTERV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_SCATTERV);
ret = MPIDI_POSIX_mpi_scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf,
recvcount, recvtype, root, comm_ptr, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_SCATTERV);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_gather(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
int recvcount, MPI_Datatype recvtype, int root,
MPIR_Comm * comm, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_GATHER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_GATHER);
ret = MPIDI_POSIX_mpi_gather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
recvtype, root, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_GATHER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_gatherv(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
const int *recvcounts, const int *displs,
MPI_Datatype recvtype, int root,
MPIR_Comm * comm, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_GATHERV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_GATHERV);
ret = MPIDI_POSIX_mpi_gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts,
displs, recvtype, root, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_GATHERV);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_alltoall(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
int recvcount, MPI_Datatype recvtype,
MPIR_Comm * comm, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_ALLTOALL);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_ALLTOALL);
ret = MPIDI_POSIX_mpi_alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount,
recvtype, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_ALLTOALL);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_alltoallv(const void *sendbuf, const int *sendcounts,
const int *sdispls, MPI_Datatype sendtype,
void *recvbuf, const int *recvcounts,
const int *rdispls, MPI_Datatype recvtype,
MPIR_Comm * comm, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_ALLTOALLV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_ALLTOALLV);
ret = MPIDI_POSIX_mpi_alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
recvcounts, rdispls, recvtype, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_ALLTOALLV);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_alltoallw(const void *sendbuf, const int *sendcounts,
const int *sdispls,
const MPI_Datatype sendtypes[],
void *recvbuf, const int *recvcounts,
const int *rdispls,
const MPI_Datatype recvtypes[],
MPIR_Comm * comm, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_ALLTOALLW);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_ALLTOALLW);
ret = MPIDI_POSIX_mpi_alltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf,
recvcounts, rdispls, recvtypes, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_ALLTOALLW);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_reduce(const void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root,
MPIR_Comm * comm_ptr, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_REDUCE);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_REDUCE);
ret = MPIDI_POSIX_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm_ptr, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_REDUCE);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_reduce_scatter(const void *sendbuf, void *recvbuf,
const int *recvcounts,
MPI_Datatype datatype, MPI_Op op,
MPIR_Comm * comm_ptr,
MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_REDUCE_SCATTER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_REDUCE_SCATTER);
ret = MPIDI_POSIX_mpi_reduce_scatter(sendbuf, recvbuf, recvcounts, datatype, op,
comm_ptr, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_REDUCE_SCATTER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_reduce_scatter_block(const void *sendbuf,
void *recvbuf, int recvcount,
MPI_Datatype datatype, MPI_Op op,
MPIR_Comm * comm_ptr,
MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_REDUCE_SCATTER_BLOCK);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_REDUCE_SCATTER_BLOCK);
ret = MPIDI_POSIX_mpi_reduce_scatter_block(sendbuf, recvbuf, recvcount, datatype,
op, comm_ptr, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_REDUCE_SCATTER_BLOCK);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_scan(const void *sendbuf, void *recvbuf,
int count, MPI_Datatype datatype, MPI_Op op,
MPIR_Comm * comm, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_SCAN);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_SCAN);
ret = MPIDI_POSIX_mpi_scan(sendbuf, recvbuf, count, datatype, op, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_SCAN);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_exscan(const void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op,
MPIR_Comm * comm, MPIR_Errflag_t * errflag)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_EXSCAN);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_EXSCAN);
ret = MPIDI_POSIX_mpi_exscan(sendbuf, recvbuf, count, datatype, op, comm, errflag);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_EXSCAN);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_neighbor_allgather(const void *sendbuf, int sendcount,
MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype,
MPIR_Comm * comm)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLGATHER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLGATHER);
ret = MPIDI_POSIX_mpi_neighbor_allgather(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, comm);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLGATHER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_neighbor_allgatherv(const void *sendbuf, int sendcount,
MPI_Datatype sendtype,
void *recvbuf,
const int *recvcounts,
const int *displs,
MPI_Datatype recvtype,
MPIR_Comm * comm)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLGATHERV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLGATHERV);
ret = MPIDI_POSIX_mpi_neighbor_allgatherv(sendbuf, sendcount, sendtype, recvbuf,
recvcounts, displs, recvtype, comm);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLGATHERV);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_neighbor_alltoallv(const void *sendbuf,
const int *sendcounts,
const int *sdispls,
MPI_Datatype sendtype,
void *recvbuf,
const int *recvcounts,
const int *rdispls,
MPI_Datatype recvtype,
MPIR_Comm * comm)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLTOALLV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLTOALLV);
ret = MPIDI_POSIX_mpi_neighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype,
recvbuf, recvcounts, rdispls, recvtype, comm);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLTOALLV);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_neighbor_alltoallw(const void *sendbuf,
const int *sendcounts,
const MPI_Aint * sdispls,
const MPI_Datatype * sendtypes,
void *recvbuf,
const int *recvcounts,
const MPI_Aint * rdispls,
const MPI_Datatype * recvtypes,
MPIR_Comm * comm)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLTOALLW);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLTOALLW);
ret = MPIDI_POSIX_mpi_neighbor_alltoallw(sendbuf, sendcounts, sdispls, sendtypes,
recvbuf, recvcounts, rdispls, recvtypes, comm);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLTOALLW);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_neighbor_alltoall(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
int recvcount, MPI_Datatype recvtype,
MPIR_Comm * comm)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLTOALL);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLTOALL);
ret = MPIDI_POSIX_mpi_neighbor_alltoall(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, comm);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_NEIGHBOR_ALLTOALL);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ineighbor_allgather(const void *sendbuf, int sendcount,
MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype,
MPIR_Comm * comm,
MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLGATHER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLGATHER);
ret = MPIDI_POSIX_mpi_ineighbor_allgather(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLGATHER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ineighbor_allgatherv(const void *sendbuf,
int sendcount,
MPI_Datatype sendtype,
void *recvbuf,
const int *recvcounts,
const int *displs,
MPI_Datatype recvtype,
MPIR_Comm * comm,
MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLGATHERV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLGATHERV);
ret = MPIDI_POSIX_mpi_ineighbor_allgatherv(sendbuf, sendcount, sendtype, recvbuf,
recvcounts, displs, recvtype, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLGATHERV);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ineighbor_alltoall(const void *sendbuf, int sendcount,
MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype,
MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLTOALL);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLTOALL);
ret = MPIDI_POSIX_mpi_ineighbor_alltoall(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLTOALL);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ineighbor_alltoallv(const void *sendbuf,
const int *sendcounts,
const int *sdispls,
MPI_Datatype sendtype,
void *recvbuf,
const int *recvcounts,
const int *rdispls,
MPI_Datatype recvtype,
MPIR_Comm * comm,
MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLTOALLV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLTOALLV);
ret = MPIDI_POSIX_mpi_ineighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype,
recvbuf, recvcounts, rdispls, recvtype, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLTOALLV);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ineighbor_alltoallw(const void *sendbuf,
const int *sendcounts,
const MPI_Aint * sdispls,
const MPI_Datatype * sendtypes,
void *recvbuf,
const int *recvcounts,
const MPI_Aint * rdispls,
const MPI_Datatype * recvtypes,
MPIR_Comm * comm,
MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLTOALLW);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLTOALLW);
ret = MPIDI_POSIX_mpi_ineighbor_alltoallw(sendbuf, sendcounts, sdispls, sendtypes,
recvbuf, recvcounts, rdispls, recvtypes, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_INEIGHBOR_ALLTOALLW);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ibarrier(MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IBARRIER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IBARRIER);
ret = MPIDI_POSIX_mpi_ibarrier(comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IBARRIER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ibcast(void *buffer, int count, MPI_Datatype datatype,
int root, MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IBCAST);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IBCAST);
ret = MPIDI_POSIX_mpi_ibcast(buffer, count, datatype, root, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IBCAST);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_iallgather(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
int recvcount, MPI_Datatype recvtype,
MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IALLGATHER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IALLGATHER);
ret = MPIDI_POSIX_mpi_iallgather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
recvtype, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IALLGATHER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_iallgatherv(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
const int *recvcounts, const int *displs,
MPI_Datatype recvtype, MPIR_Comm * comm,
MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IALLGATHERV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IALLGATHERV);
ret = MPIDI_POSIX_mpi_iallgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts,
displs, recvtype, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IALLGATHERV);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_iallreduce(const void *sendbuf, void *recvbuf,
int count, MPI_Datatype datatype, MPI_Op op,
MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IALLREDUCE);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IALLREDUCE);
ret = MPIDI_POSIX_mpi_iallreduce(sendbuf, recvbuf, count, datatype, op, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IALLREDUCE);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ialltoall(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
int recvcount, MPI_Datatype recvtype,
MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IALLTOALL);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IALLTOALL);
ret = MPIDI_POSIX_mpi_ialltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount,
recvtype, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IALLTOALL);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ialltoallv(const void *sendbuf, const int *sendcounts,
const int *sdispls, MPI_Datatype sendtype,
void *recvbuf, const int *recvcounts,
const int *rdispls, MPI_Datatype recvtype,
MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IALLTOALLV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IALLTOALLV);
ret = MPIDI_POSIX_mpi_ialltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
recvcounts, rdispls, recvtype, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IALLTOALLV);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ialltoallw(const void *sendbuf, const int *sendcounts,
const int *sdispls,
const MPI_Datatype sendtypes[],
void *recvbuf, const int *recvcounts,
const int *rdispls,
const MPI_Datatype recvtypes[],
MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IALLTOALLW);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IALLTOALLW);
ret = MPIDI_POSIX_mpi_ialltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf,
recvcounts, rdispls, recvtypes, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IALLTOALLW);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_iexscan(const void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op,
MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IEXSCAN);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IEXSCAN);
ret = MPIDI_POSIX_mpi_iexscan(sendbuf, recvbuf, count, datatype, op, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IEXSCAN);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_igather(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
int recvcount, MPI_Datatype recvtype, int root,
MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IGATHER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IGATHER);
ret = MPIDI_POSIX_mpi_igather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
recvtype, root, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IGATHER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_igatherv(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
const int *recvcounts, const int *displs,
MPI_Datatype recvtype, int root,
MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IGATHERV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IGATHERV);
ret = MPIDI_POSIX_mpi_igatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts,
displs, recvtype, root, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IGATHERV);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ireduce_scatter_block(const void *sendbuf,
void *recvbuf, int recvcount,
MPI_Datatype datatype, MPI_Op op,
MPIR_Comm * comm,
MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IREDUCE_SCATTER_BLOCK);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IREDUCE_SCATTER_BLOCK);
ret = MPIDI_POSIX_mpi_ireduce_scatter_block(sendbuf, recvbuf, recvcount, datatype,
op, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IREDUCE_SCATTER_BLOCK);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ireduce_scatter(const void *sendbuf, void *recvbuf,
const int *recvcounts,
MPI_Datatype datatype, MPI_Op op,
MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IREDUCE_SCATTER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IREDUCE_SCATTER);
ret = MPIDI_POSIX_mpi_ireduce_scatter(sendbuf, recvbuf, recvcounts, datatype, op, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IREDUCE_SCATTER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_ireduce(const void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root,
MPIR_Comm * comm_ptr, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_IREDUCE);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_IREDUCE);
ret = MPIDI_POSIX_mpi_ireduce(sendbuf, recvbuf, count, datatype, op, root, comm_ptr, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_IREDUCE);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_iscan(const void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op,
MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_ISCAN);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_ISCAN);
ret = MPIDI_POSIX_mpi_iscan(sendbuf, recvbuf, count, datatype, op, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_ISCAN);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_iscatter(const void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
int recvcount, MPI_Datatype recvtype,
int root, MPIR_Comm * comm, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_ISCATTER);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_ISCATTER);
ret = MPIDI_POSIX_mpi_iscatter(sendbuf, sendcount, sendtype, recvbuf, recvcount,
recvtype, root, comm, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_ISCATTER);
return ret;
}
MPL_STATIC_INLINE_PREFIX int MPIDI_SHM_mpi_iscatterv(const void *sendbuf, const int *sendcounts,
const int *displs, MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype, int root,
MPIR_Comm * comm_ptr, MPIR_Request ** req)
{
int ret;
MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_SHM_MPI_ISCATTERV);
MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_SHM_MPI_ISCATTERV);
ret = MPIDI_POSIX_mpi_iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf,
recvcount, recvtype, root, comm_ptr, req);
MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_SHM_MPI_ISCATTERV);
return ret;
}
#endif /* SHM_COLL_H_INCLUDED */
// Montgomery square modulo Ord(G), repeated n times
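// Squaring is implemented as self-multiplication via p256OrdMul, which is
// assumed (from this snippet's context) to perform Montgomery multiplication
// modulo the order of the base point. Repeated squaring like this is the
// usual building block of fixed addition chains, for example when inverting
// a scalar modulo the group order.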
func p256OrdSqr(res, in []byte, n int) {
copy(res, in)
	for i := 0; i < n; i++ {
p256OrdMul(res, res, res)
}
}