content
stringlengths 7
2.61M
|
---|
Gianluigi Buffon would choose to move to England if Juventus were to show him the exit door at the end of the season.
The 32-year-old, who remains the world's most expensive goalkeeper after his move for around £32million from Parma to Juve in 2001, is under contract with the Turin giants for three more years but has been linked with Barclays Premier League clubs Arsenal, Manchester United and Manchester City.
"If I had to leave, I think England would be my destination," Buffon told Italy's Sky Sport 24.
"My desire to leave Juve is equal to that of Juve's will to have me leave.
"At this time there is nothing [to discuss] but there are two weeks to go [before the end of the season] and anything can happen.
"Juve always has the priority and we will see if we will decide for our paths to continue together." |
Nursing Care for Adolescent Idiopathic Spine Patients Background: Nursing care for spine patients with adolescent idiopathic scoliosis requires education, a plan of care, and the involvement of a multi-disciplinary team. Methods: To be a valuable member of the team, nurses caring for the spine patient must learn about the care, operative procedure, and outcomes. Results: Nursing intervention can help patients and families navigate the hospital course and can reduce the length of a patient's hospital stay. Conclusion: Well-informed members of a nursing team can assist families and patients, resulting in reduced anxiety and expediting a patient's return to daily activities. |
<gh_stars>100-1000
/*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.ei.businessprocess.integration.tests.humantasks;
import org.apache.axis2.databinding.types.URI;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeGroups;
import org.testng.annotations.Test;
import org.wso2.ei.businessprocess.integration.common.clients.humantasks.HumanTaskClientApiClient;
import org.wso2.ei.businessprocess.integration.common.clients.humantasks.HumanTaskPackageManagementClient;
import org.wso2.ei.businessprocess.integration.common.utils.BPSMasterTest;
import org.wso2.ei.businessprocess.integration.common.utils.BPSTestConstants;
import org.wso2.ei.businessprocess.integration.common.utils.RequestSender;
import org.wso2.carbon.automation.engine.FrameworkConstants;
import org.wso2.carbon.automation.engine.context.AutomationContext;
import org.wso2.carbon.automation.engine.frameworkutils.FrameworkPathUtil;
import org.wso2.carbon.humantask.stub.ui.task.client.api.types.*;
import org.wso2.carbon.integration.common.admin.client.UserManagementClient;
import org.wso2.carbon.integration.common.utils.LoginLogoutClient;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
/**
* Integration test for human task Xpath functions:concat(), concatWithDelimiter(), leastFrequentOccurrence(),
* mostFrequentOccurrence(), voteOnString(),and(), or(), vote(),max(), min(), avg(), sum()
* Test deploys artifacts from the test6 folder, which depend on the ClaimService service.
*/
public class HumanTaskXpathExtensionsTest extends BPSMasterTest {

    // Fixed: the logger was previously created for HumanTaskPeopleAssignment.class,
    // so log lines from this test were attributed to the wrong test class.
    private static final Log log = LogFactory.getLog(HumanTaskXpathExtensionsTest.class);

    // Test Automation API clients, one per authenticated test user.
    private HumanTaskClientApiClient clerk1Client, clerk2Client, clerk3Client, manager1Client, manager3Client;
    private HumanTaskPackageManagementClient humanTaskPackageManagementClient;
    private UserManagementClient userManagementClient;
    private RequestSender requestSender;
    // ID of the single claim-approval task created in setEnvironment() and shared by all tests.
    private URI taskID = null;

    /**
     * Setup the test environment: deploys the test artifacts, logs in each test user
     * and creates the claim-approval task whose presentation elements are asserted on.
     *
     * @throws Exception if deployment, login or task creation fails
     */
    @BeforeClass(alwaysRun = true)
    public void setEnvironment() throws Exception {
        init(); //init master class
        humanTaskPackageManagementClient = new HumanTaskPackageManagementClient(backEndUrl, sessionCookie);
        requestSender = new RequestSender();
        initialize();
        // One authenticated HumanTask client per test user (replaces five copies of
        // identical context/login/client boilerplate).
        clerk1Client = createHumanTaskClient("clerk1");
        clerk2Client = createHumanTaskClient("clerk2");
        clerk3Client = createHumanTaskClient("clerk3");
        manager1Client = createHumanTaskClient("manager1");
        manager3Client = createHumanTaskClient("manager3");
        createTask();
    }

    /**
     * Logs in as the given user of the "bpsServerInstance0001" instance and returns a
     * HumanTask client API stub bound to that user's session.
     *
     * @param userKey automation-context key of the user to log in as (e.g. "clerk1")
     * @return an authenticated {@link HumanTaskClientApiClient}
     * @throws Exception if context creation or login fails
     */
    private HumanTaskClientApiClient createHumanTaskClient(String userKey) throws Exception {
        AutomationContext context = new AutomationContext("BPS", "bpsServerInstance0001",
                FrameworkConstants.SUPER_TENANT_KEY, userKey);
        LoginLogoutClient loginLogoutClient = new LoginLogoutClient(context);
        String userSessionCookie = loginLogoutClient.login();
        return new HumanTaskClientApiClient(backEndUrl, userSessionCookie);
    }

    /**
     * Initialize the test by adding the required roles and deploying the artifacts.
     *
     * @throws Exception if role creation or artifact deployment fails
     */
    @BeforeGroups(groups = { "wso2.bps.task.people.assignment" })
    protected void initialize() throws Exception {
        log.info("Initializing HumanTask task creation Test...");
        userManagementClient = new UserManagementClient(backEndUrl, sessionCookie);
        addRoles();
        humanTaskPackageManagementClient = new HumanTaskPackageManagementClient(backEndUrl, sessionCookie);
        log.info("Add users success !");
        // Remove any previously deployed package so the fresh artifact is picked up.
        humanTaskPackageManagementClient
                .unDeployHumanTask(HumanTaskTestConstants.CLAIMS_APPROVAL_PACKAGE_ORG_ENTITY_NAME, "ApproveClaim");
        deployArtifact();
        requestSender.waitForProcessDeployment(backEndUrl + "ClaimService");
    }

    /**
     * deployArtifact() test1 sample Generic Human Roles. potentialOwners - htd:getInput("ClaimApprovalRequest")/test10:cust/test10:owners
     * businessAdministrators - htd:union(htd:getInput("ClaimApprovalRequest")/test10:cust/test10:globleAdmins,htd:getInput("ClaimApprovalRequest")/test10:cust/test10:regionalAdmins)
     * excludedOwners - htd:getInput("ClaimApprovalRequest")/test10:cust/test10:excludedOwners
     *
     * @throws Exception if the artifact upload fails
     */
    public void deployArtifact() throws Exception {
        final String artifactLocation =
                FrameworkPathUtil.getSystemResourceLocation() + BPSTestConstants.DIR_ARTIFACTS + File.separator
                        + BPSTestConstants.DIR_HUMAN_TASK + File.separator
                        + HumanTaskTestConstants.DIR_PEOPLE_ASSIGNMENT + File.separator + "test6";
        uploadHumanTaskForTest(HumanTaskTestConstants.CLAIMS_APPROVAL_PACKAGE_ORG_ENTITY_NAME, artifactLocation);
    }

    /**
     * Add the required user roles (three clerk roles and two manager roles) to the
     * test server. Clerk roles may only view tasks; manager roles get full humantask
     * permissions.
     *
     * @throws Exception if role creation fails
     */
    private void addRoles() throws Exception {
        String[] rc1 = new String[] { HumanTaskTestConstants.CLERK1_USER, HumanTaskTestConstants.CLERK2_USER,
                HumanTaskTestConstants.CLERK3_USER };
        String[] rc2 = new String[] { HumanTaskTestConstants.CLERK3_USER, HumanTaskTestConstants.CLERK4_USER,
                HumanTaskTestConstants.CLERK5_USER };
        String[] rc3 = new String[] { HumanTaskTestConstants.CLERK4_USER, HumanTaskTestConstants.CLERK5_USER,
                HumanTaskTestConstants.CLERK6_USER };
        String[] rm1 = new String[] { HumanTaskTestConstants.MANAGER1_USER, HumanTaskTestConstants.MANAGER2_USER };
        String[] rm2 = new String[] { HumanTaskTestConstants.MANAGER2_USER, HumanTaskTestConstants.MANAGER3_USER };
        userManagementClient.addRole(HumanTaskTestConstants.REGIONAL_CLERKS_ROLE, rc1,
                new String[] { "/permission/admin/login", "/permission/admin/manage/humantask/viewtasks" }, false);
        userManagementClient.addRole(HumanTaskTestConstants.REGIONAL_CLERKS_ROLE_2, rc2,
                new String[] { "/permission/admin/login", "/permission/admin/manage/humantask/viewtasks" }, false);
        userManagementClient.addRole(HumanTaskTestConstants.REGIONAL_CLERKS_ROLE_3, rc3,
                new String[] { "/permission/admin/login", "/permission/admin/manage/humantask/viewtasks" }, false);
        userManagementClient.addRole(HumanTaskTestConstants.REGIONAL_MANAGER_ROLE, rm1,
                new String[] { "/permission/admin/login", "/permission/admin/manage/humantask" }, false);
        userManagementClient.addRole(HumanTaskTestConstants.REGIONAL_MANAGER_ROLE_2, rm2,
                new String[] { "/permission/admin/login", "/permission/admin/manage/humantask" }, false);
    }

    /**
     * Create a new human task by invoking the ClaimService "approve" operation.
     * The payload carries the node lists (initials, regions, amounts, booleans, ...)
     * that the deployed Xpath-extension expressions aggregate into the task's
     * presentation subject and description.
     *
     * @throws Exception if the service call fails or no task id is returned
     */
    private void createTask() throws Exception {
        String soapBody =
                "<sch:ClaimApprovalData xmlns:sch=\"http://www.example.com/claims/schema\" xmlns:ns=\"http://docs.oasis-open.org/ns/bpel4people/ws-humantask/types/200803\">\n"
                        +
                        " <sch:cust>\n" +
                        " <sch:id>123</sch:id>\n" +
                        " <sch:initial>A</sch:initial>\n" +
                        " <sch:initial>B</sch:initial>\n" +
                        " <sch:initial>C</sch:initial>\n" +
                        " <sch:firstname>Hasitha</sch:firstname>\n" +
                        " <sch:lastname>Aravinda</sch:lastname>\n" +
                        " <sch:othername>name1</sch:othername>" +
                        " <sch:othername>name2</sch:othername>" +
                        " <sch:othername>name3</sch:othername>" +
                        " <sch:custRegion>LK</sch:custRegion>" +
                        " <sch:custRegion>UK</sch:custRegion>" +
                        " <sch:custRegion>LK</sch:custRegion>" +
                        " <sch:custRegion>LK</sch:custRegion>" +
                        " <sch:custRegion>UK</sch:custRegion>" +
                        " <sch:custRegion>US</sch:custRegion>" +
                        " <sch:custRegion>IN</sch:custRegion>" +
                        " <sch:custRegion>DK</sch:custRegion>" +
                        " <sch:custRegion>DK</sch:custRegion>" +
                        " <sch:custArrears>arr_DK</sch:custArrears>" +
                        " <sch:custArrears>arr_DK</sch:custArrears>" +
                        " <sch:custArrears>arr_DK</sch:custArrears>" +
                        " <sch:custArrears>arr_LK</sch:custArrears>" +
                        " <sch:custArrears>arr_LK</sch:custArrears>" +
                        " <sch:custArrears>arr_LK</sch:custArrears>" +
                        " <sch:custArrears>arr_UK</sch:custArrears>" +
                        " <sch:custArrears>arr_UK</sch:custArrears>" +
                        " <sch:custArrears>arr_SW</sch:custArrears>" +
                        " <sch:boolTrue>true</sch:boolTrue>" +
                        " <sch:boolTrue>1</sch:boolTrue>" +
                        " <sch:boolTrue>TRUE</sch:boolTrue>" +
                        " <sch:boolTrue>True</sch:boolTrue>" +
                        " <sch:boolFalse>false</sch:boolFalse>" +
                        " <sch:boolFalse>0</sch:boolFalse>" +
                        " <sch:boolFalse>False</sch:boolFalse>" +
                        " <sch:boolFalse>FALSE</sch:boolFalse>" +
                        " <sch:boolMix>FALSE</sch:boolMix>" +
                        " <sch:boolMix>true</sch:boolMix>" +
                        " <sch:boolMix>0</sch:boolMix>" +
                        " <sch:boolMix>1</sch:boolMix>" +
                        " <sch:boolMix>1</sch:boolMix>" +
                        " <sch:amount>2500</sch:amount>" +
                        " <sch:amount>2000</sch:amount>" +
                        " <sch:amount>500</sch:amount>" +
                        " <sch:amount>-500</sch:amount>" +
                        " <sch:amount>4000</sch:amount>" +
                        " <sch:amount>3500</sch:amount>" +
                        " <sch:owners>\n" +
                        " <ns:group>" + HumanTaskTestConstants.REGIONAL_CLERKS_ROLE + "</ns:group>\n" +
                        " </sch:owners>\n" +
                        " <sch:excludedOwners>\n" +
                        " <ns:user>" + HumanTaskTestConstants.CLERK3_USER + "</ns:user>\n" +
                        " </sch:excludedOwners>\n" +
                        " <sch:globleAdmins>\n" +
                        " <ns:group>" + HumanTaskTestConstants.REGIONAL_MANAGER_ROLE + "</ns:group>\n" +
                        " </sch:globleAdmins>\n" +
                        " <sch:regionalAdmins>\n" +
                        " <ns:group>" + HumanTaskTestConstants.REGIONAL_MANAGER_ROLE_2 + "</ns:group>\n" +
                        " </sch:regionalAdmins>\n" +
                        " </sch:cust>\n" +
                        " <sch:amount>2500</sch:amount>\n" +
                        " <sch:region>lk</sch:region>\n" +
                        " <sch:priority>7</sch:priority>\n" +
                        " </sch:ClaimApprovalData>";
        String operation = "approve";
        String serviceName = "ClaimService";
        List<String> expectedOutput = new ArrayList<String>();
        expectedOutput.add("taskid>");
        log.info("Calling Service: " + backEndUrl + serviceName);
        requestSender.sendRequest(backEndUrl + serviceName, operation, soapBody, 1, expectedOutput, true);
        setTaskID();
    }

    /**
     * Queries the claimable task list as clerk1 and records the created task's ID
     * in {@link #taskID} for use by the test methods.
     *
     * @throws Exception if the query fails
     */
    private void setTaskID() throws Exception {
        //Clerk1 can claim this task.
        TSimpleQueryInput queryInput = new TSimpleQueryInput();
        queryInput.setPageNumber(0);
        queryInput.setSimpleQueryCategory(TSimpleQueryCategory.CLAIMABLE);
        //Query as Clerk1 user
        TTaskSimpleQueryResultSet taskResults = clerk1Client.simpleQuery(queryInput);
        TTaskSimpleQueryResultRow[] rows = taskResults.getRow();
        // Guard against an empty result set so the failure message is explicit
        // instead of an ArrayIndexOutOfBoundsException.
        Assert.assertTrue(rows != null && rows.length > 0, "No claimable task found for clerk1 user");
        TTaskSimpleQueryResultRow b4pTask = rows[0];
        this.taskID = b4pTask.getId();
    }

    /**
     * Test the Human task Xpath string functions - concat(),concatWithDelimiter();mostFrequentOccurrence(),leastFrequentOccurrence()
     *
     * @throws Exception if loading the task fails
     */
    @Test(groups = {
            "wso2.bps.task.xpath" }, description = "Test Xpath string operations", priority = 10, singleThreaded = true)
    public void testStringFunctions()
            throws Exception {
        TTaskAbstract humanTask = manager1Client.loadTask(taskID);
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("ABC"),
                "Concat() method test, subject should contain ABC");
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("name1 name2 name3"),
                "concatWithDelimiter() method test, should contain name1 name2 name3");
        //happy scenario for mostFrequentOccurrence()
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("LK"),
                "mostFrequentOccurrence() test, should contain LK");
        //when there is a tie, empty is returned
        Assert.assertFalse(humanTask.getPresentationSubject().getTPresentationSubject().contains("arr_DK") || humanTask
                .getPresentationSubject().getTPresentationSubject().contains("arr_LK") || humanTask
                .getPresentationSubject().getTPresentationSubject().contains("arr_UK") || humanTask
                .getPresentationSubject().getTPresentationSubject().contains("arr_US"),
                "mostFrequentOccurrence() test, should not contain any arr_* since a tie");
        //happy scenario for leastFrequentOccurrence()
        // Fixed message: the assertion checks for arr_SW, not arr_US.
        Assert.assertTrue(humanTask.getPresentationDescription().getTPresentationDescription().contains("arr_SW"),
                "leastFrequentOccurrence() test, should contain arr_SW");
        //when there is a tie empty is returned
        Assert.assertFalse(
                humanTask.getPresentationDescription().getTPresentationDescription().contains("LK") || humanTask
                        .getPresentationDescription().getTPresentationDescription().contains("UK") || humanTask
                        .getPresentationDescription().getTPresentationDescription().contains("IN") || humanTask
                        .getPresentationDescription().getTPresentationDescription().contains("DK") || humanTask
                        .getPresentationDescription().getTPresentationDescription().contains("US"),
                "leastFrequentOccurrence() test, should not contain any region since a tie");
        //voteOnString with 40%, has only 33%
        Assert.assertFalse(humanTask.getPresentationSubject().getTPresentationSubject().contains("vote40_LK"),
                "vote() should not return highest occurrence LK, since low percentage");
        //vote on 20%
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("vote20_LK"),
                "vote() should return LK");
        //empty nodeset
        Assert.assertFalse(humanTask.getPresentationSubject().getTPresentationSubject().contains("voteEmpty_LK"),
                "vote() should return empty string for empty list");
    }

    /**
     * Test the Human task Xpath boolean functions - and(),or(),vote()
     *
     * @throws Exception if loading the task fails
     */
    @Test(groups = {
            "wso2.bps.task.xpath" }, description = "Test Xpath boolean operations", priority = 10, singleThreaded = true)
    public void testBooleanFunctions()
            throws Exception {
        TTaskAbstract humanTask = manager1Client.loadTask(taskID);
        //And function
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("andTrue_true"),
                "and() should return true when all true");
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("andFalse_false"),
                "and() should return false when all false");
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("andMix_false"),
                "and() should return false when mix boolean inputs");
        // Fixed message typo: "an()" -> "and()".
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("andEmpty_false"),
                "and() should return false when empty node set");
        //or function
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("orTrue_true"),
                "or() should return true when all true");
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("orFalse_false"),
                "or() should return false when all false");
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("orMix_true"),
                "or() should return true when mix boolean inputs");
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("orEmpty_false"),
                "or()should return false when empty set");
        //vote function
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("vote10_true"),
                "vote() should return true since true has 60%");
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("vote70_false"),
                "vote() should return false since true has 60%");
        Assert.assertTrue(humanTask.getPresentationSubject().getTPresentationSubject().contains("vote5_false"),
                "vote() should return false since false is highest");
    }

    /**
     * Test the Human task Xpath Number functions - avg(),min(),max()
     *
     * @throws Exception if loading the task fails
     */
    @Test(groups = {
            "wso2.bps.task.xpath" }, description = "Test Xpath number operations", priority = 10, singleThreaded = true)
    public void testNumberFunctions()
            throws Exception {
        TTaskAbstract humanTask = manager1Client.loadTask(taskID);
        //avg
        Assert.assertTrue(humanTask.getPresentationDescription().getTPresentationDescription().contains("avg_2000"),
                "avg() should return 2000");
        Assert.assertTrue(humanTask.getPresentationDescription().getTPresentationDescription().contains("min_-500"),
                "min() should return -500");
        Assert.assertTrue(humanTask.getPresentationDescription().getTPresentationDescription().contains("max_4000"),
                "max() should return 4000");
        Assert.assertTrue(humanTask.getPresentationDescription().getTPresentationDescription().contains("avg_NaN"),
                "avg() should return NaN for empty list");
        Assert.assertTrue(humanTask.getPresentationDescription().getTPresentationDescription().contains("min_NaN"),
                "min() should return NaN for empty list");
        Assert.assertTrue(humanTask.getPresentationDescription().getTPresentationDescription().contains("max_NaN"),
                "max() should return NaN for empty list");
    }

    /**
     * Cleanup the test environment after the test: delete the test roles, undeploy
     * the human-task package and log out.
     *
     * @throws Exception if cleanup fails
     */
    @AfterClass(groups = { "wso2.bps.task.clean" }, description = "Clean up server")
    public void cleanTestEnvironment()
            throws Exception {
        userManagementClient.deleteRole(HumanTaskTestConstants.REGIONAL_CLERKS_ROLE);
        userManagementClient.deleteRole(HumanTaskTestConstants.REGIONAL_CLERKS_ROLE_2);
        userManagementClient.deleteRole(HumanTaskTestConstants.REGIONAL_CLERKS_ROLE_3);
        userManagementClient.deleteRole(HumanTaskTestConstants.REGIONAL_MANAGER_ROLE);
        userManagementClient.deleteRole(HumanTaskTestConstants.REGIONAL_MANAGER_ROLE_2);
        humanTaskPackageManagementClient
                .unDeployHumanTask(HumanTaskTestConstants.CLAIMS_APPROVAL_PACKAGE_ORG_ENTITY_NAME, "ApproveClaim");
        loginLogoutClient.logout();
    }
}
|
package multitoken
import (
"fmt"
"hyperledger.abchain.org/chaincode/modules/generaltoken"
"regexp"
)
// baseVerifier matches a complete token name: 4-16 alphanumeric characters.
// The pattern is anchored; the previous unanchored FindString/length check
// silently accepted the empty string (len("") < len("") is false).
var baseVerifier = regexp.MustCompile(`^[A-Za-z0-9]{4,16}$`)

// baseNameVerifier returns nil when name is a valid token name (4-16
// alphanumeric characters) and a descriptive error otherwise, including for
// the empty string.
func baseNameVerifier(name string) error {
	if !baseVerifier.MatchString(name) {
		return fmt.Errorf("Token name [%s] contain invalid part", name)
	}
	return nil
}
// GetToken validates name and returns a generaltoken.TokenTx bound to the
// chaincode sub-runtime for that token name, sharing this transaction's
// nonce handlers.
func (m *baseMultiTokenTx) GetToken(name string) (generaltoken.TokenTx, error) {
	err := baseNameVerifier(name)
	if err != nil {
		return nil, err
	}
	rt := m.ChaincodeRuntime.SubRuntime(name)
	return generaltoken.NewTokenTxImpl(rt, m.nonce, m.tokenNonce), nil
}
|
The Ontario Trucking Association struck an 11th-hour agreement with the provincial government to exempt trucks that operate outside of Ontario from a workplace smoking ban.
The Smoke Free Ontario Act went into effect June 1 to ban smoking from enclosed public spaces and workplaces, and the measure was rumored to have included all trucks.
OTA and Ministry of Health Prevention officials met Wednesday, according to OTA, to clarify an exemption for federally regulated carriers.
“Our concern was over provincial incursion into an area of federal law and what precedent that might set in other statutes in the future,” OTA President David Bradley said in a statement.
Federal carriers in Canada will still be governed under the federal government’s Non-Smokers Health Act, which respects the rights of non-smokers in the workplace.
Provincially regulated carriers in Ontario must now follow the Smoke Free Ontario Act, which has strict penalties for violating certain sections, including fines up to $300,000.
OTA made it clear in its statement that the association does not challenge or dispute the health risks associated with smoking, only that the exemption for federally regulated carriers be clarified. |
/*
* Copyright © 2008-2010 dragchan <<EMAIL>>
* This file is part of FbTerm.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include "input.h"
#include <linux/input.h>
#include <linux/kd.h>
#include <linux/kdev_t.h>
#include <linux/vt.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/vt.h>
#include <termios.h>
#include <unistd.h>
#include "fbconfig.h"
#include "fbshell.h"
#include "fbshellman.h"
#include "fbterm.h"
#include "improxy.h"
#include "input_key.h"
// Console escape sequences written to the tty to control it.
static const s8 show_cursor[] = "\e[?25h";
static const s8 hide_cursor[] = "\e[?25l";
// "\e[9;N]" sets the Linux console screen-blank interval to N minutes (0 = off).
static const s8 disable_blank[] = "\e[9;0]";
static const s8 enable_blank[] = "\e[9;10]";
static const s8 clear_screen[] = "\e[2J\e[H";
// Expands to the singleton accessor for TtyInput (macro defined elsewhere in the project).
DEFINE_INSTANCE(TtyInput)
/**
 * Real tty input backend: reads key input from the controlling virtual
 * console (through the IoPipe fd) and forwards it to the active shell,
 * handling VC switching, shortcut keys and medium-raw keyboard mode.
 */
class TtyInputVT : public TtyInput, public IoPipe
{
	friend class TtyInput;
public:
	void switchVc(bool enter);
	void setRawMode(bool raw, bool force = false);
	void showInfo(bool verbose);
	bool isActive(void);
protected:
	TtyInputVT();
	~TtyInputVT();
private:
	virtual void readyRead(s8* buf, u32 len);
	void setupSysKey(bool restore);
	void processRawKeys(s8* buf, u32 len);
	// NOTE(review): mRawMode is not initialized in the constructor; it relies on
	// switchVc() calling setRawMode(false, true) before readyRead() runs — confirm.
	bool mRawMode;
	termios oldTm;   // terminal settings saved on first VC enter, restored in dtor
	long oldKbMode;  // keyboard mode saved on first VC enter, restored in dtor
	bool keymapFailure = false; // set when a KDSKBENT ioctl failed in setupSysKey()
	bool inited = false;        // guards the one-time setup in switchVc()/teardown in dtor
};
/**
 * No-op input backend used in write-only mode: all input-related operations
 * do nothing, and the base class isActive() (always true) applies.
 */
class TtyInputNull : public TtyInput
{
	friend class TtyInput;
public:
	void switchVc(bool enter);
	void setRawMode(bool raw, bool force = false);
	void showInfo(bool verbose);
protected:
	TtyInputNull();
};
/*
 * Factory for the tty input backend.  Returns a no-op TtyInputNull in
 * write-only mode, a TtyInputVT when stdin is an interactive virtual
 * console, and 0 (with a message on stderr) otherwise.
 */
TtyInput* TtyInput::createInstance()
{
	bool write_only;
	Config::instance()->getOption("write-only", write_only);
	if (write_only)
		return new TtyInputNull();

	s8 buf[64];
	if (ttyname_r(STDIN_FILENO, buf, sizeof(buf)))
	{
		fprintf(stderr, "stdin isn't a tty!\n");
		return 0;
	}

	// Accept only console device names (/dev/ttyN or /dev/vcN).
	bool is_console = strstr(buf, "/dev/tty") || strstr(buf, "/dev/vc");
	if (!is_console)
	{
		fprintf(stderr, "stdin isn't a interactive tty!\n");
		return 0;
	}

	return new TtyInputVT();
}
/*
 * Takes ownership of a duplicate of stdin, hides the console cursor,
 * disables kernel screen blanking, and puts the VT into process-controlled
 * switching so VC switches are delivered via SIGUSR1/SIGUSR2.
 */
TtyInputVT::TtyInputVT()
{
	// IoPipe gets its own descriptor so closing it never closes stdin itself.
	setFd(dup(STDIN_FILENO));
	// ret only exists to silence warn_unused_result on write().
	s32 ret = ::write(STDIN_FILENO, hide_cursor, sizeof(hide_cursor) - 1);
	ret = ::write(STDIN_FILENO, disable_blank, sizeof(disable_blank) - 1);
	struct vt_mode vtm;
	vtm.mode = VT_PROCESS;   // we acknowledge VC switches ourselves (VT_RELDISP)
	vtm.waitv = 0;
	vtm.relsig = SIGUSR1;    // sent when another VC wants the display
	vtm.acqsig = SIGUSR2;    // sent when we get the display back
	vtm.frsig = 0;
	ioctl(STDIN_FILENO, VT_SETMODE, &vtm);
}
/*
 * Restores everything the constructor and switchVc() changed: the kernel
 * keymap, keyboard mode, termios settings, cursor visibility and screen
 * blanking, then clears the screen.  Skipped entirely if switchVc() never
 * completed its one-time setup.
 */
TtyInputVT::~TtyInputVT()
{
	if (!inited)
		return;
	setupSysKey(true);  // restore the saved keymap entries
	ioctl(STDIN_FILENO, KDSKBMODE, oldKbMode);
	tcsetattr(STDIN_FILENO, TCSAFLUSH, &oldTm);
	// ret only exists to silence warn_unused_result on write().
	s32 ret = ::write(STDIN_FILENO, show_cursor, sizeof(show_cursor) - 1);
	ret = ::write(STDIN_FILENO, enable_blank, sizeof(enable_blank) - 1);
	ret = ::write(STDIN_FILENO, clear_screen, sizeof(clear_screen) - 1);
}
// Base-class default: assume the terminal is active.  TtyInputVT overrides
// this with a real VT_GETSTATE check.
bool TtyInput::isActive(void)
{
	return true;
}
/*
 * Print startup diagnostics.  Currently the only condition reported is a
 * failed kernel keymap change; note the message is printed regardless of
 * the verbose flag.
 */
void TtyInputVT::showInfo(bool verbose)
{
	if (!keymapFailure)
		return;

	printf(
	    "[input] can't change kernel keymap table, all shortcuts will NOT "
	    "work! see SECURITY NOTES section of man page for solution.\n");
}
/*
 * Called when our virtual console is entered (enter = true) or about to be
 * left.  Installs/removes the shortcut keymap entries, acknowledges VC
 * release to the kernel on leave, and on the very first enter performs the
 * one-time terminal setup (saving state for the destructor to restore).
 */
void TtyInputVT::switchVc(bool enter)
{
	setupSysKey(!enter);  // install shortcuts on enter, restore on leave
	if (!enter)
	{
		// Allow the kernel to complete the console switch (VT_PROCESS mode).
		ioctl(STDIN_FILENO, VT_RELDISP, 1);
		return;
	}
	// One-time setup below; subsequent enters only need setupSysKey() above.
	if (inited)
		return;
	inited = true;
	// Save current terminal and keyboard state so the destructor can restore it.
	tcgetattr(STDIN_FILENO, &oldTm);
	ioctl(STDIN_FILENO, KDGKBMODE, &oldKbMode);
	setRawMode(false, true);
	termios tm = oldTm;
	cfmakeraw(&tm);
	tm.c_cc[VMIN] = 1;   // read() returns after at least one byte
	tm.c_cc[VTIME] = 0;  // no inter-byte timeout
	tcsetattr(STDIN_FILENO, TCSAFLUSH, &tm);
}
/*
 * Install (restore = false) or restore (restore = true) kernel keymap
 * entries for FbTerm's shortcut combinations via KDSKBENT.  The original
 * keymap values are saved on first install so they can be put back.
 * Changing the keymap needs CAP_SYS_TTY_CONFIG, hence the temporary
 * seteuid(0)/seteuid(getuid()) bracket.
 */
void TtyInputVT::setupSysKey(bool restore)
{
#define T_SHIFT (1 << KG_SHIFT)
#define T_CTRL (1 << KG_CTRL)
#define T_CTRL_ALT ((1 << KG_CTRL) + (1 << KG_ALT))
	// True once old_val in sysKeyTable holds the saved keymap entries.
	static bool syskey_saved = false;
	static struct KeyEntry
	{
		u8 table;    // modifier table (KG_* bitmask)
		u8 keycode;  // kernel keycode to remap
		u16 new_val; // FbTerm action code to install
		u16 old_val; // original keymap value, filled in on first install
	} sysKeyTable[] = {
		{T_SHIFT, KEY_PAGEUP, SHIFT_PAGEUP},
		{T_SHIFT, KEY_PAGEDOWN, SHIFT_PAGEDOWN},
		{T_SHIFT, KEY_LEFT, SHIFT_LEFT},
		{T_SHIFT, KEY_RIGHT, SHIFT_RIGHT},
		{T_CTRL, KEY_SPACE, CTRL_SPACE},
		{T_CTRL_ALT, KEY_1, CTRL_ALT_1},
		{T_CTRL_ALT, KEY_2, CTRL_ALT_2},
		{T_CTRL_ALT, KEY_3, CTRL_ALT_3},
		{T_CTRL_ALT, KEY_4, CTRL_ALT_4},
		{T_CTRL_ALT, KEY_5, CTRL_ALT_5},
		{T_CTRL_ALT, KEY_6, CTRL_ALT_6},
		{T_CTRL_ALT, KEY_7, CTRL_ALT_7},
		{T_CTRL_ALT, KEY_8, CTRL_ALT_8},
		{T_CTRL_ALT, KEY_9, CTRL_ALT_9},
		{T_CTRL_ALT, KEY_0, CTRL_ALT_0},
		{T_CTRL_ALT, KEY_C, CTRL_ALT_C},
		{T_CTRL_ALT, KEY_D, CTRL_ALT_D},
		{T_CTRL_ALT, KEY_E, CTRL_ALT_E},
		{T_CTRL_ALT, KEY_F1, CTRL_ALT_F1},
		{T_CTRL_ALT, KEY_F2, CTRL_ALT_F2},
		{T_CTRL_ALT, KEY_F3, CTRL_ALT_F3},
		{T_CTRL_ALT, KEY_F4, CTRL_ALT_F4},
		{T_CTRL_ALT, KEY_F5, CTRL_ALT_F5},
		{T_CTRL_ALT, KEY_F6, CTRL_ALT_F6},
		{T_CTRL_ALT, KEY_K, CTRL_ALT_K},
	};
	// Nothing to restore if we never installed anything.
	if (!syskey_saved && restore)
		return;
	seteuid(0);
	s8 imapp[128];
	Config::instance()->getOption("input-method", imapp, sizeof(imapp));
	for (u32 i = 0; i < sizeof(sysKeyTable) / sizeof(KeyEntry); i++)
	{
		// Without an input method configured, leave the IM-related shortcuts alone.
		if (!imapp[0] && (sysKeyTable[i].new_val == CTRL_SPACE ||
		    sysKeyTable[i].new_val == CTRL_ALT_K))
			continue;
		kbentry entry;
		entry.kb_table = sysKeyTable[i].table;
		entry.kb_index = sysKeyTable[i].keycode;
		if (!syskey_saved)
		{
			// First install: remember the current keymap value for later restore.
			ioctl(STDIN_FILENO, KDGKBENT, &entry);
			sysKeyTable[i].old_val = entry.kb_value;
		}
		entry.kb_value =
		    (restore ? sysKeyTable[i].old_val : sysKeyTable[i].new_val);
		s32 ret = ioctl(STDIN_FILENO, KDSKBENT,
		                &entry); // should have perm CAP_SYS_TTY_CONFIG
		if (!keymapFailure && ret == -1)
			keymapFailure = true;
	}
	if (!syskey_saved && !restore)
		syskey_saved = true;
	seteuid(getuid());
}
/*
 * Called by IoPipe when input bytes are available.  In raw mode everything
 * goes through processRawKeys().  Otherwise the buffer is scanned for
 * two-byte UTF-8 sequences whose decoded code point lies in the
 * [AC_START, AC_END] shortcut range; those are stripped from the stream and
 * dispatched to FbTerm::processSysKey(), and the remaining bytes are
 * forwarded to the active shell.
 */
void TtyInputVT::readyRead(s8* buf, u32 len)
{
	if (mRawMode)
	{
		processRawKeys(buf, len);
		return;
	}
	FbShell* shell = FbShellManager::instance()->activeShell();
	u32 start = 0;  // first byte not yet forwarded to the shell
	for (u32 i = 0; i < len; i++)
	{
		u32 orig = i;
		u16 c = (u8)buf[i];
		// 110xxxxx followed by 10xxxxxx: a two-byte UTF-8 sequence.
		if ((c >> 5) == 0x6 && i < (len - 1) && (((u8)buf[++i]) >> 6) == 0x2)
		{
			c = ((c & 0x1f) << 6) | (buf[i] & 0x3f);
			// Only code points in the shortcut range are intercepted.
			if (c < AC_START || c > AC_END)
				continue;
			// Flush the bytes before the shortcut to the shell, then consume it.
			if (shell && orig > start)
				shell->keyInput(buf + start, orig - start);
			start = i + 1;
			FbTerm::instance()->processSysKey(c);
		}
	}
	// Forward whatever is left after the last intercepted shortcut.
	if (shell && len > start)
		shell->keyInput(buf + start, len - start);
}
// Medium-raw keyboard state shared by setRawMode() and processRawKeys().
static u16 down_num;            // number of keys currently held down
static bool key_down[NR_KEYS];  // per-keycode down/up state
static u8 shift_down[NR_SHIFT]; // press counters per modifier
static u16 shift_state;         // bitmask of active modifiers (KG_* bit positions)
/*
 * Switch the kernel keyboard between medium-raw scancode mode and unicode
 * mode.  On entering raw mode the key-state tracking arrays are reset; on
 * leaving it, synthetic release events (keycode | 0x80) are sent to the
 * active shell for every key still recorded as held, so the client never
 * sees a stuck key.
 */
void TtyInputVT::setRawMode(bool raw, bool force)
{
	if (!force && raw == mRawMode)
		return;
	mRawMode = raw;
	ioctl(STDIN_FILENO, KDSKBMODE, mRawMode ? K_MEDIUMRAW : K_UNICODE);
	if (mRawMode)
	{
		down_num = 0;
		shift_state = 0;
		memset(key_down, 0, sizeof(bool) * NR_KEYS);
		memset(shift_down, 0, sizeof(char) * NR_SHIFT);
	}
	else
	{
		if (!down_num)
			return;
		FbShell* shell = FbShellManager::instance()->activeShell();
		if (!shell)
			return;
		u32 num = down_num;
		for (u32 i = 0; i < NR_KEYS; i++)
		{
			if (!key_down[i])
				continue;
			// High bit set = key release in medium-raw encoding.
			s8 code = i | 0x80;
			shell->keyInput(&code, 1);
			// Stop once every held key has been released.
			if (!--num)
				break;
		}
	}
}
/*
 * Parse a buffer of medium-raw keyboard bytes.  Encoding: bit 7 set means
 * key release; keycode 0 prefixes an extended keycode carried in the next
 * two bytes (7 bits each, both with bit 7 set).  The function tracks
 * pressed keys and modifier state, looks each keycode up in the kernel
 * keymap (KDGKBENT) under the current shift state, intercepts FbTerm
 * shortcut actions and console-switch keys, and forwards everything else
 * to the active shell unmodified.
 */
void TtyInputVT::processRawKeys(s8* buf, u32 len)
{
	FbShell* shell = FbShellManager::instance()->activeShell();
	u32 start = 0;  // first byte not yet forwarded to the shell
	for (u32 i = 0; i < len; i++)
	{
		bool down = !(buf[i] & 0x80);
		u16 code = buf[i] & 0x7f;
		u32 orig = i;
		if (!code)
		{
			// Extended keycode: needs two continuation bytes.
			if (i + 2 >= len)
				break;
			code = (buf[++i] & 0x7f) << 7;
			code |= buf[++i] & 0x7f;
			// Both continuation bytes must have the high bit set.
			if (!(buf[i] & 0x80) || !(buf[i - 1] & 0x80))
				continue;
		}
		if (code >= NR_KEYS)
			continue;
		if (down ^ key_down[code])
		{
			if (down)
				down_num++;
			else
				down_num--;
		}
		else if (!down)
		{
			// Release of a key we never saw pressed: drop it from the stream.
			if (shell && orig > start)
				shell->keyInput(buf + start, orig - start);
			start = i + 1;
		}
		// Auto-repeat: a down event for an already-down key.
		bool rep = (down && key_down[code]);
		key_down[code] = down;
		// Resolve the keycode through the kernel keymap under the current modifiers.
		struct kbentry ke;
		ke.kb_table = shift_state;
		ke.kb_index = code;
		if (ioctl(STDIN_FILENO, KDGKBENT, &ke) == -1)
			continue;
		u16 value = KVAL(ke.kb_value);
		u16 syskey = 0, switchvc = 0;
		switch (KTYP(ke.kb_value))
		{
		case KT_LATIN:
			// Keymap entries remapped by setupSysKey() resolve to AC_* action codes.
			if (value >= AC_START && value <= AC_END)
				syskey = value;
			break;
		case KT_CONS:
			// Console-switch key: VT numbers are 1-based.
			switchvc = value + 1;
			break;
		case KT_SHIFT:
			// Maintain the modifier press counters and the shift_state bitmask.
			if (rep || value >= NR_SHIFT)
				break;
			if (value == KVAL(K_CAPSSHIFT))
				value = KVAL(K_SHIFT);
			if (down)
				shift_down[value]++;
			else if (shift_down[value])
				shift_down[value]--;
			if (shift_down[value])
				shift_state |= (1 << value);
			else
				shift_state &= ~(1 << value);
			break;
		default:
			break;
		}
		if (down && (syskey || switchvc))
		{
			// Flush bytes up to and including this event, then handle it ourselves.
			if (shell && i >= start)
				shell->keyInput(buf + start, i - start + 1);
			start = i + 1;
			if (syskey)
			{
				FbTerm::instance()->processSysKey(syskey);
			}
			else
			{
				ioctl(STDIN_FILENO, VT_ACTIVATE, switchvc);
			}
		}
	}
	if (shell && len > start)
		shell->keyInput(buf + start, len - start);
}
/*
 * True when our tty is the currently active virtual console: compares the
 * active VT number from VT_GETSTATE with the minor number of stdin's device.
 * NOTE(review): the ioctl/fstat return values are not checked; on failure
 * the comparison uses uninitialized data — confirm whether that can occur here.
 */
bool TtyInputVT::isActive(void)
{
	struct vt_stat vtstat;
	ioctl(STDIN_FILENO, VT_GETSTATE, &vtstat);
	struct stat ttystat;
	fstat(STDIN_FILENO, &ttystat);
	return vtstat.v_active == MINOR(ttystat.st_rdev);
}
// TtyInputNull: write-only mode ignores all input-related operations.
void TtyInputNull::switchVc(bool enter)
{
}
void TtyInputNull::setRawMode(bool raw, bool force)
{
}
void TtyInputNull::showInfo(bool verbose)
{
}
TtyInputNull::TtyInputNull()
{
}
|
// NodeRegister registers this node with the cluster discovery backend.
// It builds the node options from the caller-supplied options, creates the
// node, serializes its NodeData and hands the payload to the discovery
// service together with the node's stop channel.  The error callback closes
// node.quitCh when the discovery registration loop quits.
func NodeRegister(options *NodeRegisterOptions) error {
	nodeOptions, err := createNodeOptions(options)
	if err != nil {
		return err
	}
	if _, err := createNode(nodeOptions); err != nil {
		return err
	}
	// NOTE(review): EnCodeObjectToBuffer is a project helper — presumably it
	// JSON-encodes NodeData; confirm against this repo's json package.
	buf, err := json.EnCodeObjectToBuffer(&nodeOptions.NodeData)
	if err != nil {
		return err
	}
	log.Printf("register to cluster - %s %s [addr:%s]\n", node.Cluster, node.Key, nodeOptions.NodeData.APIAddr)
	node.discovery.Register(node.Key, buf, node.stopCh, func(key string, err error) {
		log.Printf("discovery register %s error:%s\n", key, err.Error())
		// The registration loop quitting signals shutdown of the whole node.
		if err == backends.ErrRegistLoopQuit {
			close(node.quitCh)
		}
	})
	return nil
}
<reponame>ducktec/esp32c3
// Register reader for APB_SARADC_2_DATA_STATUS.  This looks like
// svd2rust-generated code — TODO confirm generator; prefer regenerating over
// hand-editing.
#[doc = "Reader of register APB_SARADC_2_DATA_STATUS"]
pub type R = crate::R<u32, super::APB_SARADC_2_DATA_STATUS>;
#[doc = "Reader of field `APB_SARADC_ADC2_DATA`"]
pub type APB_SARADC_ADC2_DATA_R = crate::R<u32, u32>;
impl R {
    #[doc = "Bits 0:16"]
    #[inline(always)]
    pub fn apb_saradc_adc2_data(&self) -> APB_SARADC_ADC2_DATA_R {
        // Mask keeps bits 0..=16 (17 bits) of the raw register value.
        APB_SARADC_ADC2_DATA_R::new((self.bits & 0x0001_ffff) as u32)
    }
}
|
The Spatial-Temporal Evolution of Population in the Yangtze River Delta, China: An Urban Hierarchy Perspective The reason for changes in ranking within urban systems is the subject of much debate. Employing the census data from 1990 to 2020, this paper investigates population dynamics across urban hierarchies and their influencing factors in the Yangtze River Delta. The results reveal an upward pattern of population dynamics and show that the advantages of high-ranking cities in population gathering are obvious, though they have declined recently. Based on a framework of urban amenity and the ridge regression model, the authors argue that concerns of residents in choosing cities in which to settle are gradually changing from economic opportunities to multidimensional amenities, finding that the influencing mechanisms vary across time. This is slightly different from Glaeser's consumer cities; economic gains, as physiological needs, are always important for population growth. As higher-level needs, social and natural amenities, including Internet accessibility and urban green space, did not affect growth until the turn of the new millennium. In terms of negative factors, the crowding-out effect of living costs and environmental pollution are not significant, as theoretically expected, suggesting that residents tend to care more about development opportunities than the negative impacts of living in high-ranking cities. Finally, policies are proposed to promote population growth and the coordinated development of large, medium, and small cities in the Yangtze River Delta.
A precise location method for mine personnel based on residual estimation Kalman filtering Passive moving target localization is one of the key technologies for the safety of operators in underground spaces of mines and timely rescue after disasters. Current algorithms for locating personnel in mines have problems such as low positioning accuracy and susceptibility to non-line-of-sight (NLOS) error interference. In this paper, we propose a method for accurate positioning of mine personnel based on residual estimation Kalman filtering. Firstly, a dual-range TOA ranging technique is used to effectively suppress the measurement errors caused by equipment time asynchrony in signal transmission; then the measured data are pre-processed using the trend shift method to suppress low-probability, high-interference noise in the measured signal; finally, a Kalman filter introducing the idea of residual estimation is designed to overcome the effects of nanosecond dense multipath signals and background noise existing in the mine, thus improving the accuracy of the measurement of the personnel position in the mine tunnel. Simulation experiments show that the proposed method has strong robustness in the tunnel environment, and the localization accuracy is significantly improved, reducing the impact of non-line-of-sight error noise and the dense multipath effect on the performance of the algorithm.
Here's the trailer for the last part of Peter Jackson's The Hobbit trilogy, The Battle of the Five Armies. It looks spectacular. I really hope they don't screw this one up, because what remains is my favorite part of the book.
Engineering Escherichia coli for the utilization of ethylene glycol Background A considerable challenge in the development of bioprocesses for producing chemicals and fuels has been the high cost of feedstocks relative to oil prices, making it difficult for these processes to compete with their conventional petrochemical counterparts. Hence, in the absence of high oil prices in the near future, there has been a shift in the industry to produce higher value compounds such as fragrances for cosmetics. Yet, there is still a need to address climate change and develop biotechnological approaches for producing large market, lower value chemicals and fuels. Results In this work, we study ethylene glycol (EG), a novel feedstock that we believe has promise to address this challenge. We engineer Escherichia coli (E. coli) to consume EG and examine glycolate production as a case study for chemical production. Using a combination of modeling and experimental studies, we identify oxygen concentration as an important metabolic valve in the assimilation and use of EG as a substrate. Two oxygen-based strategies are thus developed and tested in fed-batch bioreactors. Ultimately, the best glycolate production strategy employed a target respiratory quotient leading to the highest observed fermentation performance. With this strategy, a glycolate titer of 10.4 g/L was reached after 112 h of production time in a fed-batch bioreactor. Correspondingly, a yield of 0.8 g/g from EG and productivity of 0.1 g/L h were measured during the production stage. Our modeling and experimental results clearly suggest that oxygen concentration is an important factor in the assimilation and use of EG as a substrate. Finally, our use of metabolic modeling also sheds light on the intracellular distribution through central metabolism, implicating flux to 2-phosphoglycerate as the primary route for EG assimilation. 
Conclusion Overall, our work suggests that EG could provide a renewable starting material for commercial biosynthesis of fuels and chemicals that may achieve economic parity with petrochemical feedstocks while sequestering carbon dioxide. to consider the substrate toxicity and biocompatibility, as well as the development of appropriate metabolic pathways for substrate utilization. Furthermore, while certain substrates may be biologically feasible, technical limitations in their own production may render them unusable downstream. While production efficiency and bio-toxicity are more easily assessed, evaluating the feasibility of a new substrate for bio-based chemical production is complicated by how its utilization is linked to the highly interconnected metabolic network. Indeed, refactoring large metabolic pathways in heterologous hosts has proven challenging in the past. One method that may help to explain why a new substrate performs poorly examines the metabolic pathway that supports a substrate for chemical production in relation to the cell's entire native metabolism. In an earlier study, we characterized this relationship by calculating the interactions between two competing objectives of cellular systems; growth and chemical production. The theory laid out how the underlying network structure controls whether chemical production is independent of growth. That relationship was captured by the orthogonality metric which is evaluated by a mathematical framework using elementary flux modes (EFMs) to measure the interconnectedness of the cell system and the desired objectives. We found that the organization of ideal metabolic structures, designed to minimize cell-wide interactions, had a characteristic branched topology. This type of orthogonal structure could be exploited for two-stage fermentation, as it lends itself to the design of metabolic valves for dynamic control. 
Dynamic control is a strategy employed to increase control over chemical production, often through the temporal segregation of bioproduction from cellular growth. Because of their characteristic branched topology, highly orthogonal pathways often have a key enzymatic step, or metabolic valve, which can be used to control the division of flux for cell growth and chemical production. Various strategies can be used to exert control over these metabolic valves, such as process conditions (pH, temperature, oxygen) or the chemical stimuli of genetic circuits (quorum sensing, inducers, internal metabolite concentration). It seems natural then, that the design of orthogonal pathways, metabolic valves and dynamic control strategies would go hand-in-hand, particularly for the design of two-stage fermentations. Another important finding from our earlier study was that glucose, while a common substrate for industrial fermentation, is not ideally suited for chemical production objectives due to the significant overlap between the pathways for biomass synthesis and chemical production. Instead, substrate selection should be based on the chemical targeted for production. Among the various substrates and products that we evaluated, we identified that ethylene glycol (EG) was a highly promising substrate for orthogonal production of a variety of chemicals because it minimized the interactions between biomass and chemical producing pathways. Today, EG is produced primarily by the petrochemical industry from ethylene, however, renewable alternatives are currently in the early stages of development. In particular, EG can be produced from the electrochemical conversion of CO 2, from the chemocatalytic conversion of cellulosic materials and glycerol (a common waste in industrial biofuel and soap production), as well as from the depolymerization of poly(ethylene terephthalate) (PET) plastic (an abundant waste material) to its monomers. 
Thus, though unconventional as a feedstock, EG could serve as a sustainable and/or renewable replacement for glucose in the modern bioprocess. Though not commonly reported in metabolic engineering applications, there are two main types of naturally existing pathways that allow microorganisms to consume EG as a carbon source. The first pathway utilizes a diol-dehydratase resulting in the dehydration of EG to acetaldehyde. Acetaldehyde is then activated to acetyl-CoA by an acetaldehyde dehydrogenase enzyme, which provides the cell with the key pre-cursor metabolite to support growth via the tricarboxylic acid (TCA) cycle and gluconeogenic pathways. This pathway is most commonly found in some Clostridium species and a few other anaerobic organisms owing to the oxygen sensitivity of the diol-dehydratase. In the second pathway, EG is successively oxidized using nicotinamide cofactors and oxygen to produce glyoxylate. Glyoxylate, which is a gluconeogenic carbon substrate, can then be used as the growth metabolite as it enters lower glycolysis at the 2-phosphoglycerate node as well as the TCA cycle via the glyoxylate shunt. This oxidative pathway has been shown to exist in a variety of different bacteria. Wildtype Escherichia coli (E. coli) MG1655 cannot naturally grow on or degrade EG. However, it is possible to select for a strain that does, and to our knowledge, only one study has ever reported EG utilization by E. coli.. That strain was selected from derivatives of propylene glycol utilizing mutants. Researchers identified increased activities of propanediol oxidoreductase, glycolaldehyde dehydrogenase and glycolate oxidase as the necessary components required for its assimilation. More generally, a survey of the literature shows that enzyme promiscuity is an essential element of the utilization of alcohols. 
In this specific case, enzymes regarded as being essential for propanediol or even glycerol utilization across many organisms have shown activity on EG and are regarded as the key methods for degradation, irrespective of the dehydratase route or the oxidative route via glyoxylate. Hence, in this study, EG assimilation was engineered in E. coli by overexpressing two genes: fucO (encoding propanediol oxidoreductase) and aldA (encoding glycolaldehyde dehydrogenase). This synthetic pathway is similar to the second natural EG utilization pathway previously introduced: EG is sequentially oxidized to glyoxylate thereby providing a gluconeogenic carbon substrate for growth. More specifically, the promiscuous activity of propanediol oxidoreductase converts EG to glycolaldehyde, which is subsequently converted to glycolate by glycolaldehyde dehydrogenase. The native glycolate oxidase then transforms glycolate to glyoxylate to support cell growth and maintenance. Motivated by the prospect of utilizing EG as a renewable and alternative feedstock, we sought to compare EG with more conventional feedstocks for the production of select chemicals of industrial significance. In particular, formate, glucose, and xylose were selected as the comparative feedstocks, while succinate, ethanol, glycolate and 2,3-butanediol, were selected as the products of interest. Formate was selected as it is another well-studied, non-sugar feedstock that can be produced via electrochemical CO 2 reduction (eCO 2 R), and it has already been successfully employed within biological systems. Meanwhile, glucose and xylose were selected as typical renewable sugar feedstocks, considering that they comprise the largest fraction of sugars in lignocellulosic biomass. The four products of interest were selected as they are well-known bioproduction targets that have industrial significance [1,. 
In this study, we began by comparatively evaluating EG as a feedstock by measuring the orthogonality of each substrate-product combination, using select bioconversion pathways. Consistent with our previous evaluation, EG demonstrated the greatest orthogonality score for all four products considered. For the products investigated, it was determined that the EG-glycolate combination scored the highest based on this metric. Thus, as a case study we engineered and characterized E. coli as a biocatalyst capable of growth and glycolate production, using EG. This case study attempts to validate our orthogonal approach for chemical production, relating the network topology and two-stage fermentation. Glycolate is an alpha-hydroxy acid used in the synthesis of a variety of different plastics and polymers, cosmetics and industrial detergents. Conventional approaches to produce glycolate in E. coli have focused on using glucose and/or xylose as a substrate, and typically implement genetic strategies that couple production to growth. Theoretical yields have been dependent on both the substrate selected as well as the biosynthetic pathway used for production. Examples of glycolate production from glucose in literature have primarily been demonstrated by the activation of the glyoxylate shunt, while glycolate production using xylose has been demonstrated by the use of a synthetic pathway for xylose assimilation in E. coli. More recently, a novel synthetic pathway (named "glycoptimus") was also designed and constructed in E. coli and is predicted to reach molar yields of 2.5 and 3, on xylose and glucose, respectively. However, while the pathway has been shown to be functional in E. coli, it has not yet reached the model-predicted yields in vivo. In the identified studies, the highest glycolate titer in E. coli is reported to be 65.5 g/L with a corresponding yield of 0.765 g/g, using glucose as the substrate. In organisms other than E. 
coli, the highest reported glycolate titer is 110.5 g/L with a corresponding yield of 94.4%, using EG as the substrate. This feat was achieved by Hua and colleagues in Gluconobacter oxydans using an integrated production, separation and purification technology. To our knowledge, only five studies have examined EG conversion to glycolate as a biotransformation, none of which were in E. coli [36,. In this work, we used a combination of computational and experimental investigations to thoroughly characterize the metabolism and growth physiology of E. coli growing on EG (Fig. 1). First, we used orthogonality to demonstrate EG's potential as a substrate and selected the EG to glycolate production pathway as a case study. Next, we characterized the glycolate production system using flux balance analysis (FBA), and showed that the selected pathway supported cell growth through shake flask experiments. Subsequently, two sets of fedbatch growth experiments were performed to optimize growth and production, as well as the use of oxygen as a metabolic valve. Findings from the first growth experiment were combined with a computational study on the effect of oxygen to design strategies for the second set of growth experiments and thus improve pathway performance. Overall, we find that EG has the potential to replace glucose in industrial bioprocesses, particularly in applications where renewable EG can be easily sourced or produced. Further, we demonstrate that computational tools can successfully inform the design and optimization of production systems. Ethylene glycol is a promising substrate In an earlier study, we identified orthogonality as a metric to assess and design efficient metabolic networks for the production of chemicals. That study defined orthogonality as a quantitative measure of the interconnectedness between pathways that produce a target chemical and biomass. Since then, this principle has been demonstrated in a separate study. 
More specifically, the orthogonality metric is a mathematical measure of the set of interactions that each substrate assimilation pathway has to the cell components outside their pathways. Hence, it implicitly measures the biological complexity one might expect to ensure that the biomolecular machinery of that pathway can concurrently function within the cell's natural metabolism to support biological and chemical production objectives. It also allows for metabolic constraints such as redox and ATP to be accounted for. The principal focus of that earlier work Fig. 1 Summary of investigations performed in this study and the key results. a The orthogonality (OS) for various substrate-product pairs were evaluated. The most orthogonal pair , was selected to demonstrate the use of orthogonality as a metric to establish successful production systems. b A stoichiometric metabolic model was built to characterize the production system and its metabolic behaviour using flux balance analysis (FBA) and flux variability analysis (FVA). Both aerobic and oxygen-limiting conditions were used to investigate the effect of oxygen level on growth and production. c Two strains employing the selected pathway, each with different enzyme mutants, were then tested in shake flasks to confirm that the pathway could indeed sustain cell growth and glycolate production. d The best performing strain (LMSE11) was then tested in bioreactors, using a two-stage growth/production system. As per the identified metabolic valve (glycolate oxidase) and FBA results, a decrease in oxygen level was predicted to switch the system from growth to production. Thus, two secondary air flow rates were tested to evaluate the effect of oxygen level on the production stage. e Using data collected from the shake flask and bioreactor experiments, metabolic modeling was used to further characterize the production system and its response to oxygen level. 
f Finally, insights gained from modeling and earlier experiments were used to inform the design and testing of two strategies to improve glycolate production in the bioreactors. Colours indicate the type of analysis performed: blue for computational and green for experimental. Key results are reported below the dotted lines was to examine how metabolic pathway organization influences chemical production. Here, orthogonality is used, in addition to yield, as a metric to evaluate the compatibility of specific substrate and product pairs. We began by first evaluating EG as a feedstock through comparison with three other conventional feed materials: formate, glucose and xylose. Formate was selected as it is a well-developed electrochemical product and has been identified as a good potential feedstock for bioprocesses. Glucose and xylose were selected as they are both conventional sugar feedstocks, typically employed in bioprocesses. This feedstock comparison was performed by evaluating the orthogonality of each substrate when used to produce four different chemical products of industrial importance (succinate, ethanol, glycolate, and 2,3-butanediol) [1,. While single representative assimilation and conversion pathways were selected for each substrate to product conversion, it should be noted that other pathways exist and continue to be developed which may have different yield and orthogonality scores. For example, xylose utilization can occur via various natural and synthetic pathways, particularly for glycolic acid production. As shown in Table 1, EG consistently demonstrates the highest orthogonality scores for each product. From a yield perspective, EG is predicted to have the highest theoretical yield (g product/g substrate) for both glycolate and 2,3-butanediol, and competitive theoretical yields for succinate and ethanol. 
Comparatively, while formate demonstrates greater orthogonality than the conventional sugar feedstocks, it scores consistently lower than EG and its predicted yield is notably low across all four products. Analysis of the metabolism of formate shows its lower orthogonality scores, when compared to EG, arises from its low degree of reduction that necessitates flux through the TCA cycle to generate the reducing equivalents required for growth and energy, irrespective of what chemical is produced. Formate's low degree of reduction is also the reason for its low predicted product yields. Hence, this line of analysis suggests that, from the perspective of ranking non-sugar feedstocks, EG is a superior substrate to formate in E. coli. Thus, EG was selected for further comparison with the conventional sugar feedstocks, glucose and xylose. Since EG and glycolate demonstrated the highest yield and orthogonality for all substrate-product pairs, this particular pair was selected for further evaluation. Figure 2a shows the glycolate production pathways selected for xylose, glucose and EG. For the three pathways shown, xylose exhibits the lowest orthogonality score (0.34), owing to glycolate production being highly coupled to biomass synthesis. In this pathway, the biomass precursor, DHAP, and the glycolate precursor, glycolate, are concomitantly produced. Consequently, when using this pathway, it is impossible to separate chemical and biomass production, as one necessitates production of the other. Production from glucose is also highly coupled to biomass synthesis, and exhibits a low orthogonality score (0.41). While this pathway fits partly into an orthogonal criterion for glycolate production, the concomitant production of pyruvate for every mole of glycolate requires the use of the cell's highly interconnected glyoxylate cycle to reach theoretical yields. The orthogonality score, for this reason, is comparatively smaller than that of EG. 
As previously reported in Table 1, the production of glycolate from EG exhibits the highest orthogonality score (0.67). Unlike xylose and glucose, EG is not naturally assimilated by E. coli, however it can be engineered to do so through incorporation of the pathway shown in Fig. 2b. As previously noted, E. coli naturally possesses 1,2-propanediol oxidoreductase (fucO), for which mutants have reportedly shown promiscuous activity with EG. This observation forms the basis of the engineered EG assimilation pathway. Using a suitable propanediol oxidoreductase mutant, EG is first converted to glycolaldehyde and subsequently transformed to glycolate through the action of Table 1 Yield and orthogonality metrics for chemical production from different substrates The orthogonality scores for various products are shown comparing two substrates that can be generated electrochemically (EG and formate) against conventional sugar substrates (glucose and xylose) assimilated via their natural pathways. Formate has orthogonality scores similar to many sugar consuming pathways, indicating its utilization is relatively complex and interconnected with native growth. Ethylene glycol (EG) exhibits the highest orthogonality scores, and has higher or comparable theoretical yields relative to the other substrates. Yield is given as g of product per 1 g of substrate Ultimately, as evidenced by the orthogonality scores and the pathway topologies for glycolate production, EG is more orthogonal than the traditional substrates investigated, and hence suitable for validating the concept of pathway design based on orthogonality. Thus, we performed a case study for the production of glycolate from EG in E. coli MG1655. In this case study, a combination of computational and experimental evaluations were performed to characterize this production pathway, and to explore its implementation within a two-stage fermentation system. Modeling growth of E. 
coli using EG To gain insight into the expected metabolic behaviour, the intracellular fluxes of the production system were first investigated using flux balance analysis (FBA) and flux variability analysis (FVA). For this analysis, the E. coli core model was modified with the addition of the. Xylose and glucose are the two most commonly studied substrates; however, glycolate production from these substrates is limited by the interconnectedness of the growth and production pathways, as indicated by their topology and low orthogonality scores. Consequently, efficient production of glycolate from either of these compounds necessitates the coupling of growth and glycolate synthesis. Alternatively, EG assimilation lends itself to a branched topology that permits the decoupling of glycolate synthesis and cell growth (high orthogonality score). b Although Escherichia coli (E. coli) cannot naturally assimilate EG, the conversion of EG to glycolate can be introduced via the overexpression of a mutant propanediol oxidoreductase (encoded by fucO) and glycolaldehyde dehydrogenase (encoded by aldA). The subsequent conversion of glycolate to glyoxylate provides a branch point that serves as a metabolic valve. Under fully aerobic conditions, glycolate is converted to glyoxylate and channeled to the central metabolism for growth via glycerate metabolism. Alternatively, under oxygen limiting conditions glycolate accumulates. Oxygen can thus be used for dynamic control of this metabolic valve, thereby decoupling the production of glycolate and biomass. Colours denote the following: exogenous steps (purple), the desired product (red), biomass precursors (green), and metabolic valves (blue). For production from glucose (A (iii)), red arrows are also used to specify the product pathway. 
DHAP dihydroxyacetone phosphate, 3PG 3-phosphoglycerate, 2PG 2-phosphoglycerate, PEP phosphoenolpyruvate, AcCoA acetyl-CoA, Cit citrate, Icit isocitrate, Akg alpha-ketoglutarate, SucCoA succinyl-CoA, Succ succinate, Fum fumarate, Mal malate, Oaa oxaloacetate, EG ethylene glycol, TSA tatronate semialdehyde EG assimilation pathway previously shown in Fig. 2a, encompassing the conversion of EG to 2-phosphoglycerate (2PG). Transport and import/export reactions were also added for EG, assuming it crossed the membrane by free diffusion. FBA and FVA were performed under both aerobic and micro-aerobic (oxygen-limiting) conditions, to investigate how oxygen level influences growth and production. Results for the FBA and FVA analyses are summarized in Fig. 3. Under aerobic conditions (Fig. 3a), FBA predicts a biomass production rate of 0.30 h −1 (mass yield of 0.49 gDW/gEG), no glycolate production and no by-product production other than CO 2 (ie. acetate, ethanol, etc.). Approximately 91% of the glyoxylate flux is channeled towards 2-phosphoglycerate (2PG) and enters lower glycolysis. The remaining glyoxylate is used to generate malate via malate synthase. Of the total carbon (EG) entering the cell, 22% is channeled towards acetyl-CoA and 3.3% enters the TCA cycle. Conversely, about 18% of the total carbon is channeled by gluconeogenic pathways towards upper glycolysis and the pentose phosphate pathways, when accounting for stoichiometry. Under micro-aerobic conditions, modeling suggests that glycolate production will occur (Fig. 3b). More specifically, the model predicts a biomass production rate of 0.087 h −1 and a glycolate export flux of 2.2 mmol/gDWh. These values correspond to a biomass mass yield of 0.29 and a glycolate mass yield of 0.56. The only by-product observed under micro-aerobic conditions is CO 2. The FVA values reported for both aerobic and micro-aerobic conditions provide a range of flux values that may be possible. 
In the case of the tartronate semialdehyde reductase (TRSARr) reaction, the large lower bound flux predicted by FVA indicates that alternate optima exist for this system, for the set of constraints used. Overall, the FBA results suggest that micro-aerobic conditions are required for the secretion of glycolate and that oxygen level could be used as an effective control mechanism for switching from growth to production. In the sections that follow, we attempt to experimentally validate the Colours denote the following: exogenous steps (purple), the desired product and its export (red), and metabolic valves (blue). Increased line thickness for the glyoxylate to TSA reaction indicates the 2:1 reaction stoichiometry, accounting for the reduction in total flux observed at the glyoxylate node predicted link between oxygen levels and glycolate production and to use modeling to better understand this relationship. Applying these insights, we then develop and test two strategies to optimize glycolate production in E. coli, using EG as the feed material. Establishing ethylene glycol utilization by E. coli As previously described and predicted computationally, EG assimilation and conversion to glycolate can be introduced in E. coli through the expression of a suitable 1,2-propanediol oxidoreductase mutant (fucO), and glycolaldehyde dehydrogenase (aldA). Previous studies have shown that Fe 2+ -dependent propanediol oxidoreductases (encoded by fucO) can be inactivated by metal-catalyzed oxidation (MCO) and are therefore sensitive to oxygen. Thus, we designed two variants of the pathway, each containing fucO mutants reported as being more oxygen-stable. Variant 1 (strain LMSE11) contained fucO with mutations I7L and L8V, as reported by earlier mutagenesis studies. Variant 2 (strain LMSE12) contained fucO with a single L8M mutation, as it was also suggested to play a role in alleviating metal catalyzed oxidation (MCO) toxicity in propanediol assimilation by E. coli. 
Both variants had the same ribosome binding site and trc promoter upstream of the start codon. As shown in Fig. 4, the fermentation profiles for the two constructed strains were markedly different. LMSE11 completely consumed EG in 47 h while LMSE12 had consumed only ~ 10% of the initial substrate in the same time period with 10 g/L as residual EG. Growth yield for LMSE11 was calculated to be 0.28 gDW/g EG. Comparatively, a theoretical yield of 0.49 gDW/gEG was predicted by flux balance analysis (FBA) using the E. coli iAF1260 model (previously described). Thus, the experimental results suggest that the two genes introduced (fucO and aldA) are sufficient for supporting the conversion of EG to biomass, when combined with E. coli's natural biosynthetic pathways. However, the actual yield is less than 60% of the model-predicted yield, thereby suggesting that the pathway may not be operating optimally (i.e. oxygen sensitivity) or that the modeling may need additional constraints. For example, the substrate uptake rate in shake-flasks was determined to be 5 mmol/gDWh, compared to the 10 mmol/gDWh assumed previously for modeling. When the modeling is performed instead for an EG uptake flux of 5 mmol/gDWh, the predicted biomass yield decreases to 0.43 gDW/gEG. Similarly, EG transport was modelled as free diffusion, however if in reality proton symport is required, it may further limit the achievable yield. Oxygen-limiting conditions could also explain a lower than optimal yield, however this is unlikely since analysis of the fermentation media by highperformance liquid chromatography (HPLC) showed the absence of intermediate metabolites (glycolaldehyde and glycolate) and fermentation products, such as acetate or lactate. The experimental growth rate was calculated to be 0.18 h −1, corresponding to a 3.85 h doubling time. Since LMSE11 showed higher utilization rates, this variant was used in subsequent experiments. Effect of oxygen on two-stage glycolate production in E. 
coli Having established EG utilization by an engineered strain of E. coli, we next explored the use of EG as an orthogonal substrate for the production of glycolate within a twostage fermentation system. As previously described, the branched topology of the EG-glycolate pathway lends itself to the design of a metabolic valve for the separation of cell growth and chemical production. In particular, glycolate oxidase was identified as a potential metabolic valve, for which oxygen level was predicted to be a control mechanism. A higher oxygen level is expected to support biomass growth, while lower oxygen levels are expected to trigger glycolate accumulation. Thus, two reductions in oxygen (air flow rate) were tested to evaluate the effect of oxygen on glycolate production. For this evaluation, LMSE11 (variant 1) was grown in bioreactors with minimal media, supplemented with 2 g/L of yeast extract. The bioreactors were inoculated at an initial OD of ~ 0.4 (approximately 0.23 gDW/L), with inoculum prepared as described in the methods. Expression of the EG utilization genes was induced with 1 mM IPTG. During the growth stage, the impeller agitation was set at 1000 rpm, and the reactors were sparged with air to maintain an aeration rate of 300 mL/min (1 v/vm). At 20 h, the aeration was reduced to 150 mL/min (0.5 v/ vm) or 50 mL/min (0.16 v/vm), to simulate high and low aeration rates, and the impeller agitation was dropped to 500 rpm (Fig. 5). We observed that cell growth continued until approximately 40 h, reaching approximately 5 gDW/L, at which point cells in both reactors appeared to enter a stationary phase. Production of glycolate started at approximately 20 h and continued until the fermentation was terminated at 70 h. Cells grown at a higher secondary aeration rate (150 mL/min) accumulated more glycolate by the end of the batch (Fig. 5b), reaching a final glycolate titer of 4.1 g/L, compared to 2.5 g/L for the lower secondary aeration rate (50 mL/min) (Fig. 5a). 
Similarly, the average mass yields for glycolate on EG, as measured during the production phase, were 0.32 g/g and 0.18 g/g, for the high and low secondary aeration rates, respectively. Using FBA to approximate carbon loss from respiration and accounting for cell growth and acetate production, we were able to close the carbon balance at 83% and 88%, respectively. This carbon balance was performed by determining the moles of EG required to supply the carbon in each mole of product (glycolate, acetate and biomass). In the case of biomass, this value was determined using FBA and together these values accounted for the total expected CO 2 production. These values were then converted to the mass of EG required and compared to the actual mass of EG consumed, leading to the values reported above. Further refinements to the FBA model for E. coli growth on EG may close the carbon balance with improved accuracy. Counter-intuitively, despite propanediol oxidoreductase (fucO) being oxygen-sensitive, it was observed that the higher secondary aeration rate (150 mL/min) led to higher glycolate titers (Fig. 5b). This result can be explained by the fact that oxygen is required for the regeneration of NAD +, which is a substrate for the EG utilization pathway (see Fig. 2b). Hence, lower oxygen concentrations could lead to reduced flux through this pathway resulting in lower titers. These results suggest that there is a trade-off between the oxygen sensitivity of propanediol oxidoreductase (fucO) and the oxygendependent regeneration of NAD + required by the pathway. Based on these results, we turned to metabolic modeling to computationally evaluate the effect of oxygen and thus determine a strategy for improved glycolate production in vivo. Effect of oxygen on ethylene glycol metabolism in E. coli To refine our strategy for glycolate production, FBA simulations were used to gain further insight into the cell's metabolic response to changes in oxygen. 
To characterize the effect of oxygen, the production of the glycolate (yield), biomass production (biomass flux), the cell's respiratory quotient (RQ; ratio of carbon dioxide emitted to oxygen consumed), and the substrate specific productivity (SSP) were modelled as a function of the oxygen uptake rate (OUR). The formation of byproducts was also followed over the same OUR range. The normalized results are shown in Fig. 6, while the raw values prior to normalization are shown in Additional file 1: Figure S1. The modeling results support the use of oxygen as a mechanism to switch from the growth phase to the production phase, within the glycolate production system. As shown in Fig. 6a, glycolate production begins at a limiting oxygen uptake rate (OUR) of approximately 6.6 mmol/gDWh, and glycolate yield continues to increase as oxygen uptake is further reduced. Contrarily, the biomass flux and RQ both increase with the OUR up to a maximum value. This maximum value depends on the flux of EG, and Fig. 5 Influence of aeration on glycolate production. To assess the impact of oxygen level in bioreactors, two aeration rates were tested during the production (micro-aerobic) phase of the fermentation. A flow rate of 50 mL/min was tested for the low aeration rate (a), while a flow rate of 150 mL/ min was tested for the high aeration rate (b). Experiments were performed in duplicate. Error bars indicate the range of the measured values corresponds to the same limiting OUR (~ 6.6 mmol/ gDWh) at which glycolate production commences. This limiting OUR value changes depending on the EG flux. The modeling also predicts that by-product formation occurs when the OUR drops below 5.25 mmol/ gDWh, and that the type and amount of by-product formation (namely ethanol) depends on the OUR relative to the EG flux. Since the specific oxygen uptake rate is a function of air intake, this analysis allowed us to implicitly correlate the glycolate yield to the flowrate of air into the reactor. 
Furthermore, as the RQ and glycolate yield can be correlated via their relationship to the OUR, it should also be possible to optimize glycolate production in real-time by measuring and controlling the RQ throughout a fermentation experiment. Controlling the RQ was thus identified as a possible strategy to improve glycolate production. The selected RQ must balance the glycolate yield, substrate specific productivity and cell growth, to optimize glycolate production. The substrate specific productivity (SSP) is a measure of the moles of product obtained per mole of substrate per time (molP/molS h), and is calculated as the product of the product yield and cell growth rate (Y PS ). Based on Fig. 6, operating between an RQ value of ~ 0.15-0.4 is most optimal when considering the yield, SSP and cell growth. Therefore, to test whether this approach would increase glycolate production, an RQ value of 0.4 (corresponding to a normalized value of ~ 0.6) was selected to test experimentally. Improving glycolate production Informed by experimental and computational findings, two strategies were tested to increase glycolate production yield and titers. In the first strategy, a higher aeration rate was tested in the growth phase, while in the second strategy the RQ value was used to control the aeration rate in the production phase (as suggested via modeling). For both strategies, bioreactor experiments were performed as previously described, but with modified aeration rates and mixing speeds. The first strategy was tested with the goal of reducing the biomass production phase and increasing the glycolate production phase. To achieve this, the aeration rate during the growth phase was increased to 600 mL/min (2 v/vm) to prevent glycolate accumulation and divert flux towards cell growth. In the second phase, the aeration rate was dropped to 100 mL/ min. Results for this strategy are shown in Fig. 7a. 
Cell growth continued for approximately 70 h, after which the reactor appeared to reach stationary phase. Production of glycolate began at approximately 70 h and continued until the fermentation was terminated at 140 h (70 h of production). Final glycolate titers reached 6.8 g/L, with an initial production phase biomass concentration of approximately 4 gDW/L, corresponding to an average productivity 0.1 g/L h or approximately Fig. 6 The effect of microaerobic (oxygen-limiting) conditions on glycolate production, as predicted by modeling. a Flux balance analysis (FBA) was used to predict the glycolate yield (mol glycolate/mol EG), cell growth rate (gDW/gDWh), respiratory quotient (RQ, mol CO 2 /mol O 2 ) and the substrate specific productivity (SSP, g glycolate/g EGh) over a range of oxygen uptake rates (OURs, mmol O2/gDWh). These values were then normalized with respect to their highest values, and are presented here as: Glycolate Yield (red circle), Biomass (yellow diamond), RQ (green triangle) and SSP (blue square). The normalized RQ value corresponding to the selected RQ value of 0.4 is shown by the dotted black line. b FBA was also used to explore by-product fluxes (mmol/gDWh) over the same range of oxygen uptake rates (OURs). The by-products shown are those most commonly produced under fermentative conditions: acetate (blue circle), ethanol (purple diamond) and formate (green triangle). The FBA simulations were performed using biomass as the objective and with the EG flux set to 5 mmol/gDWh, as measured during the shake flask experiments. The simulations predict that glycolate production begins at the onset of oxygen limitation, at approximately 6.6 mmol/gDWh of oxygen. At greater OUR values, FBA predicts that there is no glycolate accumulation or by-product production and that the RQ and cell growth plateau, since enough oxygen is available for complete respiration 0.32 mmol/gDWh. 
The initial yield of glycolate was 0.92 g/g after the first sample was taken, however, the cumulative yield decreased during the course of the production stage with the final overall production yield being 0.75 g/g, or 61% of theoretical (see Table 1). In implementing this first strategy, we produced significantly more product at a higher yield; however the cells took much longer to reach an appropriate concentration for the production phase. At 600 mL/min (2 v/vm) during the growth phase, it took almost 70 h to reach a concentration of 4 gDW/L, while comparatively the previously tested 300 mL/min (1 v/vm) produced the same cell concentration within 30 h. We hypothesize that it took longer to reach a higher OD due to the oxygen sensitivity of propanediol oxidoreductase (fucO). Even using the more oxygen-stable mutant (strain LMSE11), the increased dissolved oxygen levels and faster oxygen mass transfer rates likely caused oxygen toxicity in the cells due to the inactivation of propanediol oxidoreductase by MCO. This likely placed a high metabolic burden on the cell stemming from high protein demand without a sufficient means to utilize EG as a carbon source. Considering all of the bioreactor experiments up to this point, it seems that higher aeration rates lead to higher glycolate titers and yield, yet also retard cell growth when too high. From a process perspective, meeting the oxygen demand in large-scale aerobic bioreactors often requires significant energy input, making it a major operating cost. Thus, if possible, it is desirable to operate reactors at a lower air flow rate. With this in mind, the second strategy we employed sought to produce glycolate at a high titer but at a lower aeration rate. Hence, cells were cultivated under a constant low aeration rate of 50 mL/ min (0.17 v/vm), but at a variable impeller speed. In the growth stage, the impeller speed was controlled such that the oxygen level remained above 20% (up to a maximum of 1000 rpm). 
In the production phase, the impeller speed was decreased to reduce oxygen transfer. As mentioned previously, our FBA simulations showed a correlation between the RQ and the glycolate yield, and we stipulated that this relationship could be used to optimize glycolate production. From this computational analysis, we determined that an RQ value of ~ 0.4 was within the optimal operating region. Therefore, the production stage impeller speed was decreased until the RQ, as measured by the online-mass spectrometer, reached the selected value of ~ 0.4. The RQ was first measured at the start of the production stage at 26 h, and was measured at multiple intervals over the remainder of the 140-h fermentation. Overall, the average RQ value during this time was measured as 0.37. The results of this experiment are shown in Fig. 7b. Using this RQ-based strategy, the growth stage was reduced to 26 h, with a final cell concentration of approximately 2.4 g/L at the end of the growth phase. Glycolate production began at approximately 28 h and continued until the fermentation was terminated at 140 h. During the 112 h of production time, 10.4 g/L of glycolate was produced with overall yield of 0.8 g/g from EG. The productivity was determined to be 0.1 g/L h, making it comparable to the productivity measured in the first strategy (high aeration rate). These experimental results suggest that the RQ can indeed be used as an effective control variable, as predicted by FBA. However, while FBA Fig. 7 Fermentation profiles for aeration strategies. Two different aeration strategies were tested to improve glycolate production. The growth phase and production phase are separated by grey shading. a The first strategy employed a high flow rate of 600 mL/min during the growth phase, and a flow rate of 100 mL/min during the production phase. The final growth phase cell density was approximately 4 gDW/L and the final glycolate titer reached 6.8 g/L. 
b The second strategy used a consistently low flow rate of 50 mL/min during both the growth and production phases, but the impeller speed was reduced during the production phase such that the respiratory quotient (RQ) was approximately 0.4. The average stationary phase cell density was 2.5 gDW/L. Cells were capable of robust glycolate production for well over 100 h in the production phase, reaching a final glycolate titer of 10.4 g/L simulations predicted that an RQ value of ~ 0.4 would reach a glycolate molar yield of ~ 0.4 mol/mol, experimentally this strategy led to a molar yield of 0.66 mol/ mol. These results suggest that while the FBA simulations were useful in identifying a control strategy to improve glycolate production from EG, further optimization of model parameters is required to accurately predict the physiological response to environmental conditions. Therefore, the substrate uptake rate along with the ATP maintenance parameters are important parameters that may need further investigation. This is further discussed in Additional file 1: Figure S1. Discussion Conventional approaches to the bio-based production of chemicals rely heavily on sugar-based feedstocks, such as glucose and xylose. Yet, microorganisms tend to be very diverse in their ability to metabolize different carbon sources. In this work we proposed and examined the use of EG as a non-sugar alternative to support growth and chemical production in bioprocesses. One of the greatest motivations in studying EG as a substrate stems from its ability to be derived from CO 2, either through electrochemical reduction or other conversion technologies. Hence, its consideration as a feedstock that can potentially sequester carbon is akin to studies examining syngas or formate utilization. By evaluating the orthogonality of selected substrateproduct pairs, the conversion of EG to glycolate was identified as a good candidate to assess EG utilization in the context of biochemical production. 
From the results obtained in this study, we conclude that EG is a suitable platform for growth and highly efficient for producing glycolate. More generally we also believe that with further metabolic engineering, EG could be used to produce alcohols and other organic acids that are typically produced during fermentative metabolism. This capability, we believe, can have an impact in industrial biotechnology. Our consideration for EG as a substrate was driven primarily by challenges related to the utilization of sugarbased substrates in E. coli. As we described earlier, the amount of interactions between growth and production pathways affects the level of production that can be achieved. These interactions, which we quantified previously as orthogonality, help to identify pathways with high and low degrees of interactions. Computationally, we find that EG exhibits a lower level of interaction compared to many natural and some synthetic pathways, which we believe makes it a more robust substrate than other alternative substrates such as formate or methanol. Hence, these interactions provided a rational basis for selecting and engineering a novel substrate-utilizing pathway in E. coli. To our knowledge, this work demonstrates the first de novo design of a bioproduction pathway from alternative substrates based on the orthogonality metric. Our results demonstrate the applicability of E. coli to use a new and novel substrate that had not been considered previously. Initial characterization of cell growth using shake flasks showed a substrate uptake rate of approximately 5 mmol/gDWh. At typical cell densities for industrial processes (10-100 g/L), this corresponds to a net flux of 3-30 g/L h, well above the required 3-4 g/L h productivity typically needed for growth-independent production. Further characterization of this strain led us to determine that there was some oxygen sensitivity, especially during early exponential phase. 
We believe that this is likely caused by metal catalyzed oxidation of 1,2-propanediol oxidoreductase (fucO) in the presence of excess aeration and could be addressed by using O 2 -tolerant Zn 2+ -dependent variants. Based on previous reports from literature, two fucO mutants predicted to have improved O 2 -tolerance were tested. Ultimately, the I7L/L8V mutant (LMSE11) showed the greatest EG assimilation and growth rate/ yield, and was thus used for subsequent experiments. It should be noted, however, that the growth yield with this strain was still lower than the value predicted by modeling: 0.28 g DW/g EG compared to 0.43 g DW/g EG predicted under aerobic conditions at the measured substrate uptake rate. This difference suggests that the enzyme is still not operating as efficiently as possible and that additional improvements may be achievable through enzyme engineering. In particular, directed evolution or rational design approaches could be used to improve the enzyme activity and oxygen tolerance of fucO. Such improved enzymes can lead to further improvements in glycolate production from EG. An important observation made was a reduction in the substrate uptake rate during oxygen limiting conditions. When oxygen is limiting, reduced metabolites and electron carriers can accumulate. Since the first two steps of the EG-assimilation pathway are NAD + -dependent, we believe that the reduced oxygen results in increased NADH pools, leading to a decrease in the rates of reaction catalyzed by fucO and aldA (which each require NAD + as a cofactor). This change in the rates had a net effect of lowering the flux of EG into the cell. This finding necessitates a further study of cellular physiology under EG utilization in order to understand the trade-off in yield and productivity as a function of the dissolved oxygen feeding in bioreactors. 
For example, in the first set of bioreactor experiments we found that using a higher aeration rate led to a higher overall glycolate titer (4.1 g/L at 150 mL/min compared to 2.5 g/L at 50 mL/min). Comparatively, by maintaining a target respiratory quotient during the second set of bioreactor experiments, an even higher product titer was achieved at the lower aeration rate (10.4 g/L at 50 mL/min). Hence, optimization of aeration in the bioreactor would substantially improve economic performance, not only in terms of product formation but also in terms of the absolute cost of aeration. In this study, computational modeling allowed us to predict and better understand the metabolic behaviour for EG utilization. Through flux balance analysis (FBA) it was demonstrated that EG is assimilated into native metabolism through glycolate oxidase, and that this enzyme could serve as an effective metabolic valve for the control of cell growth and production. FBA predicted that glycolate was unlikely to be produced under aerobic conditions, however under oxygen-limiting conditions glycolate was likely to accumulate. This agreed well with bioreactor studies, which showed that glycolate accumulation could be induced through a reduction in the aeration. Modeling also predicted a reduction in EG uptake rate, under oxygen-limiting conditions, which was equally seen experimentally. During the experiments, we also observed small amounts of acetate and trace amounts of ethanol in the fermentation media during the micro-aerobic glycolate production phase. The presence of acetate and ethanol in the fermentation medium, typical products of anaerobic growth, suggests that EG may be a suitable feedstock for the production of other anaerobic products. Comparatively, the modeling predicted ethanol production, depending on the ratio of the EG uptake flux and the OUR (Fig. 6b).
These results, and the absence of acetate formation in the modeling predictions, highlight that while modeling is useful for obtaining a general understanding of metabolic behaviour, the model accuracy depends largely on the completeness and accuracy of the constraints used. More specifically, many regulatory changes occur in cells when they are under oxygen-limiting conditions that were not accounted in the models and which may cause some discrepancies. Hence, it is possible that taking into account other anticipated regulatory modifications can lead to more accurate predictions. Finally, by extending the observations from FBA, we were able to correlate the glycolate production with the respiratory quotient. Thus, by measuring the respiratory quotient in real-time, using data from the process mass spectrometer, we were able to use this correlation to improve and control glycolate production during the course of the fermentation. This computational-based strategy, which led to the highest glycolate titers and production rate at a relatively low aeration rate, could be employed in other production systems to achieve similar improvements in production. Ultimately, we believe that our results successfully demonstrate the design and optimization of bioproduction pathways using computational tools and their metrics (i.e. orthogonality), particularly for the design of pathways for unconventional, non-sugar feedstocks. Conclusions The results described in this study establish a framework for future production of chemicals in E. coli using EG as a substrate. We describe, for the first time, the successful production of glycolate from EG using the substrate as a feedstock for growth and for production. We also used metabolic modeling to identify the oxygenation conditions that optimize the production of glycolate and validated the strategy experimentally, thereby illustrating the value of metabolic models in bioprocess optimization. 
Further, we also showed that ethylene glycol utilization pathways are highly orthogonal to cellular metabolism making it an important feedstock for accelerated metabolic engineering using dynamic control. We also illustrated the value of orthogonal pathways for dynamic control of metabolism using oxygen as a control valve to switch to and optimize glycolate production. We believe this can have important implications in the future for integrating biorefineries into industries where carbon dioxide can be captured and converted from point sources. Methods A summary of the investigations is presented in the Introduction (Fig. 1). First, the orthogonality was evaluated to compare EG as a feedstock for selected bioproducts (Fig. 1a). Based on this analysis, the EG to glycolate pathway was selected to be used as a case study for orthogonality-based pathway design. The production system and its anticipated metabolic behaviour was first characterized using flux balance analysis (FBA) and flux variability analysis (FVA) (Fig. 1b). Following this analysis, the pathway was tested in shake flasks, to confirm that the pathway could indeed support both cell growth and glycolate production, as predicted computationally (Fig. 1c). For this experiment, two strains were tested, each having a different fucO mutant, informed based on previously published studies (discussed further in text). The best growing strain was then selected for further testing in bioreactors (Fig. 1d). As per the identified metabolic valve (glycolate oxidase), two reduced air flow rates were tested in the production stage to evaluate the use of oxygen as a control method for two-stage growth and production. Using data from the shake flasks and bioreactor experiments, flux balance analysis (FBA) was subsequently performed to gain insight on the metabolic response to oxygen level and to determine the optimal air flow rate for glycolate production (Fig. 1e). 
Finally, a second round of bioreactor experiments was performed to test whether glycolate production could be further improved, using the insights gained through experiments and modeling (Fig. 1f). The specific methods employed in these experimental and computational analyses are discussed in detail in the sections that follow. Media and cultivation conditions Cells were grown using lysogeny broth (LB) as per manufacturer's instructions (Bioshop, Burlington, ON) for all strain construction and fermentation pre-cultures. Pre-cultures were grown in LB media in 10 mL test tube cultures overnight and transferred to fresh 250 mL shake-flasks containing 50 mL LB, 1 mM IPTG and 10 g/L EG. After 24 h, these cells were harvested by centrifugation, re-suspended in 2 mL of residual supernatant and used as inoculum for bioreactor or minimal media shake-flasks for characterization at 37 °C. When characterizing strains (see Fig. 1c), cells were grown in M9 minimal media with the following composition: 1.0 g/L NH 4 Cl, 3.0 g/L KH 2 PO 4, 6.8 g/L Na 2 HPO 4, 0.50 g/L NaCl. Supplements of yeast extract at 2 g/L were added to minimal media. EG was used as the carbon source at a concentration of ~ 10 g/L. IPTG was used at a concentration of 1 mM to induce expression of the EG assimilation pathway. A trace metal solution was prepared according to the following composition prepared in 0.1 M HCl per litre and added to the media at a concentration of 1/1000: 1.6 g FeCl 3, 0.2 g CoCl 2 ⋅6 H 2 O, 0.1 g CuCl 2, 0.2 g ZnCl 2 ⋅4H 2 O, 0.2 g NaMoO 4, 0.05 g H 3 BO 3. 1 M MgSO 4 and 1 M CaCl 2 were also added to the media at a concentration of 1/500 and 1/10,000, respectively. For all cultures, carbenicillin was added as appropriate at 100 µg/mL. All characterization experiments were conducted with 50 mL media in 250 mL shake flasks, continuously agitated at 230 rpm and at 37 °C. Culturing techniques employed in the bioreactors are described below.
Culturing techniques in reactors Applikon MiniBio500 500 mL fermentation vessels with a 300 mL working volume were used for cultivating strains in bioreactors. The fermentation vessels were equipped with condensers to prevent changes in volume due to aeration. Dissolved oxygen and pH probes were used in accordance with the manufacturer's operating guidelines. pH was maintained at 7 with the addition of 3 N KOH. Growth conditions were maintained at 37 °C. Bioreactors were inoculated with pre-culture (previously described) at OD ~ 0.4 (approx. 0.23 gDW/L). In total, four bioreactor cultivations were conducted using E. coli strain LMSE11 in minimal media, supplemented with yeast extract at 2 g/L. The bioreactors contained 1 mM IPTG to maintain induced expression of the EG pathway genes. All bioreactor cultivations were carried out in fed-batch. Flowrate was controlled using a Brooks Instruments mass flow controller (GF Series) and gas was analyzed using Thermo Scientific ™ Sentinel dB mass spectrometer for online gas measurement. Systematic changes in aeration rate and impeller speed were applied between cultivations, as detailed below. Cultivations 1 and 2: Cells were grown at an impeller speed of 1000 rpm and sparged with air to maintain oxygen at 300 mL/min (1 v/vm). At 20 h, the aeration was reduced to 150 mL/min (0.5 v/vm) or 50 mL/min (0.16 v/vm) to simulate high and low secondary aeration rates, and the impeller speed was dropped to 500 rpm. See Fig. 1d. Cultivation 3: Cells were grown at an impeller speed of 1000 rpm and sparged with air to maintain oxygen at 600 mL/min (2 v/vm) during the growth phase. In the production phase, the impeller speed was reduced to 500 rpm while the aeration rate was dropped to 100 mL/min (0.33 v/vm). See Fig. 1f.
Cultivation 4: During the growth phase, cells were sparged with air to maintain oxygen at 50 mL/min (0.16 v/vm) throughout the cultivation and the impeller speed was controlled to maintain a minimum oxygen level of 20%, up to a maximum impeller speed of 1000 rpm. In the production phase, the impeller speed was reduced as necessary to achieve a respiratory quotient (RQ) of ~ 0.4. See Fig. 1f. Analytical methods Analysis of fermentation production was measured via high performance liquid chromatography (HPLC). We used a Bio-rad HPX-87H organic acids column with 5 mM H 2 SO 4 as the eluent and a flowrate of 0.4 mL/min at 50 °C. Organic acids were detected at 210 nm. Cell densities of the cultures were determined by measuring optical density at 600 nm (GENESYS 20 Visible Spectrophotometer). Cell density samples were diluted as necessary, to fall within the linear range. A differential refractive index detector (Agilent, Santa Clara, CA) was used for analyte detection and quantification. Yields were calculated between two time points, whereas the cumulative yield was calculated between the initial and final measurements. Plasmids and strains Genes fucO and aldA were cloned from E. coli MG1655 genomic DNA and assembled using Gibson Assembly |
#ifndef INTERLINCK_CORE_PARSER_PARSER_H
#define INTERLINCK_CORE_PARSER_PARSER_H
#include <memory>
#include "interlinck/Core/Basic/Concepts.hpp"
#include "interlinck/Core/Syntax/SyntaxKinds.hpp"
#include "interlinck/Core/Types.hpp"
#include "Core/Parser/Lexer.hpp"
#include "Core/Support/AnalysisContext.hpp"
// Forward declaration only: keeps this header free of the full Errors
// headers. NOTE(review): DiagnosticPool is not referenced in the visible
// declarations below — confirm it is still needed here.
namespace interlinck::Core::Errors
{
class DiagnosticPool;
} // end namespace interlinck::Core::Errors
// Forward declarations of syntax-tree types that this header uses only
// through pointers, so the full Syntax headers are not required.
namespace interlinck::Core::Syntax
{
class ISyntaxNode;
class ISyntaxToken;
class SyntaxPool;
} // end namespace interlinck::Core::Syntax
namespace interlinck::Core::Parser
{
/// Abstract base class for language-specific parsers.
///
/// Holds a shared Lexer that supplies the token stream and a reference to the
/// AnalysisContext shared across the analysis pipeline. Concrete parsers
/// implement parseRoot() to build the syntax tree. Instances are
/// non-copyable and non-movable.
class Parser
{
public:
    Parser() = delete;
    virtual ~Parser() noexcept = default;
    Parser(const Parser&) = delete;
    Parser& operator=(const Parser&) = delete;
    Parser(Parser&&) = delete;
    Parser& operator=(Parser&&) = delete;

    /// Public entry point for parsing; returns the root syntax node.
    /// Defined out of line — presumably delegates to parseRoot(); confirm in
    /// the implementation file.
    const Syntax::ISyntaxNode* parse() noexcept;

protected:
    /// Constructs a parser over the given lexer. The AnalysisContext is held
    /// by reference and must outlive this parser.
    explicit Parser(std::shared_ptr<Lexer> lexer,
                    Support::AnalysisContext& context) noexcept;

    /// Implemented by concrete subclasses to produce the root of the tree.
    virtual const Syntax::ISyntaxNode* parseRoot() noexcept = 0;

    // --- Thin forwarders over the underlying lexer ---

    /// Forwards to Lexer::currentToken().
    inline const Syntax::ISyntaxToken* currentToken() const noexcept { return _lexer->currentToken(); }

    /// Forwards to Lexer::takeToken(syntaxKind), consuming a token of the
    /// expected kind.
    inline const Syntax::ISyntaxToken* takeToken(Syntax::SyntaxKind syntaxKind) const noexcept { return _lexer->takeToken(syntaxKind); }

    /// Forwards to Lexer::takeToken(), consuming the current token.
    inline const Syntax::ISyntaxToken* takeToken() const noexcept { return _lexer->takeToken(); }

    /// Forwards to Lexer::peekToken(n) to look n tokens ahead.
    inline const Syntax::ISyntaxToken* peekToken(il_size n) const noexcept { return _lexer->peekToken(n); }

    /// Forwards to Lexer::advance().
    inline void advance() const noexcept { _lexer->advance(); }

    /// Records a diagnostic via the lexer at the current position.
    REQUIRES_ENUMERATABLE(ErrorCodeType)
    inline void addError(ErrorCodeType errorCode) const noexcept { _lexer->addError(errorCode); }

    /// Records a diagnostic attached to a specific token.
    /// NOTE(review): this overload is non-const while the overload above is
    /// const — confirm whether the asymmetry is intentional.
    REQUIRES_ENUMERATABLE(ErrorCodeType)
    inline void addError(const Syntax::ISyntaxToken* token,
                         ErrorCodeType errorCode) noexcept
    {
        _lexer->addError(token, errorCode);
    }

    /// Records a diagnostic attached to a syntax node.
    REQUIRES_ENUMERATABLE(ErrorCodeType)
    inline void addError(const Syntax::ISyntaxNode* node,
                         ErrorCodeType errorCode) noexcept
    {
        _lexer->addError(node, errorCode);
    }

protected:
    std::shared_ptr<Lexer> _lexer;       // token source, shared ownership
    Support::AnalysisContext& _context;  // shared analysis state, not owned
};
} // end namespace interlinck::Core::Parser
#endif // INTERLINCK_CORE_PARSER_PARSER_H
|
<reponame>MichelJansson/vscode-lua4rc<filename>src/extension.ts
"use strict"
import vscode = require("vscode")
import L4RCApiData from "./ApiData"
import { L4RCAutocomplete } from "./Autocomplete"
import { L4RCHover } from "./Hover"
const LUA_MODE = { language: "lua", scheme: "file" }
/**
 * Extension entry point, called by VS Code on activation.
 *
 * Loads the bundled API metadata from the extension's `data` directory and
 * registers the Lua completion and hover providers. Disposables are pushed
 * onto `context.subscriptions` so VS Code cleans them up on deactivation.
 */
export function activate(context: vscode.ExtensionContext) {
    // `const` — neither binding is ever reassigned.
    const dataPath = context.asAbsolutePath("./data")
    const l4RCApiData = new L4RCApiData(dataPath)

    // `push` is variadic, so a single call registers both providers.
    context.subscriptions.push(
        vscode.languages.registerCompletionItemProvider(
            LUA_MODE,
            new L4RCAutocomplete(l4RCApiData),
            '.'
        ),
        vscode.languages.registerHoverProvider(
            LUA_MODE,
            new L4RCHover(l4RCApiData)
        )
    )
}
// Called by VS Code when the extension is deactivated. All disposables were
// registered on context.subscriptions in activate(), so there is nothing
// extra to clean up here.
export function deactivate() {
}
The Clayman Institute offers a two-year postdoctoral fellowship. Recent social science Ph.D.'s (including Sociology, History, Communications, Economics, Political Science, Psychology, and Anthropology) whose research focuses on gender with an intersectional perspective are eligible. We encourage scholars with a strong interest in interdisciplinary methods to apply.
While in residence at the Institute, postdoctoral fellows are expected to participate in Clayman Institute activities throughout the academic year in addition to pursuing their own research.
Postdoctoral fellows participate in our community of Clayman Institute faculty research fellows, faculty affiliates, and graduate dissertation fellows through their own research and contributions to our goal of reinvigorating gender equality. Postdoctoral fellows' responsibilities include contributing to the Clayman Institute's Gender News, working with Graduate Dissertation Fellows, participating in our graduate Voice and Influence Program, and attending our regularly scheduled faculty luncheon discussions.
The postdoctoral fellows play a critical role in Gender News, which is the public outreach component of the Clayman Institute and our affiliated scholars. To increase the impact of innovative work on gender equality, Gender News publishes articles on gender research for academic and general audiences. Postdoctoral fellows write articles for Gender News that include summaries of the latest peer-reviewed gender research, interviews with Institute faculty research fellows and faculty affiliates, and reviews of relevant academic conferences.
Meet our current postdoctoral fellows!
The appointment is for two years. Applicants must have their doctoral degree in hand no later than 30 days prior to the appointment start date, and the start date must be no more than three years after the awarding of their degree. Postdoctoral fellows receive a stipend, annual research fund, standard benefits, and are expected to be in residence for the duration of the fellowship.
If you are selected, what would your plans be in your first year as a Clayman Institute postdoctoral fellow?
If you are selected, what would your longer-term career plans be after finishing your Clayman Institute postdoctoral fellowship?
3. OPINION PIECE: In 300 words or less, select a gender-focused current event and, using research, formulate an opinion piece. Make an argument for the way the general public should think about the event or issue you selected. Applicants should be able to make their research-based point succinctly and without supporting documentation. The successful candidate will adhere to the maximum 300 word limit. This piece should be single space paragraphs only. At the top of your opinion piece list only your initials, degree discipline and total number of words.
4. DISSERTATION RESEARCH SUMMARY: In 1000 words or less, single-spaced and including all notes and bibliography, provide an abstract that summarizes your dissertation.
5. CV: Provide a curriculum vitae of no more than 5 pages.
Two letters of recommendation. Confidential letters of recommendation are submitted via AcademicJobsOnline. Please submit only two letters of recommendation. Only your first two letters submitted will be considered.
For letter writers: for assistance with submitting letters of recommendation through AcademicJobsOnline, use this link to their "Reference Writer FAQ."
Applications must be submitted through our online application.
Applications are due January 10, 2019 by 11:59 PM Pacific Standard Time.
No exceptions will be made for late submissions.
Please address any questions regarding the Clayman Institute postdoctoral application process to Wendy Skidmore, Fellowship Manager.
Yes. Recent social science Ph.D.'s (including Sociology, History, Communications, Economics, Political Science, Psychology, and Anthropology) whose research focuses on gender with an intersectional perspective are eligible.
How many fellowships are awarded annually?
This cycle, one two-year postdoctoral fellowship will be awarded, beginning no later than the first day of Autumn quarter in September 2019.
Fellowship awards will be announced in April 2019.
Can I defer a fellowship?
No. We do not accept deferrals. Should you be unable to accept a fellowship, the award will be offered to an alternate candidate. You may apply again the next year provided you meet all other eligibility requirements.
No. Applicants must be continuously enrolled for the duration of their fellowship and be on campus during the Autumn, Winter, Spring, and Summer quarters of the fellowship years.
Has my application been received?
Once all required fields have been completed and documents have been uploaded (excluding letters of recommendation), you will receive a confirmation email. Please carefully review the application requirements before submission. |
/**
* Tests that getKingPosition returns the correct position for each king.
*/
@Test
public void testGetKingPositionFor() {
Board board = new Board(new GameState());
Position whiteKing = new Position(4, 7);
Position blackKing = new Position(4, 0);
assertEquals(whiteKing, board.getKingPositionFor(Color.WHITE));
assertEquals(blackKing, board.getKingPositionFor(Color.BLACK));
} |
/*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.core.read.engine;
import java.util.BitSet;
/**
 * Tracks, via a pair of {@link BitSet}s, which ordinals of a type are
 * currently populated and which were populated before the most recent
 * update cycle. Unless explicitly specified otherwise, one of these is
 * automatically registered with each type in a {@link HollowReadStateEngine}.
 */
public class PopulatedOrdinalListener implements HollowTypeStateListener {

    private final BitSet populatedOrdinals = new BitSet();
    private final BitSet previousOrdinals = new BitSet();

    @Override
    public void beginUpdate() {
        // Snapshot the current population so it becomes the "previous"
        // set for the cycle about to be applied.
        previousOrdinals.clear();
        previousOrdinals.or(populatedOrdinals);
    }

    @Override
    public void addedOrdinal(int ordinal) {
        populatedOrdinals.set(ordinal);
    }

    @Override
    public void removedOrdinal(int ordinal) {
        populatedOrdinals.clear(ordinal);
    }

    @Override
    public void endUpdate() {
        // Nothing to finalize: both bit sets are maintained incrementally.
    }

    /** @return whether the populated set changed during the most recent cycle */
    public boolean updatedLastCycle() {
        return !previousOrdinals.equals(populatedOrdinals);
    }

    /** @return the currently populated ordinals (live view, not a copy) */
    public BitSet getPopulatedOrdinals() {
        return populatedOrdinals;
    }

    /** @return the ordinals populated before the latest cycle (live view, not a copy) */
    public BitSet getPreviousOrdinals() {
        return previousOrdinals;
    }
}
|
Effect of 5-aza-2'-deoxycytidine combined with trichostatin A on RPMI-8226 cell proliferation, apoptosis and DLC-1 gene expression. This study was aimed to investigate the effects of the DNA methylation inhibitor 5-aza-2'-deoxycytidine (5-Aza-CdR) and histone deacetylase inhibitor trichostatin A (TSA) on DLC-1 gene transcription regulation and molecular biological behaviours in the human multiple myeloma RPMI-8226 cells. The cells were treated respectively with 5-Aza-CdR and TSA alone, or the both combination; the cell proliferation and apoptosis, DLC-1 expression, the protein expression of Ras homolog family member A (RhoA) and Ras-related C3 botulinum toxin substrate 1 (Rac1) were examined by CCK-8 method, RT-PCR and ELISA, respectively. The results showed that the 5-Aza-CdR and TSA had cell growth inhibitory and apoptosis-inducing effects in dose-dependent manner (P < 0.05). Compared with a single drug (5-Aza-CdR or TSA alone), the effects were significantly enhanced after treatment with the combination of 5-Aza-CdR and TSA (P < 0.05). DLC-1 was weakly expressed in the control group; the treatment with 5-Aza-CdR alone enhanced its re-expression dose-dependently (P < 0.05). Compared with 5-Aza-CdR alone, 5-Aza-CdR plus TSA enhanced DLC-1 re-expression significantly.Compared with the control, 5-Aza-CdR and TSA significantly decreased RhoA and Rac1 protein expression (P < 0.05). It is concluded that 5-Aza-CdR and TSA can effectively reverse DLC-1 expression of RPMI-8226 cells; TSA has a synergistic effect on its re-expression. 5-Aza-CdR and TSA have significant cell growth inhibitory and apoptosis-inducing effects on RPMI-8226 cells. These effects may be related to the inhibition of Rho/Rho kinase signalling pathway. |
<reponame>ericwithac/Benzinga<gh_stars>1-10
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Sample lists GCS buckets using the S3 SDK using interoperability mode.
package s3sdk
import (
"bytes"
"io/ioutil"
"log"
"os"
"strings"
"testing"
)
// TestMain silences log output for the duration of the test run and
// restores it to stderr before exiting with the run's status code.
func TestMain(m *testing.M) {
	log.SetOutput(ioutil.Discard)
	exitCode := m.Run()
	// Restore before exiting; os.Exit does not run deferred calls.
	log.SetOutput(os.Stderr)
	os.Exit(exitCode)
}
// TestList exercises listGCSBuckets against GCS in S3-interoperability mode.
// It is skipped unless HMAC credentials are supplied via the environment.
func TestList(t *testing.T) {
	googleAccessKeyID := os.Getenv("STORAGE_HMAC_ACCESS_KEY_ID")
	googleAccessKeySecret := os.Getenv("STORAGE_HMAC_ACCESS_SECRET_KEY")
	if googleAccessKeyID == "" || googleAccessKeySecret == "" {
		t.Skip("STORAGE_HMAC_ACCESS_KEY_ID and STORAGE_HMAC_ACCESS_SECRET_KEY must be set. Skipping.")
	}

	buf := new(bytes.Buffer)
	// Fatalf (not Errorf): if the call fails, the buffer is empty and the
	// contents check below would only add a misleading second failure.
	if _, err := listGCSBuckets(buf, googleAccessKeyID, googleAccessKeySecret); err != nil {
		t.Fatalf("listGCSBuckets: %v", err)
	}

	got := buf.String()
	if want := "Buckets:"; !strings.Contains(got, want) {
		t.Errorf("listGCSBuckets got\n----\n%s\n----\nWant to contain\n----\n%s\n----", got, want)
	}
}
|
Transformation of Islamic Higher Education Institutions in Facing the Era of Industrial Revolution 4.0 The Fourth Industrial Revolution (IR 4.0) based on digital economy is driving various transformations in all fields, including the education system by focusing not only on technological changes but also human changes to get closer to their Creator. Despite the rapid pursuit of IR 4.0, the education system is still in line with the main objective which is to produce moral human beings. Thus, this article presents three main purposes; 1) to identify Education 4.0 according to the Quran, education, and IR 4.0; 2) to identify the challenges faced in the Education 4.0 implementation; and 3) to propose a competitive Education 4.0. This study employed a qualitative research methodology through data collection from several previous works. The findings of this study revealed that the incorporation of IR 4.0 in education not only creates employment opportunities in various fields but also simultaneously produces workforce with high morale, especially in universities to produce moral graduates. Introduction The rise of a digital industrial technology, known as an Industrial Revolution 4.0 (IR 4.0), is a transformation that faciliates data collection and analysis across machines to enable faster, more flexible, and more efficient processes in producing high quality goods at lower costs. This manufacturing revolution will increase productivity and economic shifts, foster industrial growth, and change the workforce profile which in turn changes the competitiveness of a company and sector. The IR 4.0 is changing the current world economy from a resource-based conventional economy to a digital economy depending on the industrial technology. The presence of IR 4.0 emphasizing the development of virtual reality technology with less use of manpower certainly affects many aspects of life. 
Groups of highly skilled and efficient workers are needed to accommodate robots and other automation systems to speed up production and productivity processes. Islamic Higher Education Education is a continuous process in educating and shaping people to become moral human beings in this world and the hereafter. From an Islamic perspective, education can be defined as a process of educating and training the human mind, physically and spiritually based on Islamic values of Quran and Sunnah so that humans will devote themselves to Allah SWT. The above definition clearly shows that Islamic education is holistic, integrated, and has a balanced paradigm. Apart from emphasizing intellectual and physical elements, Islamic education also offers spiritual components for a prosperous and content life in this world and hereafter. The goal of Islamic education is to develop and shape humans as servants and caliphs of Allah with knowledge, faith, taqwa, good deeds, and traits based on the Quran and al-Sunnah as well as enable them to contribute towards developing the nation and ummah. The Center for Curriculum Development developed the philosophy of Islamic education as follows: "Islamic education is a continuous effort to convey knowledge, skills, and appreciation of Islam based on the Quran and Sunnah in shaping attitudes, skills, personalities, and purpose of life as a servant of God with responsibility of developing oneself, society, environment, and nation towards achieving virtue in this world and the hereafter". Thus, religious education is not merely lecturing the subject to deliver its values but is actually a comprehensive learning to shape students' attitudes in theory and practice. Industrial Revolution 4.0 What is Industrial Revolution 4.0? It is a change of work that focuses on the patterns of digital economy, artificial intelligence, big data, and robotics amongst others. These changes are known as innovations that disrupt current world phenomena. 
In this era, the speed and use of internet technology can lead anyone to be a winner. Shwab provided an example to help understand how the industrial revolution changes over time. During IR 1.0, water and steam were used for production management, while IR 2.0 presented the use of electrical power for mass production. For IR 3.0, electronics and information technology were used to automate production. Quran, Education and Industrial Revolution 4.0 Education 4.0 is a response to the needs of IR 4.0 where human and technological changes are aligned to allow for new possibilities. This requires more interdisciplinary teaching and research as some features of the innovations. Is Education 4.0 more efficient and effective? The success of science in revealing the secrets of Allah SWT is supported by the increasingly sophisticated technology. Science and technology research in Islam should lead us to contemplation and provide awareness for human beings to better understand their Creator. As the word of Allah SWT states: "Indeed, in the creation of the heavens and the earth; the alternation of the day and the night; the ships that sail the sea for the benefit of humanity; the rain sent down by Allah from the skies, reviving the earth after its death; the scattering of all kinds of creatures throughout; the shifting of the winds; and the clouds drifting between the heavens and the earth-in all of this are surely signs for people of understanding. Despite the change of eras, Islam remains consistent and suitable for every period. The Quran is a relevant guide and can be used at any time without borders. There has never been a change in the verses of the Quran, and modern science has succeeded in revealing the secrets of Allah SWT contained in His book. There are nine trends related to Education 4.0. 
First, learning can be carried out anywhere using e-learning tools for distance and self-paced learning for theoritical classes, while interactive learning is performed in the classroom. Second, student learning is conducted individually, where difficult tasks are given to students only after a certain level of mastery is achieved. At this level, a support group is needed to promote positive learning and increase students' confidence in their own academic abilities. Third, students have the option to determine their learning methods. Fourth, students will be exposed to many learning projects through the use of knowledge and skills in completing some short-term projects. By engaging in projects, they will collaborate and manage their time well. Fifth, students will be exposed to field-based learning such as industrial training, coaching projects, and collaboration projects. Sixth, students will be exposed to data interpretation through theoretical understanding and reasoning skills to draw conclusions based on logic and trends from a given data set. Seventh, students will be assessed based on their level of knowledge during the learning process and field projects. Eighth, student opinions will be considered in designing and updating the curriculum. Their input will help the development of the university curriculum to maintain the latest and quality curriculum framework. Finally, students are subjected to independent learning guided by instructors or facilitators through their learning process. The nine trends in Education 4.0 by Fisk explains the shift of key learning responsibilities from educators to students. Educators need to play their part to support the transition and should not consider it as a threat to conventional teaching. Education in Malaysia The implementation of e-learning in the Malaysian education curriculum today is also seen to be beneficial to the Gen Z and Alpha. 
The media that initially started from the transfer of information through radio has dramatically expanded to its digital counterpart, giving impact and creating public awareness that communication methods and knowledge distribution are no longer just through whiteboard and chalk as in the past. According to Amin and Norazah, lectures in lecture rooms are no longer conventional for university graduates to be in line with current creative and innovative technology by applying digital technology. Therefore, educators should take steps to identify the types of technologies that are able to connect the understanding of the new generation to that of the older generation. This is due to the availability of tools such as computers, smartphones, and software that are easy to use when studying either inside or outside of the classroom. One of the government's efforts to increase innovation activities and programs in education is to develop the Education Development Master Plan (EDMP). The role of education has been outlined as the success of the EDMP mission to enhance the country's knowledge and innovation capacity as well as to develop the first class. Role of Educators The teaching staff in Islamic educational institutions should not only play their role as Murabbi and Muaddib but also as Mu'allim, Students are not only equipped with knowledge and skills but also good religious and moral strength. Thus, educators should possess spiritual strength with the help and support from the community as the foundation of human development. Therefore, Islam emphasizes self-esteem and integrity including 'aqidah, ibadah, and akhlaq to produce pious people (Asmawati & Aderi, 2016; Amzar & Noorsafuan, 2017). Teaching and learning innovations are important features to be cultivated among educators because it is part of their main task (Halim, 2006;Azmi & Halim, 2007). It is undeniable that education plays an important role in determining the future of society and nation. 
The next generation will highly depend on the education patterns provided. 21st Century Learning Skills Fisk explains that new learning vision encourages students to discover not only skills and knowledge but to identify resources for skills and knowledge learning. There are seven soft-skills, namely communication, critical thinking, problem solving, teamwork, lifelong learning, entrepreneurial, ethical and moral roles, professionalism, and leadership skills (Nikitina & Furuoka, 2012). Based on the World Economic Forum, soft skills in IR 4.0 involve 10 skills, such as complex problem solving, critical thinking, creativity, human management, relationships with others, emotional intelligence, judgment and decision making, service orientation, consultation, and cognitive flexibility skills. Cotet et al. list the dimensions of soft skills required in the IR 4.0 which are interpersonal, personal pledges, respect, self-strength, empathy, desire, perfection, selfdiscipline, intellectual curiosity, liberalism, freedom, and creativity skills. KPM focuses on five main principles consisting of critical thinking, problem solving, communication, cooperation, and creativity. he success of the 21st learning approach depends on the intelligence of teachers as professionals who can integrate practical knowledge in classroom concepts and teaching methods (PdPc) (). A study by Anuar and Zakaria found that students in the education field at UiTM were able to use technology easily and have the necessary technical skills in educational learning. A majority of students agreed that they have adequate knowledge and are able to seek various ways to use technology to develop an understanding of education. Challenges and Obstacles in Implementation Industrial development is a challenge for educational institutions, especially universities, in providing competent graduates. 
It has started since the beginning of the first industrial revolution, where human beings were no longer needed for their energy and abilities. In addition, technological advances have also resulted in the retrenchment of employees from their positions as their roles have been replaced by machines. In fact, future demands will be greatly challenging for employees with lack of skills, innovation, and ability to operate various technological devices in the industry. Therefore, university graduates need to obtain better abilities than high school graduates. To be able to compete in highly competitive industries, human resources need to have skills, innovation, a mastery in technology and information, and survivability (). Therefore, the role of higher education is significant in providing the younger generation with good vision and knowledge in the form of theory and practice, as well as various skills and soft skills upgrades so that the students will be prepared to make the best of the IR 4.0. Moreover, the aspect of educators' knowledge is also important as not all teachers have the skills and the exposure to technological tools to implement innovative teaching in the classroom. A study conducted by Ilias and Ladin found that the knowledge of the teaching staff on IR 4.0 was still at a moderate level. Changes in the context of continuous learning and information management through the evolution of the internet have challenged the concepts and theories of traditional education, particularly on the notions of classrooms and teaching methods (PdPc). The impact of technology will change social relationships and future generations. As software programs are enhanced, technology-based learning should also be mastered. Furthermore, the IR 4.0 also poses more challenges related to moral establishment and character of the younger generation. 
From an observation study conducted in a university environment, Raka postulated that students were eager to use their study period not only to enhance their knowledge but also to enhance their own identities. This indicated that universities have a very complex function as educational institutions. Higher education will generally shape the character of students by guiding local wisdom and social culture of the local community. Religious education is also an essential element in shaping the qualities of students. The importance of teaching morals and shaping the character of students is to motivate them to frequently act according to the norms and teachings of religion in the era of IR 4.0. A study conducted by Robles found that some weaknesses in the integration of soft skills in the classroom were due to the workload of teachers, excessive syllabi, and unclear approaches in teaching soft skills. The teaching of soft skills is more challenging compared to the teaching of skills in academic subjects or hard skills. The application of soft skills can increase involvement in learning and cultivate communication, critical thinking, and creativity skills. The latest generation Z ; Alpha have unique and different characters which requires an in-depth understanding. According to Tapscott, these generations are born surrounded by extensive use of the internet and the way they obtain and understand any knowledge including the world is by navigating through a particular issue. Smith and Anderson stated that 48% of experts have predicted that robots and digital agents will lead to unemployment by 2025, while the remaining 52% perceived that technology will not replace jobs but without up-to-date knowledge and skills will trigger the inability to compete. Views and Recommendations Transformation and innovation in teaching and education in Malaysia can be assessed through the current level of learning and development of students. 
Changes in student learning can be seen through a holistic approach that emphasizes intellectual, spiritual, emotional, and physical values in line with a strong national identity (Malaysia Education Development Plan, 2012). In the Malaysian Education Development Plan (MEDP) 2013-2025, educators are the drivers in the educational development by further strengthening the nation's civilization through the invention of dynamic human module. Through quality and established education, we are able to produce world-class students with critical thinking skills of the 21st century in step with the National Education Philosophy. The goals that have been outlined in the MEDP 2013-2025 are as follows: 1) to fully develop the potentials of individuals in terms of physical, emotional, spiritual, and intellectual aspects; 2) to enhance students' creativity, innovation, and culture; 3) to involve science and technology as well as lifelong learning; 4) to provide more efficient, effective, and world-class education system; 5) to make Malaysia a center of educational excellence; and 6) to improve the quality of the Malaysian education system internationally. Conclusion Industrial revolutions from 1.0 until 4.0 provide opportunities for employment to those with the necessary skills in various fields. The employment world is seeking employees equipped not only with skills in general science but also good moral qualities. Therefore, it is a necessity for Islamic higher learning institutions to provide graduates with competency, professionalism, and superior moral characteristics who will meet the demands for various professional fields. |
Elastography as a predictor of liver cirrhosis complications after hepatitis C virus eradication in the era of direct-acting antivirals Chronic inflammation due to hepatitis C virus (HCV) infection leads to liver fibrosis and rearrangement of liver tissue, which is responsible for the development of portal hypertension (PH) and hepatocellular carcinoma (HCC). The advent of direct-acting antiviral drugs has revolutionized the natural history of HCV infection, providing an overall eradication rate of over 90%. Despite a significant decrease after sustained virological response (SVR), the rate of HCC and liver-related complications is not completely eliminated in patients with advanced liver disease. Although the reasons are still unclear, cirrhosis itself has a residual risk for the development of HCC and other PH-related complications. Ultrasound elastography is a recently developed non-invasive technique for the assessment of liver fibrosis. Following the achievement of SVR, liver stiffness (LS) usually decreases, as a consequence of reduced inflammation and, possibly, fibrosis. Recent studies emphasized the application of LS assessment in the management of patients with SVR in order to define the risk for developing the complications of chronic liver disease (functional decompensation, gastrointestinal bleeding, HCC) and to optimize long-term prognostic outcomes in clinical practice. INTRODUCTION Hepatitis C virus (HCV) infection is one of the major causes of chronic liver disease and a significant cause of morbidity and mortality worldwide. In 2015, it was estimated that over 70 million people were affected, most of whom were unaware of the infection. Chronic inflammation due to HCV infection leads to liver fibrosis and rearrangement of liver tissue, which is responsible for the development of portal hypertension (PH) and other complications. 
Moreover, inflammation and microenvironmental changes are known risk factors for the occurrence of hepatocellular carcinoma (HCC). The advent of direct-acting antiviral drugs (DAAs) has revolutionized the natural history of HCV infection, providing an overall eradication rate of over 90% associated with a remarkable safety profile in all stages of chronic liver disease. The achievement of sustained virological response (SVR) prevents the development of cirrhosis in the early stages of the disease and significantly reduces the risk of HCC and PH-related events, such as ascites, hepatic encephalopathy, hepatorenal syndrome, infections and gastrointestinal bleeding, in patients with advanced liver disease. However, initial reports have warned of an increased risk of HCC in patients who achieved SVR after treatments with DAAs. On the other hand, other studies have shown a protective effect on the development of HCC. More recently, a meta-analysis analyzing 41 studies concluded that there is no evidence for increased occurrence or recurrence of HCC in patients treated with DAAs compared with interferon-based therapies. Despite a significant decrease after SVR, the rate of HCC and liver-related complications is not completely eliminated in patients with advanced liver disease. Although the reasons are still unclear, cirrhosis itself has a residual risk for the development of HCC and other PH-related complications. At present, there are no validated predictors to estimate the risk of HCC and PH-related events after HCV eradication. Ultrasound elastography is a recently developed non-invasive technique for the assessment of liver fibrosis. Vibration controlled transient elastography (VCTE), is the oldest share-wave-based method and the reference standard in this field. The device is equipped with a one-dimensional probe, where a vibrator sends low frequency shear waves through the liver. 
Wave propagation, evaluated by an ultrasound receiver inside the probe, is directly related to liver tissue elasticity. Since its emergence, this technique has provided a fast point-of-care estimate of liver fibrosis in daily clinical practice, avoiding the complications of liver biopsy. Indeed, several studies using histology as the reference standard defined accurate thresholds that are able to distinguish the different stages of liver fibrosis. In the last few years, new ultrasound based elastographic techniques have been developed. They are embedded into conventional ultrasound devices, allowing visualization of the sampling area. The two main categories are the point shear wave elastography (pSWE) and bidimensional SWE (2D-SWE). All these devices are able to evaluate the elastic properties of the liver during real-time B mode imaging. In particular, the ultrasound probe generates short-duration acoustic impulses in a small region of interest that causes soft tissue displacement and shear waves running in the perpendicular plane. Shear wave travelling speed can then be quantified and interpreted as a measurement for liver stiffness (LS). To date, LS measurement (LSM) is recommended by the European Association for the Study of Liver Disease (EASL) and the American Association for the Study of Liver Disease (AASLD) guidelines for the assessment of liver disease severity in patients with HCV infection eligible for DAAs. Following the achievement of SVR, LS usually decreases, as a consequence of reduced inflammation and, possibly, fibrosis. Recent studies evaluated the usefulness of LS assessment after HCV eradication and the prediction of HCC and other PH-related complications in patients with advanced liver disease. In this review, we summarize the current evidence on the role of ultrasound elastography in the prediction of liver-related outcomes of patients with HCV infection treated with DAAs. 
DIRECT-ACTING ANTIVIRAL AGENTS AND LIVER FIBROSIS Despite DAAs being pharmacologically designed only for the eradication of HCV infection and since HCV is directly responsible for liver injury and consequent parenchymal fibrosis, the achievement of both SVR and anti-fibrotic effect results in advantages in terms of prevention of chronic liver disease complications (Table 1). Different non-invasive methods traditionally used to assess liver fibrosis such as VCTE and the Fibrosis-4 (FIB-4) score (based on patient's age, transaminases levels and platelet count) and aspartate aminotransferase to platelet ratio index (APRI score) have been evaluated for staging chronic liver disease and predicting hepatic fibrosis in patients with HCV infection. It has been demonstrated that baseline LSM by VCTE together with FIB-4 and APRI score have an important role in the prediction of treatment outcome in the new era of DAAs and could be integrated in pre-treatment assessment as a guide for treatment decisions and optimization of patient management. Many authors have documented the improvement of VCTE, FIB-4 and APRI score after DAAs treatment. However, it is not clear if this finding is a true recovery of liver fibrosis or represents only an epiphenomenon of the reduction in liver inflammation resulting in the normalization of blood tests and decrease of LS values. The retrospective study by Elsharkawy et al analyzed a group of 337 Egyptian patients with chronic genotype 4 HCV infection who underwent sofosbuvir-based treatments. Among the patients evaluated, 29.1% had non-relevant fibrosis (F0-1; VCTE < 7.1 kPa), 17.2% were included in the F2 group (7.1 kPa ≤ VCTE < 9.5 kPa), 8.6% in the F3 group (VCTE ≥ 9.5 kPa) and 45.1% were classified as cirrhotic (F4; ≥ 12.5 kPa). 
One year after treatment, 77% of responders (with any stage fibrosis) and 81.8% of cirrhotic patients had a valuable recovery in liver fibrosis parameters (measured with FIB-4 and APRI score), due to the increase in platelet count and decrease in transaminase levels together with a reduction in LS values (11.8 ± 8.8 kPa vs 14.8 ± 10.7 kPa, P = 0.000). A higher number of patients with poor LS improvement after DAAs-therapy was observed in cases with low baseline LS values and infection relapse. In a group of 42 patients treated with DAAs, Chekuri et al demonstrated a significant decrease in LS values at SVR 24 wk after the end of treatment (median values: 10.40 kPa vs 7.60 kPa, P < 0.01), without significant improvement in the follow-up. Abdel Alem et al used pre-treatment liver fibrosis (measured by VCTE and FIB-4 score) as a predictor of treatment outcome after sofosbuvir-based regimens in 7256 HCV patients (46.6% cirrhotic,91.4% with SVR12). Both, baseline FIB-4 and VCTE were significantly lower in the group with SVR (2.66 ± 1.98 kPa and 17.8 ± 11.5 kPa, respectively) compared to relapsers (4.02 ± 3.3 kPa and 24.5 ± 13.9 kPa, respectively). Based on these results, the authors concluded that fibrosis stage is a crucial element in the evaluation of treatment outcome and disease prognosis. In particular, a LS value higher than 16.7 kPa resulted as an unfavorable prognostic factor for treatment response (relapse rate 13%), probably related to an impaired immune-mediated HCV clearance that is worsened in advanced liver fibrosis. Similar considerations were drawn by Neukam et al in patients treated with pegylated interferon/ribavirinbased therapy associated with NS3/4A protease inhibitor (PR-PI) and patients under DAAs therapy. 
In the PR-PI group, SVR12 was obtained in 59.6% of patients with LS < 21 kPa and in 46.5% of subjects with LS ≥ 21 kPa (P = 0.064); in the DAAs group, Many studies reported a significant reduction in liver fibrosis markers after treatment with DAAs. In particular, Bachofner et al highlighted a 32.4% drop in VCTE values from 12.65 kPa to 8.55 kPa (P < 0.001), a reduction of FIB-4 from 2.54 to 1.80 (P < 0.001) and a decrease of APRI from 1.10 to 0.43 (P < 0.001). DIRECT-ACTING ANTIVIRAL AGENTS AND LIVER CIRRHOSIS RELATED EVENTS Even though DAA-therapy leads to HCV eradication and to the regression of liver inflammation, it does not eliminate the risk of possible PH-related complications and HCC, increasing the necessity for post-SVR surveillance and the development of noninvasive predictive models to detect the categories of patients requiring more intensive follow-up (Table 2). To this purpose, Trivedi et al suggested a VCTE-based algorithm in order to schedule the controls of patients with SVR after HCV eradication: In the case of mild fibrosis (F1) without liver-related comorbidities, regular monitoring with the primary care physician is indicated; for advanced fibrosis/cirrhosis (F3-4), routine HCC and variceal surveillance is prescribed (six-monthly ultrasound, upper endoscopy every 2-3 years, annual non-invasive fibrosis assessment); for moderate fibrosis (F2) or in the case of concomitant liver-related comorbidities an annual non-invasive fibrosis measurement should be performed. The importance of liver fibrosis stage in the development of liver-related complications was confirmed by Kozbial et al, who analyzed 551 patients treated with DAAs for a median period of 65.6 wk: No complications were registered in patients with severe fibrosis, whereas 9.1% of subjects with compensated cirrhosis developed liver-associated complications including HCC (4.1%). 
Furthermore, the presence of decompensated cirrhosis was markedly associated with the development of complications and mortality. Even though histology remains the gold standard in evaluating fibrosis, liver biopsy presents some potential obstacles such as patient compliance, severe post-procedural complications, and sampling errors. For this reason, elastography has been proposed as a possible non-invasive alternative to biopsy for patient surveillance after SVR. VCTE is gaining growing importance as a predictive element in the assessment of the risk of developing esophageal varices or gastrointestinal bleeding, liver functional decompensation and HCC. The retrospective study by Mandorfer et al was the first to compare Hepatic Venous Pressure Gradient (HVPG) measurement with VCTE for the assessment of PH and showed a good agreement between the techniques. The authors also observed that a PH decrease after SVR was less likely in subjects with baseline HVPG higher than 16 mmHg and severe liver function impairment. The review by Garbuzenko et al confirmed that staging the severity of PH in cirrhotic subjects and personalized preventive therapy could lead to an increase in both patient survival and treatment effectiveness; particularly, DAAs achieve the amelioration of subclinical PH. In a recent study by Afdhal et al of 50 patients with clinically significant PH (presence of esophageal varices, HVPG > 6 mmHg) from different international centers, 89% obtained a HVPG reduction of > 20% and only 3 patients obtained a reduction of portal pressure to less than 12 mmHg. Paternostro et al endorsed spleen stiffness measurement (SSM) through elastography (especially pSWE and 2D-SWE) as an effective tool for high-risk varices assessment in chronic liver disease, especially in distinguishing between small and large varices as confirmed by Sharma et al. 
Previously, both Colecchia et al and Fraquelli et al had underlined the efficacy of LSM and SSM association in the assessment of HVPG and prediction of gastroesophageal varices in cirrhotic patients, showing a very high sensitivity (98% and 100% in the two studies, respectively), and economic advantages following the implementation of endoscopic screening programs. However, there are some important limitations related to SSM: It is an operator-dependent measurement and the upper limit of VCTE is fixed to a fibrosis value of 75 kPa that, in the case of severe PH, could be widely exceeded by SSM unlike LSM. Concerning the latter issue, Calvaruso et al demonstrated the superior predictive value of SSM for high-risk varices, adopting a modified VCTE unit with a maximum stiffness value of 150 kPa (AUC: 0.80 for SSM vs 0.71 for LSM). It has been demonstrated that the association of LSM with other non-invasive items (e.g. platelets, SSM) has a powerful positive predictive value in the detection of esophageal varices: Stefanescu et al created a simple diagnostic algorithm with the combination of LSM and SSM (cut-off: 19 kPa and 55 kPa, respectively), thus reaching a 93% sensitivity and a 95% positive predictive value. Wang et al observed that the combination of Baveno VI criteria with SSM (with 46 kPa cut-off) might help to avoid 61.6% of esophagogastroduodenoscopies in HBV-related cirrhosis with persistent viral suppression due to antiviral therapy, missing less than 5% high-risk varices. An interesting analysis by Fofiu et al evaluated a score based on the combination of LSM, SSM and spleen size as non-invasive predictors of high-risk varices in compensated cirrhosis, proving a better performance of the association of the three elements compared to each parameter alone.
However, a meta-analysis by Ma et al found that SSM alone is superior to LSM in predicting any grade esophageal varices, thus turning out to be useful in clinical practice, especially in the case of nonmeasurable LSM (multifocal HCC, biliary obstruction or liver metastasis). Semmler et al underlined the predictive value of LSM by VCTE included in a non-invasive algorithm together with von Willebrand factor-platelet count ratio as a useful method to define PH, stratify risk categories and predict liver decompensation and HCC development in patients with HCV-related advanced chronic liver disease treated with DAAs. These results could be very interesting in introducing the concept of a tailored follow-up strategy. It is still not clear if the improvement in non-invasive markers after SVR could be associated to a decline in PH itself. However, in a recent study, Thabut et al noted that subjects with previous unfavorable Baveno VI status (LS > 20 kPa, platelets < 150000/mm 3 ) who experienced platelets increase and/or LS reduction after SVR reached a favorable Baveno VI class, with a subsequent reduction in the probability of PH progression and development of esophageal varices. A decrease of PH has also been demonstrated by Giannini et al in a group of 52 patients with advanced fibrosis/cirrhosis at baseline followed for approximately 60 wk after SVR with DAAs. A significant improvement in HVPG was detected, together with a decrease in LS values (from 15.2 kPa at baseline to 9.3 kPa at the end of follow-up), APRI and FIB-4 score, spleen bipolar diameter and an increase in platelet count. 
As the role of these indices is quite limited, other non-invasive methods have been proposed to detect varices at high risk of bleeding: Considering the worldwide low availability of TE, Jangouk et al demonstrated the effectiveness of Baveno VI consensus criteria as a non-invasive method to identify patients with compensated liver cirrhosis and low-risk of varices requiring endoscopic treatment. In particular, the authors highlight the uppermost role of both platelet count (> 150000/mm 3 ) and MELD score (< 6) in defining a low probability of high-risk varices. Chen et al demonstrated the efficacy and extremely high negative predictive value (97.1% in the study group and 98.1% in the validation cohort) of the association of albumin-bilirubin grade with platelet count (ALBI-PLT score) in the screening of high-risk esophageal varices in subjects with HCC: The 5-year variceal hemorrhage rate was 9.7% in patients with ALBI-PLT score > 2 (decompensated liver disease) as compared to 1.7% in those with a score of 2 (P = 0.007). Baveno VI guidelines indicate platelet count and VCTE as effective elements in the identification of cirrhotic patients who are at high-risk of developing esophageal varices: Due to the not-always easy access to VCTE (for example, in the case of inmates) or to the unavailability of adequate instrumentation in all hepatological centers, Calvaruso et al proposed the "Rete Sicilia Selezione Terapia-HCV" algorithm as an effective and simple tool (based only on blood tests: Platelet count and serum albumin level) that could substitute Baveno VI criteria in the identification of HCV-cirrhotic patients with medium/large varices, thus simplifying the diagnosis of the complications of PH, with a reduction of more than 30% of useless endoscopic exams and diminishing the risk of false-negative results. The implications of HCV eradication on HCC development are even more complex. 
Despite the widely demonstrated efficacy of DAAs in both achieving SVR and a reduction in liver fibrosis, there is no corresponding decrease in HCC development risk. These data led to an initial alert claiming the possibility of a DAAs-driven oncogenic mechanism, even if this theory was subsequently proved wrong by other studies. The mechanism of HCC development post SVR is probably sustained by a "point of no-return" in HCV pathogenesis that determines the loss of the potential benefits brought by viral eradication. This evidence highlights the necessity for optimizing regular HCC surveillance with a particular focus on patients with advanced fibrosis or cirrhosis. In fact, even though a decrease in LS values from cirrhosis to advanced fibrosis was observed in some cases after DAAs therapy, patients with SVR maintained an elevated HCC risk. Whether the HCC risk of patients with SVR coincides with that of viremic subjects is still a matter of debate. In the case of precariously compensated or decompensated liver function, the achievement of SVR could be useful to reduce the risk of HCC because of the decrease in intrahepatic inflammatory processes, despite the persistence of PH and decompensated liver function (that increase the risk of liver cancer in cirrhotic patients). Both EASL and AASLD guidelines recommend continuing ultrasound surveillance in subjects with advanced fibrosis/cirrhosis despite histological response to treatment and suggest accurate definition of the additional baseline risk-factors profile. Rinaldi et al assessed the importance of both baseline LS evaluation and ultrasound liver surveillance for the risk of HCC in patients with HCV-related cirrhosis, treated with DAAs: Among 258 subjects enrolled, divided into three groups according to liver fibrosis stage (< 20 kPa, from 20 kPa to 30 kPa, > 30 kPa), 35 developed HCC during follow-up.
The group with LS higher than 30 kPa had a statistically significant increase in HCC risk . Even though the mechanisms directly involving HCV in both fibrogenesis and oncogenesis have not yet been completely explained, it seems crucial to define the degree of liver fibrosis through VCTE and FIB-4, in order to set appropriate HCC screening and the subsequent therapeutic strategy. Many attempts have been made to create prognostic scores to evaluate the risk of HCC development in chronic liver diseases, considering other criteria than PH alone. An interesting example is represented by the King score that includes laboratory parameters (platelet count and bilirubin levels) and gene signature, and classifies cirrhotic patients with HCV infection into three risk categories for functional decompensation, HCC and death. However, it is not clear if this score maintains its predictive efficacy in patients with SVR. Ravaioli et al studied 139 cirrhotic patients treated with DAAs, analyzing the difference between LS at baseline and at the end of treatment: They found a lower reduction of LS in patients who developed HCC compared to patients who did not (-18.0% vs -28.9%, P = 0.005). Recent studies demonstrated that LS assessment after SVR could be an inaccurate method to define the grade of fibrosis in patients treated with DAAs. In fact, the fast modifications in LS could be determined by both the reduction of liver inflammatory activity and the narrowing of fibrotic septa, without real histological improvement in fibrosis grading as demonstrated by liver biopsy. Notwithstanding, LS evaluation by VCTE remains a cornerstone in the assessment of HCC risk after SVR, especially due to its non-invasiveness. Masuzaki et al demonstrated that HCC risk was 45.5 times higher in patients with LS values higher than 25 kPa. However, it becomes important in the association to other elements in a more complete non-invasive score. 
Among them, we can include: Age, alcohol abuse, pretreatment advanced fibrosis/cirrhosis, platelet count, steatosis, diabetes, alfa fetoprotein (AFP), baseline gamma-glutamyltransferase (GGT) levels together with ethnic and environmental factors. All these factors have been studied in patients treated with interferon-based therapies with interesting results. During the pre-DAAs era, studies on the complications of liver cirrhosis after HCV-treatment showed that SVR and fibrosis regression did not prevent hepatic carcinogenesis. D'Ambrosio et al found that 13% of patients who responded to interferon-based treatments, developed HCC during an 8-year follow-up (17% cumulative probability and 1.2% annual incidence rate) whereas neither variceal-bleeding nor liver-function decompensation occurred. Higher baseline levels of GGT and glycemia were identified as risk factors for HCC development. Similarly, Toyoda et al demonstrated that diabetes mellitus and FIB-4 index increase represent risk factors for HCC after SVR with interferon-based regimens, thus suggesting continuing active surveillance in these groups of patients. In a prospective analysis of 1927 patients with HCV-related cirrhosis, receiving DAAs in ten tertiary Italian liver centers, Lleo patients/year. They found that treatment failure and high AFP levels represent independent predictors of HCC development, while SVR and absence of PH are associated with a lower HCC incidence, suggesting that HCC risk stratification should rely on the presence of PH and elevated baseline AFP levels. It has been suggested that PH as a complication of liver fibrosis (more than fibrosis itself) may represent an independent risk factor for HCC. Afdhal et al analyzed 50 patients with HCV-related liver cirrhosis treated with DAAs and observed a significant reduction in HVPG values during long-term follow-up after SVR: 24% of all patients and 89% of subjects with baseline HVPG ≥ 12 mmHg who reached SVR had a ≥ 20% reduction in HVPG. 
With regard to LS, a more evident improvement was observed in patients who did not develop HCC during follow-up (42.6% reduction in patients without HCC vs 13.6% in the HCC group), thus proposing a protective role of HVPG and LS against HCC development. In a recent retrospective study performed in patients with SVR after DAAs, Hamada et al, identified six variables that could be included in the HCC prediction model: Age, body mass index, platelet count, albumin, AFP, LS and FIB-4 index. Following multivariate analysis they found that age ≥ 75 years, AFP ≥ 6 ng/mL, and LS ≥ 11 kPa were independent risk factors for hepatocarcinogenesis (risk ratio: 35.16, 43.30 and 28.71, respectively; P = 0.001, 0.003 and 0.006, respectively). In particular, patients with LS < 11 kPa had a cumulative HCC incidence of 1.3% at 12 mo, 24 mo, 36 mo and 48 mo, while in the group with LS > 11 kPa the HCC incidence rate was 4.6% at 12 mo and 24 mo, 24.8% at 36 mo and 62.4% at 48 mo. The role of LSM in the development of a prediction model for HCC has also been emphasized by Feier et al. They confirmed that high levels of AFP, transaminases and LS are excellent predictors of HCC but underlined the importance of interquartile range (IQR) in LSMs. This led to the hypothesis of "stiffness shadow" that indicated an inhomogeneous shear stress due to the chaotic tumoral growth in the already hard cirrhotic tissue, with relevant diagnostic repercussions. The overall prognostic model combining the four variables demonstrated relevant results both in the training and validation phase with a positive relation with tumor size. The four parameters together showed a 64.5% HCC prediction, with LS alone reaching the highest predictive power. The authors concluded that an elevation in LS values and IQR during follow-up could enhance the diagnostic skill towards early HCC. 
It is interesting to note that some genetic factors also seem to be involved in hepatocarcinogenesis, despite the lack of clear evidence and the need for further prospective studies. In their cohort of 200 patients with HCV-related cirrhosis with SVR after DAAs, Simili et al noted a strong association of the single-nucleotide polymorphism of interleukin 28 (IL28B-rs12979860) with HCC development (both de novo and disease recurrence); furthermore, they observed a relation of HCC with lower levels of serum retinol and the presence of another two polymorphisms: Major histocompatibility complex class I polypeptide-related sequence A gene (MICA) and tolloid-like 1. The latter has proven particularly controversial since its oncogenic role was stated by Matsuura et al but denied by Degasperi et al: The difference between these studies could be ascribed to the different allele frequency or the presence of still unknown cofactors in the two ethnic groups (Japanese and Caucasian) or to discrepancies in the length of the follow-up period. CONCLUSION DAAs-therapy has brought about an effective revolution in hepatology resulting in HCV eradication in a wide range of patients and eventually reducing liver fibrosis after SVR. However, these benefits have not erased the risk of developing liver disease-related complications and in particular HCC and PH associated events. For this reason, it is crucial to continue long-term systematic surveillance after HCV eradication focusing on the subjects with a high-risk score. Due to its accuracy, cost-effectiveness and non-invasiveness, together with specific clinical and laboratory parameters, LSM is gaining a relevant role in the construction of algorithms assessing both liver fibrosis and PH. 
The potential application of this non-invasive and simple method has been emphasized especially in the management of patients with SVR in order to define the risk to develop the complications of chronic liver disease (functional decompensation, gastrointestinal bleeding, HCC) and optimize long-term prognostic outcomes in clinical practice. |
Emerging regulators of vascular smooth muscle cell function in the development and progression of atherosclerosis. After a period of relative senescence in the field of vascular smooth muscle cell (VSMC) research with particular regards to atherosclerosis, the last few years has witnessed a resurgence, with extensive research re-assessing potential molecular mechanisms and pathways that modulate VSMC behaviour within the atherosclerotic-prone vessel wall and the atherosclerotic plaque itself. Attention has focussed on the pathological contribution of VSMC in plaque calcification; systemic and local mediators such as inflammatory molecules and lipoproteins; autocrine and paracrine regulators which affect cell-cell and cell to matrix contacts alongside cytoskeletal changes. In this brief focused review, recent insights that have been gained into how a myriad of recently identified factors can influence the pathological behaviour of VSMC and their subsequent contribution to atherosclerotic plaque development and progression has been discussed. An overriding theme is the mechanisms involved in the alterations of VSMC function during atherosclerosis. |
Ancillary Ligands Impact Branching Microstructure in Late-Transition-Metal Polymerization Catalysis The influence of the labile ligand on the rate of -hydride elimination (BHE) for salicylaldiminato Ni(II) complexes is shown using a series of precatalysts which differ only in the labile ligand but produce polyethylenes with a range of molecular weights (Mn = 466 to 100 kg mol1), degrees of branching (1.7 to 7.3 branches/1000 C), and melting temperatures (from 132 to 123 °C) under the same conditions. The use of a weakly coordinating solvent (diethyl ether) was able to suppress this increase in BHE. DFT studies on a related salicylaldiminato Ni(II) complex show that BHE can feasibly occur following recombination of the labile ligand with the catalyst. |
. Oxidative stress at the retinal pigment epithelium (RPE) is involved in the pathophysiology of age-related macula degeneration (ARMD). Observations on a clinical or laboratory level have revealed that supplementation of antioxidative scavengers failed in many cases. A potential therapeutic target is the cellular signal transduction cascade initiated by oxidative stress which results, e. g., in altered expression of pro- and antiagiogenic factors as well as induction of apoptosis. This review summarises the current literature on cellular effects of free radicals and deduces potential therapeutic approaches to protect the RPE from oxidative damage. |
package com.anychart.anychart;
import com.anychart.anychart.chart.common.ListenersInterface;
import java.util.Locale;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
// chart class
/**
* Mekko chart class.<br/>
To get the chart use any of these methods:
<ul>
<li>{@link anychart#mosaic}</li>
<li>{@link anychart#mekko}</li>
<li>{@link anychart#barmekko}</li>
</ul>
*/
public class ChartsMekko extends SeparateChart {
/**
 * Creates a Mekko chart wrapper for the given JS constructor name
 * (e.g. "mosaic", "mekko", "barmekko") and initializes the script
 * buffer with the chart-creation statement.
 */
protected ChartsMekko(String name) {
    super(name);

    // Reset the shared script buffer and emit e.g. "chart = mekko();".
    js.setLength(0);
    js.append("chart = ").append(name).append("();");
    jsBase = "chart";
}
/**
 * Registers a native click listener and emits the JS bridge code that
 * forwards point-click events (and optionally the requested point fields)
 * to {@code android.onClick}.
 *
 * @param listener receiver of click callbacks; {@code getFields()} names
 *                 the point attributes to serialize into the callback payload
 */
public void setOnClickListener(ListenersInterface.OnClickListener listener) {
    // Flush any pending chained call before emitting a standalone statement.
    if (isChain) {
        js.append(";");
        isChain = false;
    }
    js.append("chart.listen('pointClick', function(e) {");
    if (listener.getFields() != null) {
        js.append("var result = ");
        boolean hasFields = false;
        for (String field : listener.getFields()) {
            // Each field contributes "'f' + ':' + e.point.get('f') + ',' +".
            js.append(String.format(Locale.US, "'%1$s' + ':' + e.point.get('%1$s') + ',' +", field));
            hasFields = true;
        }
        if (hasFields) {
            // Strip the trailing " + ',' +" (8 chars) left by the loop.
            js.setLength(js.length() - 8);
            js.append(";");
            js.append("android.onClick(result);");
        } else {
            // Bug fix: the original code trimmed 8 chars unconditionally, so a
            // non-null but empty field list corrupted the buffer ("var result = "
            // lost its tail). Roll back the dangling declaration instead.
            js.setLength(js.length() - "var result = ".length());
            js.append("android.onClick(null);");
        }
    } else {
        js.append("android.onClick(null);");
    }
    js.append("});");
    ListenersInterface.getInstance().setOnClickListener(listener);
}
/**
 * Adds series to the chart.
 *
 * @param data rows to serialize; an empty list is a no-op
 */
public void addSeries(List<DataEntry> data) {
    // Terminate any pending chained call before a standalone statement.
    if (isChain) {
        js.append(";");
        isChain = false;
    }
    if (!data.isEmpty()) {
        // Serialize the entries as a JS array literal: [e1,e2,...].
        StringBuilder payload = new StringBuilder("[");
        String separator = "";
        for (DataEntry entry : data) {
            payload.append(separator).append(entry.generateJs());
            separator = ",";
        }
        payload.append("]");
        js.append(String.format(Locale.US, "var " + ++variableIndex + " = " + jsBase + ".addSeries(%s);", payload.toString()));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, jsBase + ".addSeries(%s);", payload.toString()));
            js.setLength(0);
        }
    }
}
/**
 * Adds series to the chart from a data view: the view's own script is
 * emitted first so its JS variable exists before it is referenced.
 */
public void addSeries(View view) {
    if (isChain) {
        js.append(";");
        isChain = false;
    }
    js.append(view.generateJs());
    // Build the call fragment once; it is reused for the change notification.
    String invocation = String.format(Locale.US, jsBase + ".addSeries(%s);", view.getJsBase());
    js.append("var " + ++variableIndex + " = " + invocation);
    if (isRendered) {
        onChangeListener.onChange(invocation);
        js.setLength(0);
    }
}
// Cached annotations controller; created on first access.
private PlotController getAnnotations;
/**
 * Getter for the annotations.
 */
public PlotController getAnnotations() {
    // Lazily instantiate the wrapper bound to "<jsBase>.annotations()".
    if (getAnnotations != null) {
        return getAnnotations;
    }
    getAnnotations = new PlotController(jsBase + ".annotations()");
    return getAnnotations;
}
private String[] annotationsList;
/**
 * Setter for the annotations.
 *
 * @return this chart, for call chaining
 */
public ChartsMekko setAnnotations(String[] annotationsList) {
    // Open (or continue) a chained call on the chart variable.
    if (!isChain) {
        js.append(jsBase);
        isChain = true;
    }
    // Build the JS fragment once; it is reused for the change notification.
    String fragment = String.format(Locale.US, ".annotations(%s)", arrayToStringWrapQuotes(annotationsList));
    js.append(fragment);
    if (isRendered) {
        onChangeListener.onChange(fragment);
        js.setLength(0);
    }
    return this;
}
// Cached crosshair wrapper; created on first access.
private Crosshair getCrosshair;
/**
 * Getter for crosshair settings.
 */
public Crosshair getCrosshair() {
    // Lazily instantiate the wrapper bound to "<jsBase>.crosshair()".
    if (getCrosshair != null) {
        return getCrosshair;
    }
    getCrosshair = new Crosshair(jsBase + ".crosshair()");
    return getCrosshair;
}
private String crosshair;
private Boolean crosshair1;
/**
 * Setter for crosshair settings.
 *
 * @return this chart, for call chaining
 */
public ChartsMekko setCrosshair(String crosshair) {
    if (!isChain) {
        js.append(jsBase);
        isChain = true;
    }
    // Build the JS fragment once; it is reused for the change notification.
    String fragment = String.format(Locale.US, ".crosshair(%s)", wrapQuotes(crosshair));
    js.append(fragment);
    if (isRendered) {
        onChangeListener.onChange(fragment);
        js.setLength(0);
    }
    return this;
}
/**
 * Setter for crosshair settings (enable/disable flag variant).
 *
 * @return this chart, for call chaining
 */
public ChartsMekko setCrosshair(Boolean crosshair1) {
    if (!isChain) {
        js.append(jsBase);
        isChain = true;
    }
    // Build the JS fragment once; it is reused for the change notification.
    String fragment = String.format(Locale.US, ".crosshair(%b)", crosshair1);
    js.append(fragment);
    if (isRendered) {
        onChangeListener.onChange(fragment);
        js.setLength(0);
    }
    return this;
}
// Cached data view wrapper; created on first access.
private View getData;
/**
 * Getter for the data.
 */
public View getData() {
    // Lazily instantiate the wrapper bound to "<jsBase>.data()".
    if (getData != null) {
        return getData;
    }
    getData = new View(jsBase + ".data()");
    return getData;
}
/**
 * Setter for the data.
 *
 * @param data rows to serialize; an empty list is a no-op
 * @return this chart, for call chaining
 */
public ChartsMekko setData(List<DataEntry> data) {
    // Terminate any pending chained call before a standalone statement.
    if (isChain) {
        js.append(";");
        isChain = false;
    }
    if (!data.isEmpty()) {
        // Serialize the entries as a JS array literal: [e1,e2,...].
        StringBuilder payload = new StringBuilder("[");
        String separator = "";
        for (DataEntry entry : data) {
            payload.append(separator).append(entry.generateJs());
            separator = ",";
        }
        payload.append("]");
        js.append(String.format(Locale.US, "var setData" + ++variableIndex + " = " + jsBase + ".data(%s);", payload.toString()));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, jsBase + ".data(%s);", payload.toString()));
            js.setLength(0);
        }
    }
    return this;
}
/**
 * Sets the chart data from a data view: the view's own script is emitted
 * first so its JS variable exists before it is referenced.
 *
 * @return this chart, for call chaining
 */
public ChartsMekko setData(View view) {
    if (isChain) {
        js.append(";");
        isChain = false;
    }
    js.append(view.generateJs());
    // Build the call fragment once; it is reused for the change notification.
    String invocation = String.format(Locale.US, jsBase + ".data(%s);", view.getJsBase());
    js.append("var setData1" + ++variableIndex + " = " + invocation);
    if (isRendered) {
        onChangeListener.onChange(invocation);
        js.setLength(0);
    }
    return this;
}
private AnychartMathRect getGetPlotBounds;
/**
* Gets data bounds of the chart.<br/>
<b>Note:</b> Works only after {@link anychart.charts.Mekko#draw} is called.
*/
public AnychartMathRect getGetPlotBounds() {
if (getGetPlotBounds == null)
getGetPlotBounds = new AnychartMathRect(jsBase + ".getPlotBounds()");
return getGetPlotBounds;
}
private List<SeriesMekko> getGetSeries = new ArrayList<>();
/**
* Gets series by its id.
*/
public SeriesMekko getGetSeries(Number id) {
SeriesMekko item = new SeriesMekko(jsBase + ".getSeries("+ id+")");
getGetSeries.add(item);
return item;
}
private List<SeriesMekko> getGetSeries1 = new ArrayList<>();
/**
* Gets series by its id.
*/
public SeriesMekko getGetSeries(String id1) {
SeriesMekko item = new SeriesMekko(jsBase + ".getSeries("+ wrapQuotes(id1)+")");
getGetSeries1.add(item);
return item;
}
private List<SeriesMekko> getGetSeriesAt = new ArrayList<>();
/**
* Getter for the series by its index.
*/
public SeriesMekko getGetSeriesAt(Number index) {
SeriesMekko item = new SeriesMekko(jsBase + ".getSeriesAt("+ index+")");
getGetSeriesAt.add(item);
return item;
}
private HatchFills getHatchFillPalette;
/**
* Getter for hatch fill palette settings.
*/
public HatchFills getHatchFillPalette() {
if (getHatchFillPalette == null)
getHatchFillPalette = new HatchFills(jsBase + ".hatchFillPalette()");
return getHatchFillPalette;
}
private HatchFillType[] hatchFillPalette;
private String hatchFillPalette1;
private HatchFills hatchFillPalette2;
/**
* Setter for hatch fill palette settings.
*/
public ChartsMekko setHatchFillPalette(HatchFillType[] hatchFillPalette) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".hatchFillPalette(%s)", arrayToString(hatchFillPalette)));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".hatchFillPalette(%s)", arrayToString(hatchFillPalette)));
js.setLength(0);
}
return this;
}
/**
* Setter for hatch fill palette settings.
*/
public ChartsMekko setHatchFillPalette(String hatchFillPalette1) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".hatchFillPalette(%s)", wrapQuotes(hatchFillPalette1)));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".hatchFillPalette(%s)", wrapQuotes(hatchFillPalette1)));
js.setLength(0);
}
return this;
}
/**
* Setter for hatch fill palette settings.
*/
public ChartsMekko setHatchFillPalette(HatchFills hatchFillPalette2) {
if (isChain) {
js.append(";");
isChain = false;
}
js.append(hatchFillPalette2.generateJs());
js.append(jsBase);
js.append(String.format(Locale.US, ".hatchFillPalette(%s);", ((hatchFillPalette2 != null) ? hatchFillPalette2.getJsBase() : "null")));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".hatchFillPalette(%s)", ((hatchFillPalette2 != null) ? hatchFillPalette2.getJsBase() : "null")));
js.setLength(0);
}
return this;
}
    // Lazily created proxy bound to the chart's labels() getter call.
    private UiLabelsFactory getLabels;
    /**
     * Getter for chart data labels.
     *
     * @return a {@link UiLabelsFactory} bound to {@code <jsBase>.labels()}.
     */
    public UiLabelsFactory getLabels() {
        if (getLabels == null)
            getLabels = new UiLabelsFactory(jsBase + ".labels()");
        return getLabels;
    }
    // NOTE(review): parameter-shadow fields; never read or written in this block.
    private String labels;
    private Boolean labels1;
    // Proxies returned from setLabels(String); collected so their JS is emitted later.
    private List<Cartesian> setLabels = new ArrayList<>();
    /**
     * Setter for chart data labels from a settings string.
     * Declares a JS variable {@code setLabelsN} holding the labels call and
     * returns a {@link Cartesian} proxy bound to that variable.
     */
    public Cartesian setLabels(String labels) {
        if (isChain) {
            js.append(";");
            isChain = false;
        }
        js.append(String.format(Locale.US, "var setLabels" + ++variableIndex + " = " + jsBase + ".labels(%s);", wrapQuotes(labels)));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, jsBase + ".labels(%s)", wrapQuotes(labels)));
            js.setLength(0);
        }
        Cartesian item = new Cartesian("setLabels" + variableIndex);
        setLabels.add(item);
        return item;
    }
    // Concatenates the generated JS of all proxies returned by setLabels(String).
    private String generateJSsetLabels() {
        if (!setLabels.isEmpty()) {
            StringBuilder resultJs = new StringBuilder();
            for (Cartesian item : setLabels) {
                resultJs.append(item.generateJs());
            }
            return resultJs.toString();
        }
        return "";
    }
    // Proxies returned from setLabels(Boolean); collected so their JS is emitted later.
    private List<Cartesian> setLabels1 = new ArrayList<>();
    /**
     * Setter for chart data labels from an enable/disable flag.
     * Declares a JS variable {@code setLabels1N} and returns a {@link Cartesian}
     * proxy bound to it.
     */
    public Cartesian setLabels(Boolean labels1) {
        if (isChain) {
            js.append(";");
            isChain = false;
        }
        js.append(String.format(Locale.US, "var setLabels1" + ++variableIndex + " = " + jsBase + ".labels(%b);", labels1));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, jsBase + ".labels(%b)", labels1));
            js.setLength(0);
        }
        Cartesian item = new Cartesian("setLabels1" + variableIndex);
        setLabels1.add(item);
        return item;
    }
    // Concatenates the generated JS of all proxies returned by setLabels(Boolean).
    private String generateJSsetLabels1() {
        if (!setLabels1.isEmpty()) {
            StringBuilder resultJs = new StringBuilder();
            for (Cartesian item : setLabels1) {
                resultJs.append(item.generateJs());
            }
            return resultJs.toString();
        }
        return "";
    }
    // Proxies returned from mekko(List); collected so their JS is emitted later.
    private List<SeriesMekko> setMekko = new ArrayList<>();
    /**
     * Adds Mekko series from a list of data entries.
     * Declares a JS variable {@code setMekkoN} holding the new series and
     * returns a {@link SeriesMekko} proxy bound to that variable.
     *
     * NOTE(review): when {@code data} is empty no {@code var setMekkoN}
     * declaration is emitted, yet the returned proxy still references
     * {@code "setMekko" + variableIndex} — verify callers never pass an
     * empty list.
     */
    public SeriesMekko mekko(List<DataEntry> data) {
        if (isChain) {
            js.append(";");
            isChain = false;
        }
        if (!data.isEmpty()) {
            StringBuilder resultData = new StringBuilder();
            resultData.append("[");
            for (DataEntry dataEntry : data) {
                resultData.append(dataEntry.generateJs()).append(",");
            }
            // Drop the trailing comma before closing the array literal.
            resultData.setLength(resultData.length() - 1);
            resultData.append("]");
            js.append(String.format(Locale.US, "var setMekko" + ++variableIndex + " = " + jsBase + ".mekko(%s);", resultData.toString()));
            if (isRendered) {
                onChangeListener.onChange(String.format(Locale.US, jsBase + ".mekko(%s);", resultData.toString()));
                js.setLength(0);
            }
        }
        SeriesMekko item = new SeriesMekko("setMekko" + variableIndex);
        setMekko.add(item);
        return item;
    }
    // Concatenates the generated JS of all proxies returned by mekko(List).
    private String generateJSsetMekko() {
        if (!setMekko.isEmpty()) {
            StringBuilder resultJs = new StringBuilder();
            for (SeriesMekko item : setMekko) {
                resultJs.append(item.generateJs());
            }
            return resultJs.toString();
        }
        return "";
    }
    // Proxies returned from mekko(View); collected so their JS is emitted later.
    private List<SeriesMekko> setMekko1 = new ArrayList<>();
    /**
     * Adds Mekko series from an existing data {@link View}.
     * The view's own JS is emitted first so its variable exists when bound.
     */
    public SeriesMekko mekko(View view) {
        if (isChain) {
            js.append(";");
            isChain = false;
        }
        js.append(view.generateJs());
        js.append(String.format(Locale.US, "var setMekko1" + ++variableIndex + " = " + jsBase + ".mekko(%s);", view.getJsBase()));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, jsBase + ".mekko(%s);", view.getJsBase()));
            js.setLength(0);
        }
        SeriesMekko item = new SeriesMekko("setMekko1" + variableIndex);
        setMekko1.add(item);
        return item;
    }
    // Concatenates the generated JS of all proxies returned by mekko(View).
    private String generateJSsetMekko1() {
        if (!setMekko1.isEmpty()) {
            StringBuilder resultJs = new StringBuilder();
            for (SeriesMekko item : setMekko1) {
                resultJs.append(item.generateJs());
            }
            return resultJs.toString();
        }
        return "";
    }
private RangeColors getPalette;
/**
* Getter for the series colors palette.
*/
public RangeColors getPalette() {
if (getPalette == null)
getPalette = new RangeColors(jsBase + ".palette()");
return getPalette;
}
private RangeColors palette;
private DistinctColors palette1;
private String palette2;
private String[] palette3;
/**
* Setter for the series colors palette.
<b>Note</b>: You can use predefined palettes from {@link anychart.palettes}.
*/
public ChartsMekko setPalette(RangeColors palette) {
if (isChain) {
js.append(";");
isChain = false;
}
js.append(palette.generateJs());
js.append(jsBase);
js.append(String.format(Locale.US, ".palette(%s);", ((palette != null) ? palette.getJsBase() : "null")));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".palette(%s)", ((palette != null) ? palette.getJsBase() : "null")));
js.setLength(0);
}
return this;
}
/**
* Setter for the series colors palette.
<b>Note</b>: You can use predefined palettes from {@link anychart.palettes}.
*/
public ChartsMekko setPalette(DistinctColors palette1) {
if (isChain) {
js.append(";");
isChain = false;
}
js.append(palette1.generateJs());
js.append(jsBase);
js.append(String.format(Locale.US, ".palette(%s);", ((palette1 != null) ? palette1.getJsBase() : "null")));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".palette(%s)", ((palette1 != null) ? palette1.getJsBase() : "null")));
js.setLength(0);
}
return this;
}
/**
* Setter for the series colors palette.
<b>Note</b>: You can use predefined palettes from {@link anychart.palettes}.
*/
public ChartsMekko setPalette(String palette2) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".palette(%s)", wrapQuotes(palette2)));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".palette(%s)", wrapQuotes(palette2)));
js.setLength(0);
}
return this;
}
/**
* Setter for the series colors palette.
<b>Note</b>: You can use predefined palettes from {@link anychart.palettes}.
*/
public ChartsMekko setPalette(String[] palette3) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".palette(%s)", arrayToStringWrapQuotes(palette3)));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".palette(%s)", arrayToStringWrapQuotes(palette3)));
js.setLength(0);
}
return this;
}
private Number pointsPadding;
/**
* Setter for points padding.
*/
public ChartsMekko setPointsPadding(Number pointsPadding) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".pointsPadding(%s)", pointsPadding));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".pointsPadding(%s)", pointsPadding));
js.setLength(0);
}
return this;
}
private Number id2;
private String id3;
/**
* Removes one of series from chart by its id.
*/
public ChartsMekko removeSeries(Number id2) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".removeSeries(%s)", id2));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".removeSeries(%s)", id2));
js.setLength(0);
}
return this;
}
/**
* Removes one of series from chart by its id.
*/
public ChartsMekko removeSeries(String id3) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".removeSeries(%s)", wrapQuotes(id3)));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".removeSeries(%s)", wrapQuotes(id3)));
js.setLength(0);
}
return this;
}
private Number index1;
/**
* Removes one of series from chart by its index.
*/
public ChartsMekko removeSeriesAt(Number index1) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".removeSeriesAt(%s)", index1));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".removeSeriesAt(%s)", index1));
js.setLength(0);
}
return this;
}
    // Lazily created proxy bound to the chart's default xAxis() call.
    private CoreAxesLinear getXAxis;
    /**
     * Getter for chart X-axis.
     *
     * @return a {@link CoreAxesLinear} bound to {@code <jsBase>.xAxis()}.
     */
    public CoreAxesLinear getXAxis() {
        if (getXAxis == null)
            getXAxis = new CoreAxesLinear(jsBase + ".xAxis()");
        return getXAxis;
    }
    // Proxies created for indexed xAxis(index) getter calls.
    private List<CoreAxesLinear> getXAxis1 = new ArrayList<>();
    /**
     * Getter for chart X-axis by index; each call creates and tracks a new proxy.
     */
    public CoreAxesLinear getXAxis(Number index2) {
        CoreAxesLinear item = new CoreAxesLinear(jsBase + ".xAxis("+ index2+")");
        getXAxis1.add(item);
        return item;
    }
    // NOTE(review): parameter-shadow fields; never read or written in this block.
    private String xAxis;
    private Boolean xAxis1;
    /**
     * Setter for chart X-axis from a settings string.
     *
     * @return this, for call chaining.
     */
    public ChartsMekko setXAxis(String xAxis) {
        if (!isChain) {
            js.append(jsBase);
            isChain = true;
        }
        js.append(String.format(Locale.US, ".xAxis(%s)", wrapQuotes(xAxis)));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, ".xAxis(%s)", wrapQuotes(xAxis)));
            js.setLength(0);
        }
        return this;
    }
    /**
     * Setter for chart X-axis from an enable/disable flag.
     *
     * @return this, for call chaining.
     */
    public ChartsMekko setXAxis(Boolean xAxis1) {
        if (!isChain) {
            js.append(jsBase);
            isChain = true;
        }
        js.append(String.format(Locale.US, ".xAxis(%b)", xAxis1));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, ".xAxis(%b)", xAxis1));
            js.setLength(0);
        }
        return this;
    }
    // NOTE(review): parameter-shadow fields; never read or written in this block.
    private Number index3;
    private String xAxis2;
    private Boolean xAxis3;
    /**
     * Setter for chart X-axis by index from a settings string.
     *
     * @return this, for call chaining.
     */
    public ChartsMekko setXAxis(String xAxis2, Number index3) {
        if (!isChain) {
            js.append(jsBase);
            isChain = true;
        }
        js.append(String.format(Locale.US, ".xAxis(%s, %s)", wrapQuotes(xAxis2), index3));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, ".xAxis(%s, %s)", wrapQuotes(xAxis2), index3));
            js.setLength(0);
        }
        return this;
    }
    /**
     * Setter for chart X-axis by index from an enable/disable flag.
     *
     * @return this, for call chaining.
     */
    public ChartsMekko setXAxis(Boolean xAxis3, Number index3) {
        if (!isChain) {
            js.append(jsBase);
            isChain = true;
        }
        js.append(String.format(Locale.US, ".xAxis(%b, %s)", xAxis3, index3));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, ".xAxis(%b, %s)", xAxis3, index3));
            js.setLength(0);
        }
        return this;
    }
private Ordinal getXScale;
/**
* Getter for default chart X scale.
*/
public Ordinal getXScale() {
if (getXScale == null)
getXScale = new Ordinal(jsBase + ".xScale()");
return getXScale;
}
private String xScale;
private ScaleTypes xScale1;
private String xScale2;
private Ordinal xScale3;
/**
* Setter for default chart X scale.
*/
public ChartsMekko setXScale(String xScale) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".xScale(%s)", wrapQuotes(xScale)));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".xScale(%s)", wrapQuotes(xScale)));
js.setLength(0);
}
return this;
}
/**
* Setter for default chart X scale.
*/
public ChartsMekko setXScale(ScaleTypes xScale1) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".xScale(%s)", ((xScale1 != null) ? xScale1.generateJs() : "null")));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".xScale(%s)", ((xScale1 != null) ? xScale1.generateJs() : "null")));
js.setLength(0);
}
return this;
}
/**
* Setter for default chart X scale.
*/
public ChartsMekko setXScale(Ordinal xScale3) {
if (isChain) {
js.append(";");
isChain = false;
}
js.append(xScale3.generateJs());
js.append(jsBase);
js.append(String.format(Locale.US, ".xScale(%s);", ((xScale3 != null) ? xScale3.getJsBase() : "null")));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".xScale(%s)", ((xScale3 != null) ? xScale3.getJsBase() : "null")));
js.setLength(0);
}
return this;
}
    // Lazily created proxy bound to the chart's default yAxis() call.
    private CoreAxesLinear getYAxis;
    /**
     * Getter for chart Y-axis.
     *
     * @return a {@link CoreAxesLinear} bound to {@code <jsBase>.yAxis()}.
     */
    public CoreAxesLinear getYAxis() {
        if (getYAxis == null)
            getYAxis = new CoreAxesLinear(jsBase + ".yAxis()");
        return getYAxis;
    }
    // Proxies created for indexed yAxis(index) getter calls.
    private List<CoreAxesLinear> getYAxis1 = new ArrayList<>();
    /**
     * Getter for chart Y-axis by index; each call creates and tracks a new proxy.
     */
    public CoreAxesLinear getYAxis(Number index4) {
        CoreAxesLinear item = new CoreAxesLinear(jsBase + ".yAxis("+ index4+")");
        getYAxis1.add(item);
        return item;
    }
    // NOTE(review): parameter-shadow fields; never read or written in this block.
    private String yAxis;
    private Boolean yAxis1;
    /**
     * Setter for chart Y-axis from a settings string.
     *
     * @return this, for call chaining.
     */
    public ChartsMekko setYAxis(String yAxis) {
        if (!isChain) {
            js.append(jsBase);
            isChain = true;
        }
        js.append(String.format(Locale.US, ".yAxis(%s)", wrapQuotes(yAxis)));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, ".yAxis(%s)", wrapQuotes(yAxis)));
            js.setLength(0);
        }
        return this;
    }
    /**
     * Setter for chart Y-axis from an enable/disable flag.
     *
     * @return this, for call chaining.
     */
    public ChartsMekko setYAxis(Boolean yAxis1) {
        if (!isChain) {
            js.append(jsBase);
            isChain = true;
        }
        js.append(String.format(Locale.US, ".yAxis(%b)", yAxis1));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, ".yAxis(%b)", yAxis1));
            js.setLength(0);
        }
        return this;
    }
    // NOTE(review): parameter-shadow fields; never read or written in this block.
    private Number index5;
    private String yAxis2;
    private Boolean yAxis3;
    /**
     * Setter for chart Y-axis by index from a settings string.
     *
     * @return this, for call chaining.
     */
    public ChartsMekko setYAxis(String yAxis2, Number index5) {
        if (!isChain) {
            js.append(jsBase);
            isChain = true;
        }
        js.append(String.format(Locale.US, ".yAxis(%s, %s)", wrapQuotes(yAxis2), index5));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, ".yAxis(%s, %s)", wrapQuotes(yAxis2), index5));
            js.setLength(0);
        }
        return this;
    }
    /**
     * Setter for chart Y-axis by index from an enable/disable flag.
     *
     * @return this, for call chaining.
     */
    public ChartsMekko setYAxis(Boolean yAxis3, Number index5) {
        if (!isChain) {
            js.append(jsBase);
            isChain = true;
        }
        js.append(String.format(Locale.US, ".yAxis(%b, %s)", yAxis3, index5));
        if (isRendered) {
            onChangeListener.onChange(String.format(Locale.US, ".yAxis(%b, %s)", yAxis3, index5));
            js.setLength(0);
        }
        return this;
    }
private ScalesBase getYScale;
/**
* Getter for default chart Y scale.
*/
public ScalesBase getYScale() {
if (getYScale == null)
getYScale = new ScalesLinear(jsBase + ".yScale()");
return getYScale;
}
private String yScale;
private ScaleTypes yScale1;
private String yScale2;
private ScalesBase yScale3;
/**
* Setter for default chart Y scale.
*/
public ChartsMekko setYScale(String yScale) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".yScale(%s)", wrapQuotes(yScale)));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".yScale(%s)", wrapQuotes(yScale)));
js.setLength(0);
}
return this;
}
/**
* Setter for default chart Y scale.
*/
public ChartsMekko setYScale(ScaleTypes yScale1) {
if (!isChain) {
js.append(jsBase);
isChain = true;
}
js.append(String.format(Locale.US, ".yScale(%s)", ((yScale1 != null) ? yScale1.generateJs() : "null")));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".yScale(%s)", ((yScale1 != null) ? yScale1.generateJs() : "null")));
js.setLength(0);
}
return this;
}
/**
* Setter for default chart Y scale.
*/
public ChartsMekko setYScale(ScalesBase yScale3) {
if (isChain) {
js.append(";");
isChain = false;
}
js.append(yScale3.generateJs());
js.append(jsBase);
js.append(String.format(Locale.US, ".yScale(%s);", ((yScale3 != null) ? yScale3.getJsBase() : "null")));
if (isRendered) {
onChangeListener.onChange(String.format(Locale.US, ".yScale(%s)", ((yScale3 != null) ? yScale3.getJsBase() : "null")));
js.setLength(0);
}
return this;
}
private String generateJSgetAnnotations() {
if (getAnnotations != null) {
return getAnnotations.generateJs();
}
return "";
}
private String generateJSgetCrosshair() {
if (getCrosshair != null) {
return getCrosshair.generateJs();
}
return "";
}
private String generateJSgetData() {
if (getData != null) {
return getData.generateJs();
}
return "";
}
private String generateJSgetGetPlotBounds() {
if (getGetPlotBounds != null) {
return getGetPlotBounds.generateJs();
}
return "";
}
private String generateJSgetGetSeries() {
if (!getGetSeries.isEmpty()) {
StringBuilder resultJs = new StringBuilder();
for (SeriesMekko item : getGetSeries) {
resultJs.append(item.generateJs());
}
return resultJs.toString();
}
return "";
}
private String generateJSgetGetSeries1() {
if (!getGetSeries1.isEmpty()) {
StringBuilder resultJs = new StringBuilder();
for (SeriesMekko item : getGetSeries1) {
resultJs.append(item.generateJs());
}
return resultJs.toString();
}
return "";
}
private String generateJSgetGetSeriesAt() {
if (!getGetSeriesAt.isEmpty()) {
StringBuilder resultJs = new StringBuilder();
for (SeriesMekko item : getGetSeriesAt) {
resultJs.append(item.generateJs());
}
return resultJs.toString();
}
return "";
}
private String generateJSgetHatchFillPalette() {
if (getHatchFillPalette != null) {
return getHatchFillPalette.generateJs();
}
return "";
}
private String generateJSgetLabels() {
if (getLabels != null) {
return getLabels.generateJs();
}
return "";
}
private String generateJSgetPalette() {
if (getPalette != null) {
return getPalette.generateJs();
}
return "";
}
private String generateJSgetXAxis() {
if (getXAxis != null) {
return getXAxis.generateJs();
}
return "";
}
private String generateJSgetXAxis1() {
if (!getXAxis1.isEmpty()) {
StringBuilder resultJs = new StringBuilder();
for (CoreAxesLinear item : getXAxis1) {
resultJs.append(item.generateJs());
}
return resultJs.toString();
}
return "";
}
private String generateJSgetXScale() {
if (getXScale != null) {
return getXScale.generateJs();
}
return "";
}
private String generateJSgetYAxis() {
if (getYAxis != null) {
return getYAxis.generateJs();
}
return "";
}
private String generateJSgetYAxis1() {
if (!getYAxis1.isEmpty()) {
StringBuilder resultJs = new StringBuilder();
for (CoreAxesLinear item : getYAxis1) {
resultJs.append(item.generateJs());
}
return resultJs.toString();
}
return "";
}
private String generateJSgetYScale() {
if (getYScale != null) {
return getYScale.generateJs();
}
return "";
}
    @Override
    protected String generateJs() {
        // Close any open chained call before flushing the buffered JS.
        if (isChain) {
            js.append(";");
            isChain = false;
        }
        // Emission order: cached getter proxies first, then the proxies created
        // by the setLabels/mekko setters, then the parent class's JS.
        js.append(generateJSgetAnnotations());
        js.append(generateJSgetCrosshair());
        js.append(generateJSgetData());
        js.append(generateJSgetGetPlotBounds());
        js.append(generateJSgetGetSeries());
        js.append(generateJSgetGetSeries1());
        js.append(generateJSgetGetSeriesAt());
        js.append(generateJSgetHatchFillPalette());
        js.append(generateJSgetLabels());
        js.append(generateJSgetPalette());
        js.append(generateJSgetXAxis());
        js.append(generateJSgetXAxis1());
        js.append(generateJSgetXScale());
        js.append(generateJSgetYAxis());
        js.append(generateJSgetYAxis1());
        js.append(generateJSgetYScale());
        js.append(generateJSsetLabels());
        js.append(generateJSsetLabels1());
        js.append(generateJSsetMekko());
        js.append(generateJSsetMekko1());
        js.append(super.generateJsGetters());
        js.append(super.generateJs());
        String result = js.toString();
        // The buffer is consumed: reset it so subsequent calls start fresh.
        js.setLength(0);
        return result;
    }
} |
// Ambient module declaration: tells the TypeScript compiler that the
// '@tanem/svg-injector' package exists, typing all of its exports as `any`
// (the package ships without its own type definitions here).
declare module '@tanem/svg-injector'
// Standard headers for I/O, strings, containers and algorithms.
#include <iostream>
#include <string>
#include <sstream>
#include <string.h>
#include <stdio.h>
#include <map>
#include <vector>
#include <set>
#include <stack>
#include <algorithm>
#include <queue>
#include <iterator>
#include <cmath>
// Competitive-programming shorthands.
#define inf 0x3f3f3f3f
#define pb push_back
#define mp make_pair
#define fs first
#define sc second
using namespace std;
// MAXN: maximum supported input length; SOMA26 = 26*27/2 (sum 1..26).
const int MAXN = 50000, SOMA26 = 351;
typedef long long int lld;
typedef pair<int,int> pii;
typedef pair<pii,pii> p4i;
typedef vector<pii> vii;
typedef vector<int> vi;
typedef long long ll;
typedef map<int,int>::iterator mit;
typedef pair<mit,bool> pitb;
typedef vii::iterator itv;
// n: input length; nice: flag set once a valid 26-letter window is built.
// NOTE(review): vet appears unused in this program.
int n, vet[MAXN], nice = 0;
// line: the input word; '?' marks positions still to be filled.
string line;
// Try to fill the '?' characters inside line[a..b] (inclusive) so the window
// contains every letter 'A'..'Z'. The caller passes 26-character windows
// (b - a == 25), so success requires no duplicate fixed letters and enough
// '?' slots to cover every missing letter.
// Returns 1 (mutating `line`) on success, 0 if the window cannot be fixed.
int fill(int a, int b){
    int cnt[26], cntp = 0;   // cnt: occurrences per fixed letter; cntp: '?' count
    vector<char> cntf;       // letters currently absent from the window
    for(int i = 0; i < 26; i++) cnt[i] = 0;
    for(int i = a; i <= b; i++){
        if(line[i] != '?') cnt[line[i]-'A']++;
        else cntp++;
    }
    for(int i = 0; i < 26; i++) if(!cnt[i]) cntf.pb('A'+i);
    // Enough wildcards to supply every missing letter: assign them in order.
    if(cntp >= (int) cntf.size()){
        int k = 0;
        for(int i = a; i <= b; i++){
            if(line[i] == '?'){
                if(k == (int) cntf.size()){
                    // More '?' than missing letters: spares default to 'A'.
                    line[i] = 'A';
                }else{
                    line[i] = cntf[k++];
                }
            }
        }
        return 1;
    }
    return 0;
}
void greedyFill(int a, int b){
for(int i = a; i <= b; i++){
if(line[i] == '?') line[i] = 'A';
}
}
int main()
{
    getline(cin,line);
    n = line.size();
    if(n >= 26){
        // Slide a 26-character window [k-25, k] across the word; turn the
        // first feasible window into one containing all 26 letters.
        for(int k = 25; k < n; k++){
            if(!nice){
                if(fill(k-25,k)){
                    nice = 1;
                }
            }else{
                // A valid window already exists; later '?' can be anything.
                greedyFill(k-25,k);
            }
        }
        if(nice){
            // Also fill any '?' left before the first valid window.
            greedyFill(0,n-1);
            cout << line << endl;
        }else{
            printf("-1\n");
        }
    }else{
        // Fewer than 26 characters can never contain all 26 letters.
        printf("-1\n");
    }
    return 0;
}
|
Alteration of the gut microbiota in Chinese population with chronic kidney disease We evaluated differences in the compositions of faecal microbiota between 52 end stage renal disease (ESRD) patients and 60 healthy controls in southern China using quantitative real-time polymerase chain reaction (qPCR) and high-throughput sequencing (16S ribosomal RNA V4-6 region) methods. The absolute quantification of total bacteria was significantly reduced in ESRD patients (p<0.01). In three enterotypes, Prevotella was enriched in the healthy group whereas Bacteroides were prevalent in the ESRD group (LDA score>4.5). 11 bacterial taxa were significantly overrepresented in samples from ESRD and 22 bacterial taxa were overrepresented in samples from healthy controls. The butyrate producing bacteria, Roseburia, Faecalibacterium, Clostridium, Coprococcus and Prevotella were reduced in the ESRD group (LDA values>2.0). Canonical correspondence analysis (CCA) indicated that Cystatin C (CysC), creatinine and eGFR appeared to be the most important environmental parameters to influence the overall microbial communities. In qPCR analysis, The butyrate producing species Roseburia spp., Faecalibacterium prausnitzii, Prevotella and Universal bacteria, were negatively related to CRP and CysC. Total bacteria in faeces were reduced in patients with ESRD compared to that in healthy individuals. The enterotypes change from Prevotella to Bacteroides in ESRD patients. The gut microbiota was associated with the inflammatory state and renal function of chronic kidney disease. intestinal microbial flora 20. Moreover some studies have suggested the pathogenic role of gut microbiota in kidney disease 21. 
Alterations in the composition of the microbiome and accumulation of gut derived uremic toxins (such as lipopolysaccharides, indoxyl sulphate (IS), p-cresyl sulphate (PCS), amines, ammonia, and trimethylamine oxide) contribute to the systemic inflammation, cardiovascular disease and numerous other CKD associated complications 13,22,23. IS and PCS were associated with elevated levels of selected inflammatory markers (serum IL-6, TNF-alpha and IFN-gamma) and an antioxidant in CKD patients 24 and predict progression of CKD 25. Butyrate produced from microbial fermentation is important for energy metabolism and normal development of colonic epithelial cells, mainly has a protective role in relation to colonic disease, and appears to decrease the inflammatory response 26,27. Smith et al. 28 found that short chain fatty acids (SCFAs) regulate the size and function of the colonic Treg pool, which play a major role in the pathogenesis of systemic inflammation, maintaining immunological self-tolerance, limiting the inflammatory response to foreign antigens and protecting against colitis. Butyrate regulates the differentiation of Treg cells 29. ESRD is compounded by the depletion and dysfunction of regulatory T lymphocytes 30. CKD impairs the barrier function and alters microbial flora of the intestine. Bacterial translocation and uremic toxicity as possible sources contributed to the chronic inflammation noted in uremia 31,32. The aim of this study was to evaluate and quantify differences in the composition of gut microbiota in ESRD patients in southern China. Materials and Methods Study subjects. CKD definitions and classifications in this study are in accordance with the 2002 clinical practice guideline, end stage renal disease (ESRD) was defined as the estimated glomerular filtration rate (eGFR) less than 15 mL/min/1.73 m 2 for 3 months, irrespective of the presence or absence of kidney damage 33. 
All ESRD patients were diagnosed in accordance with this guideline by professional kidney internal medicine physicians 33. All methods, including the collection of blood and faecal samples, were performed in accordance with the relevant guidelines and regulations. All participants signed informed consent. The study was reviewed and approved by the Medical Ethics Committee of the Southern Medical University, Guangzhou, China. Fresh faecal samples collected in sterile containers from 52 ESRD patients and 60 healthy volunteers (controls) were used for quantitative PCR (qPCR); of these, samples from 27 ESRD patients and 26 healthy volunteers underwent pyrosequencing. The underlying cause of ESRD was chronic glomerulonephritis in 21 patients, hypertensive nephropathy in 11, obstructive nephropathy in 6, polycystic kidney disease in 3, systemic lupus erythematosus in 2, chronic pyelonephritis in 2, and unclear in 7. Only two of the ESRD patients had received haemodialysis (once, through deep venous catheterization) before enrolment, because of acute hyperkalaemia; the remaining patients had never been treated with dialysis, and none of the ESRD inpatients had received regular dialysis. Exclusion criteria included treatment with antibiotics, probiotics/prebiotics and other laxatives in the 4 weeks preceding sample collection. We also excluded subjects with cholecystectomy, colectomy or intestinal disease, diabetes and hyperlipidaemia from our data. Clinical data for all subjects are shown in Table 1. Sequencing reads were pre-processed with the barcoded Illumina paired-end sequencing (BIPES) pipeline for preliminary analysis; the remaining sequences were screened with UCHIME to remove suspected chimeric sequences. All reads were sorted into different samples according to their barcodes.
Then two-stage clustering (TSC) was used to extract OTUs, distinguishing high-abundance from low-abundance sequences. Principal coordinates analysis (PCoA) based on UniFrac distance was performed with QIIME. Linear discriminant analysis (LDA) with effect size measurement (LEfSe) was used to identify indicator bacterial groups specialized within the two groups. Results Patients and controls. CysC, BUN and creatinine were significantly higher, and eGFR was reduced, in ESRD patients compared to healthy controls. Levels of the plasma inflammatory biomarker CRP differed significantly between ESRD patients and controls (p = 0.005). LPS was increased in ESRD patients (p = 0.033). The ethnic background of all the participants was Han Chinese. All the ESRD patients had been treated with phosphate binders, oral iron supplements or intravenous iron compounds, and antihypertensive drugs. 12 of the ESRD patients had been treated with calcium supplements and vitamin D. There were no significant differences in age, sex, body mass index (BMI), glucose, TG, CHOL, VLDL, LDL, HDL, lipoprotein a (Lpa), apolipoprotein E (ApoE), and apolipoproteins A and B (ApoA, B) (Table 1). Diversity and phylum/subfamily-level taxonomic distribution of gut microbiota in ESRD patients. Diversity concerns both taxon richness and evenness, and our results demonstrated that diversity was similar between groups (p > 0.05) as assessed by the chao1, observed_species, Shannon and Simpson diversity indices. PCoA based on the UniFrac metric did not reveal a separation trend between healthy controls and ESRD patients (Fig. 1). Bacteroidetes was the most abundant phylum in both healthy individuals and CKD patients, accounting for 41.76% and 40.23% of the total valid reads, respectively. Firmicutes was the second most abundant phylum in all samples, with average relative abundances of 41.43% and 38.01%, respectively.
The other dominant phyla were Proteobacteria, Actinobacteria, Fusobacteria, Verrucomicrobia and Others ( Fig. 2A). Based on the average relative abundance, 21 genera were dominant (>=1%) at the genus level. Bacteroides, Escherichia/Shigella, Subdoligranulum, Fusobacterium. etc were enriched in ESRD patients. Prevotella, Roseburia, Faecalibacterium, Megamonas. etc were more abundant in controls (Fig. 2B). A reduction in SCFAs producing bacteria as a prominent feature of ESRD patients. LEfSe showed so much biomarkers for ESRD patients and controls subiects (Fig. 3) (LDA score > 2.0, p < 0.05). 11 species enriched in ESRD patients, and 22 in controls. According to Wong J 39 Kyoto Encyclopedia of Genes and Genomes (KEGG) analysis, Bacteroidaceae with p-Cresol production enzymes enriched in ESRD patients. Desulfovibrionaceae, Bacteroidaceae, Alcaligenaceae, Pseudomonadaceae, and Pasteurellaceae produced urease, Bacteroidaceae's relative abundance was higher, the others were much lower in ESRD patients than the controls group in this study. Microbes of the genus Prevotella, Roseburia, Faecalibacterium, Clostridium, Coprococcus can produce butyrate 26, Dorea was the other predominant SCFA-producing genera 40. All of these species were reduced in ESRD patients, indicating that bacteria producing SCFAs especially butyrate were decreased in ESRD patients. Bacteroides (enterotype 1), Prevotella (enterotype 2) and Ruminococcus (enterotype 3) were three main enterotypes of human gut microbiota 41. In this study, from healthy people to ESRD patients, the enterotype changes from Prevotella (enterotype 2) to Bacteroides (enterotype 1). Canonical correspondence analysis (CCA). Microbial community may be more correlated with indigenous environmental parameters. Analyzing the dynamic changes of microbial communities with geochemical factors will reveal the correlation between environmental parameters and microbial community. 
Therefore, CCA analysis was used to reveal how microbes can adapt to the changes of physiochemical environments. A correlation between the important environmental parameters and microbial community was discerned by CCA analysis as shown in Fig. 4. Sixteen environmental parameters and the dominant genera (>1%) in each sample were selected to determine their correlation. The length of an environmental parameter arrow indicated the strength of the environmental parameter to the overall microbial communities. As such, CysC (r^2 = 0.1689, p = 0.020), creatinine (r^2 = 0.1593, p = 0.008) and eGFR (r^2 = 0.1255, p = 0.041) concentrations appears to be the most important environmental parameters (Monte Carlo test). For instance, Enterobacter, Bacteroides, Fusobacterium, Escherichia and Klebsiella, which were positively correlated with CysC, creatinine (Scr) as shown in Fig. 4, and dominant in ESRD patients. Whereas Faecalibacterium, Akkermansia, Prevotella, Roseburia, Coprococcus and Clostridium were positively correlated with eGFR, and dominant in controls. Therefore, it is fair to propose that CKD played an active role in shaping the indigenous microbial communities. Quantification of well known species in faeces by qPCR. qPCR was used to assess changes in bacterial absolute quantity in faecal samples from the two groups (Fig. 5). Bacterial copy number values were converted into logarithmic values before analysis. Quantities of total gene copies of Universal bacteria, E. coli, Bifidobacterium, Bacteroides fragilis group, Enterococcus spp., Clostridium coccoides group, Faecalibacterium prausnitzii, Roseburia spp. and Prevotella were significantly decreased in ESRD patients compared with controls (p = 0.000, p = 0.001, p = 0.000, p = 0.000, p = 0.000, p = 0.000, p = 0.028, p = 0.000, p = 0.000, respectively). However, the numbers of beneficial microorganisms from the Lactobacillus group were similar between two groups (p = 0.395). 
In ESRD patients, universal bacteria were decreased, and the butyrate producing species Clostridium coccoides group, Faecalibacterium prausnitzii, Roseburia spp. and Prevotella were also reduced, consistent with the sequencing results. Discussion This report represents the first investigation of faecal microbiota diversity and quantity among Chinese CKD patients that employ high-throughput sequencing and qPCR analyses. We supplemented the intestinal bacteria data of CKD patients. In the analysis of sequencing data, we did not find any diversity differences between CKD patients and controls, which suggests that the diversity of the bacterial community was not destroyed critically, it was not like microbe-scarce scenario. Bacteroidetes (~40%), Firmicutes (~40%) and Proteobacteria (~10%) were the predominant phyla in both healthy individuals and CKD patients, consistent with reports from previous studies among cohorts from Western countries, Africa and Asia. Although Bacteroidetes and Firmicutes were the two most abundant phyla constituting the vast majority of gut microbiota in this study, an interesting variation occurred with regards to Bacteroidetes. Through LEfSe analysis, we found that Prevotella was enriched in the healthy group, and Bacteroides in the CKD group. This enterotype conversion proves once again the correlation between the intestinal flora and CKD 41. Distribution of a number of genera could be differentiated between ESRD patients and controls. The SCFAs (propionate, acetate, and butyrate) are a by-product of the fermentation of non-absorbable complex carbohydrates. Firmicutes-Clostridiales-Lachnospiraceae -Dorea producing SCFAs 40 were diminished in ESRD patients. Members of Prevotellaceae possess phosphotransbutyrylase and butyrate kinase 39, and Prevotella can produce SCFAs 45. In this study, both Prevotella and Prevotellaceae were reduced in ESRD patients. 
The human colonic butyrate (Short-chain fatty acids) producers are Gram-positive firmicutes, but are phylogenetically diverse. Clostridiales cluster the XIVa (Clostridium coccoides) including Ruminococcus, Coprococcus, Eubacterium hallii (E. hallii), Eubacterium rectale/Roseburia spp. and Clostridiales cluster IV (Clostridium leptum) including Faecalibacterium prausnitzii, and Eubacterium spp. are normally the two most abundant groups of human faecal bacteria that produce butyrate 26,46,47. Roseburia, Coprococcus, and Faecalibacterium belong to Firmicutes-Clostridiales. All of them are typically producing butyrate bacteria and were particularly and significantly more abundant in healthy controls and decreased in ESRD patients and consistent with previous studies 39. The qPCR analysis of Roseburia spp. and Faecalibacterium prausnitzii showed a similar trend in ESRD. Butyrate gets involved in the adjustment of body reaction to inflammation 29. Systemic inflammation in patients with end-stage renal disease (ESRD) is mediated by activation of the innate immune system 48. The presence of persistent inflammation magnifies the risk of poor outcome, and is a risk factor for cardiovascular disease (CVD), via mechanisms related to exacerbation of both wasting and vascular calcification processes and self-enhancement of the inflammatory cascade 49. High dietary total fiber intake is associated with lower risk of inflammation and mortality in kidney disease 50. Interestingly, CRP was increased in ESRD compared with that in controls. Spearman rank correlation analysis demonstrated that the absolute abundance of Roseburia spp., Faecalibacterium prausnitzii, Prevotella and Universal bacteria were negatively associated with CRP level and renal function indexes. These data indicate that bacteria producing butyrate as biomarkers may involve in the pathological process of CKD. 
Recently, Andrade-Oliveira V proved that SCFAs can reduce inflammation in acute kidney injury (AKI), which supports our inferences 51. Reduced quantity of fecal microbiota were found in ESRD patients on qPCR analysis, This means that the absolute quantity of total faecal microbiota was decreased in CKD patients. In general, Universal bacteria, E. coli, Bifidobacterium, Bacteroides fragilis group, Enterococcus spp., Clostridium coccoides group, Faecalibacterium prausnitzii, Roseburia spp. and Prevotella were decreased in ESRD. Bifidobacterium, Roseburia and Clostridium coccoides 45 can produce SCFAs. Bacteroides fragilis and Clostridium spp. can protect against dextran sulfate sodium (DSS)-or trinitrobenzenesulfonic acid-induced colitis 52,53. This suggests that CKD status may influence the absolute quantity of the microbiome, which may result from accumulation of uremic toxins, inflammation and malnutrition and needs further investigation. This reduction in beneficial bacteria may play an important role in the pathogenic processes of CKD. LPS is derived from the cell wall of gram negative bacteria, and the increase of the gamma Proteobacteria is also effective in increasing the LPS level in circulation. The degree of circulating endotoxemia might be related to the severity of systemic inflammation and features of atherosclerosis in peritoneal dialysis (PD) patients 54. LPS may accelerate activation of neutrophils and macrophages/monocytes, which further explain the persistent inflammation of ESRD 55. Although most CKD patients presented signs of fluid overload that was associated with endotoxaemia, there was no association between endotoxaemia and systemic inflammation, suggesting the endotoxaemia may not be the main determinant of the inflammatory status in CKD patients 56. So the correlation between LPS and inflammation is unclear. In this study, LPS was elevated in ESRD patients, but we didn't find the correlation between LPS and bacterial amounts. 
To determine the characteristics of gut microbiota based on kidney function, we excluded the influences of body mass index (BMI), blood lipids, and blood glucose. No significant differences in blood lipid and blood glucose levels were found between CKD patients and controls consistent with that reported in previous study by McIntyre, C. W. 57. Further research is needed in this area to provide more conclusive evidence while taking into account the relationships of gut flora with human diet, environment and habits. Table 3. Correlation analysis of CRP, CysC, BUN, creatinine, eGFR values and the species count determined by qPCR. Abbrevitions: CysC, Cystatin C; BUN, Blood Urea Nitrogen; eGFR, estimated glomerular filtration rate. Spearman rank correlation were used to evaluate statistical importance: r: correlation coefficient. *p < 0.05, **p < 0.01. |
<reponame>robrac/algorithms-exercises-with-python
def insert(self,key,data):
if self.head == None:
self.head = HeaderNode()
temp = DataNode(key,data)
self.head.setNext(temp)
top = temp
while flip() == 1:
newhead = HeaderNode()
temp = DataNode(key,data)
temp.setDown(top)
newhead.setNext(temp)
newhead.setDown(self.head)
self.head = newhead
top = temp
else:
|
__authors__ = ""
__copyright__ = "(c) 2014, pymal"
__license__ = "BSD License"
__contact__ = "Name Of Current Guardian of this file <email@address>"
from urllib import request
import time
import singleton_factory
from pymal import consts
from pymal import decorators
from pymal import exceptions
__all__ = ['MyAnime']
class MyAnime(object, metaclass=singleton_factory.SingletonFactory):
    """
    Saves an account's data about an anime (the "my list" entry on MAL).

    Wraps a :class:`pymal.anime.Anime` object and delegates any unknown
    attribute access to it (see ``__getattr__``).

    :ivar my_enable_discussion: boolean
    :ivar my_id: int
    :ivar my_status: int. #TODO: put the dictionary here.
    :ivar my_score: int.
    :ivar my_start_date: string as mmddyyyy.
    :ivar my_end_date: string as mmddyyyy.
    :ivar my_priority: int.
    :ivar my_storage_type: int. #TODO: put the dictionary here.
    :ivar my_storage_value: float.
    :ivar my_is_rewatching: boolean.
    :ivar my_completed_episodes: int.
    :ivar my_download_episodes: int.
    :ivar my_times_rewatched: int.
    :ivar my_rewatch_value: int.
    :ivar my_tags: frozenset.
    :ivar my_comments: string
    :ivar my_fan_sub_groups: string.
    """
    # Separator used by MAL for the tag list in the edit form.
    __TAG_SEPARATOR = ';'
    __MY_MAL_URL = request.urljoin(
        consts.HOST_NAME, 'editlist.php?type=anime&id={0:d}')
    __MY_MAL_DELETE_URL = request.urljoin(
        consts.HOST_NAME, 'api/animelist/delete/{0:d}.xml')
    __MY_MAL_UPDATE_URL = request.urljoin(
        consts.HOST_NAME, 'api/animelist/update/{0:d}.xml')

    def __init__(self, mal_id: int, my_mal_id, account):
        """
        :param mal_id: the MAL anime id, or an already-constructed
            :class:`pymal.anime.Anime` object.
        :param my_mal_id: the id of this entry inside the account's list.
        :param account: the owning :class:`pymal.account.Account`.
        """
        from pymal import anime
        # Accept either an Anime instance or a raw MAL id.
        if isinstance(mal_id, anime.Anime):
            self.obj = mal_id
        else:
            self.obj = anime.Anime(mal_id)

        self.__my_mal_url = self.__MY_MAL_URL.format(self.obj.id)

        # Lazy-loading flag: properties decorated with @decorators.my_load
        # trigger my_reload() when this is False.
        self._is_my_loaded = False
        self._account = account

        self.__my_mal_id = my_mal_id
        self.__my_status = 0
        self.my_enable_discussion = False
        self.__my_score = 0.0
        self.__my_start_date = ''
        self.__my_end_date = ''
        self.__my_priority = 0
        self.__my_storage_type = 0
        self.__my_storage_value = 0.0
        self.__my_comments = ''
        self.__my_fan_sub_groups = ''
        self.__my_tags = frozenset()

        self.__my_is_rewatching = None
        self.__my_completed_episodes = None
        self.__my_download_episodes = 0
        self.__my_times_rewatched = 0
        self.__my_rewatch_value = None

    @property
    def my_id(self) -> int:
        """
        :return: the id in the account.
        :rtype: int
        """
        return self.__my_mal_id

    @property
    @decorators.my_load
    def my_status(self) -> int:
        """
        :return: the status as number between 1 to 6.
        :rtype: int
        """
        return self.__my_status

    @my_status.setter
    def my_status(self, status: int):
        """
        :param status: the value to put in status. must be between 1 to 6.
        :type: int
        """
        if not (1 <= status <= 6):
            raise RuntimeError("value of my_statue can be 1 to 6")
        self.__my_status = status

    @property
    @decorators.my_load
    def my_score(self) -> int:
        """
        :return: The score as int between 0 to 10.
        :rtype: int
        """
        return self.__my_score

    @my_score.setter
    def my_score(self, score: int):
        """
        :param score: The score. Must be between 0 to 10.
        :type: int
        """
        if not (0 <= score <= 10):
            raise RuntimeError("score must be between 0 to 10")
        self.__my_score = score

    @property
    @decorators.my_load
    def my_start_date(self) -> str:
        """
        :return: the start date of watching.
        """
        return self.__my_start_date

    @my_start_date.setter
    def my_start_date(self, start_date_string: str):
        """
        :param start_date_string: An string that look like {@link consts.MALAPI_FORMAT_TIME}".
        :type: str
        """
        # Raises ValueError if the string does not match the MAL API format.
        time.strptime(start_date_string, consts.MALAPI_FORMAT_TIME)
        self.__my_start_date = start_date_string

    @property
    @decorators.my_load
    def my_end_date(self) -> str:
        """
        :return: the end date of watching.
        :type: str
        """
        return self.__my_end_date

    @my_end_date.setter
    def my_end_date(self, end_date_string: str):
        """
        :param end_date_string: An string that look like {@link consts.MALAPI_FORMAT_TIME}".
        :type: str
        """
        # Raises ValueError if the string does not match the MAL API format.
        time.strptime(end_date_string, consts.MALAPI_FORMAT_TIME)
        self.__my_end_date = end_date_string

    @property
    @decorators.my_load
    def my_priority(self) -> int:
        """
        :return: The priority value as int between 0 to 3
        :rtype: int
        """
        return self.__my_priority

    @my_priority.setter
    def my_priority(self, priority: int):
        """
        :param priority: priority must be between 0 to 3.
        :type: int
        """
        if not (0 <= priority <= 3):
            raise RuntimeError("priority can be 0 to 3")
        self.__my_priority = priority

    @property
    @decorators.my_load
    def my_storage_type(self) -> int:
        """
        :return: The storage type of the downloaded episodes. Between 0 to 7.
        :rtype: int
        """
        return self.__my_storage_type

    @my_storage_type.setter
    def my_storage_type(self, storage_type: int):
        """
        :param storage_type: int between 0 to 7.
        :type: int
        """
        if not (0 <= storage_type <= 7):
            raise RuntimeError("value of my_storage_type can be 0 to 7")
        self.__my_storage_type = storage_type

    @property
    @decorators.my_load
    def my_storage_value(self) -> float:
        """
        :return: the storage value (the size you saved) - float but a real number!
        :rtype: float
        """
        return self.__my_storage_value

    @my_storage_value.setter
    def my_storage_value(self, storage_value: float):
        """
        :param storage_value: the storage value (the size you saved) - float but a real number!
        :type: float
        """
        # Validation only: int() raises if the value is not a real number.
        int(storage_value)
        self.__my_storage_value = storage_value

    @property
    @decorators.my_load
    def my_is_rewatching(self) -> bool:
        """
        :return: a flag to know if rewatching now.
        :rtype: bool
        """
        return self.__my_is_rewatching

    @my_is_rewatching.setter
    def my_is_rewatching(self, is_rewatching: bool):
        """
        :param is_rewatching: a flag to know if rewatching now.
        :type: bool
        """
        self.__my_is_rewatching = is_rewatching

    @property
    @decorators.my_load
    def my_completed_episodes(self) -> int:
        """
        :return: the number of completed episodes.
        :rtype: int
        """
        return self.__my_completed_episodes

    @my_completed_episodes.setter
    def my_completed_episodes(self, completed_episodes: int):
        """
        :param completed_episodes: the number of completed episodes. Between 0 to number of episodes.
        :type: int
        """
        # self.episodes is resolved on the wrapped Anime via __getattr__.
        if not (0 <= completed_episodes <= self.episodes):
            raise RuntimeError("value of my_completed_episodes can be 0 to self.episodes")
        self.__my_completed_episodes = completed_episodes

    @property
    @decorators.my_load
    def my_download_episodes(self) -> int:
        """
        :return: the number of downloaded episodes.
        :rtype: int
        """
        return self.__my_download_episodes

    @my_download_episodes.setter
    def my_download_episodes(self, downloaded_episodes: int):
        """
        :param downloaded_episodes: the number of downloaded episodes. Between 0 to number of episodes.
        :type: int
        """
        if not (0 <= downloaded_episodes <= self.episodes):
            raise RuntimeError("downloaded episodes can be 0 to self.episodes")
        self.__my_download_episodes = downloaded_episodes

    @property
    @decorators.my_load
    def my_times_rewatched(self) -> int:
        """
        :return: The times of rewatching is a positive value.
        :type: int
        """
        return self.__my_times_rewatched

    @my_times_rewatched.setter
    def my_times_rewatched(self, times_rewatched: int):
        """
        :param times_rewatched: the times of rewatching must be a positive value.
        :type: int
        """
        if not (0 <= times_rewatched):
            raise RuntimeError("value of my_times_rewatched can be 0 or more")
        self.__my_times_rewatched = times_rewatched

    @property
    @decorators.my_load
    def my_rewatch_value(self) -> int:
        """
        :return: The rewatching is between 0 to 5.
        :type: int
        """
        return self.__my_rewatch_value

    @my_rewatch_value.setter
    def my_rewatch_value(self, rewatch_value: int):
        """
        :param rewatch_value: The rewatching must be between 0 to 5.
        :type: int
        """
        if not (0 <= rewatch_value <= 5):
            raise RuntimeError("rewatch value can be 0 to 5")
        self.__my_rewatch_value = rewatch_value

    @property
    @decorators.my_load
    def my_tags(self):
        """
        :return: the account tags.
        :rtype: frozenset
        """
        return self.__my_tags

    @property
    @decorators.my_load
    def my_comments(self):
        """
        :return: the comment of the account about the anime.
        :rtype: str
        """
        return self.__my_comments

    @property
    @decorators.my_load
    def my_fan_sub_groups(self):
        """
        :return: the fan sub groups
        :rtype: str
        """
        return self.__my_fan_sub_groups

    def my_reload(self):
        """
        Reloading data from MAL by scraping the account's edit-list page.

        :raises exceptions.FailedToReloadError: if any expected element is
            missing from the page (layout change, bad response, etc.).
        """
        from pymal import global_functions

        # Getting content wrapper <div>
        content_wrapper_div = global_functions.get_content_wrapper_div(
            self.__my_mal_url, self._account.auth_connect)

        bas_result = content_wrapper_div.find(name='div',
                                              attrs={'class': 'badresult'})
        if bas_result is not None:
            raise exceptions.FailedToReloadError(bas_result)

        # Getting content <td>
        content_div = content_wrapper_div.find(
            name="div", attrs={"id": "content"}, recursive=False)
        if content_div is None:
            raise exceptions.FailedToReloadError(content_wrapper_div)
        content_td = content_div.table.tr.td
        if content_td is None:
            raise exceptions.FailedToReloadError(content_div)

        # Getting content rows <tr>
        content_form = content_td.find(name="form", attrs={'id': "myAnimeForm"})
        if content_form is None:
            raise exceptions.FailedToReloadError(content_td)
        content_rows = content_form.table.tbody.findAll(
            name="tr", recursive=False)

        # Index of the first data row; incremented after each field is parsed,
        # so parsing order below must match the page's row order.
        contents_divs_index = 2

        # Getting my_status
        status_select = content_rows[contents_divs_index].find(
            name="select", attrs={"id": "status", "name": "status"})
        if status_select is None:
            raise exceptions.FailedToReloadError(content_rows)
        # TODO: make this look better
        status_selected_options = list(filter(
            lambda x: 'selected' in x.attrs,
            status_select.findAll(name="option")
        ))
        if 1 != len(status_selected_options):
            raise exceptions.FailedToReloadError(status_selected_options)
        self.__my_status = int(status_selected_options[0]['value'])

        is_rewatch_node = content_rows[contents_divs_index].find(
            name="input", attrs={"id": "rewatchingBox"})
        if is_rewatch_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        # NOTE(review): bool() of a non-empty attribute string is always True;
        # verify the 'value' attribute actually encodes the checkbox state.
        self.__my_is_rewatching = bool(is_rewatch_node['value'])
        contents_divs_index += 1

        # Getting watched episodes
        watched_input = content_rows[contents_divs_index].\
            find(name="input", attrs={"id": "completedEpsID",
                                      "name": "completed_eps"})
        if watched_input is None:
            raise exceptions.FailedToReloadError(content_rows)
        self.__my_completed_episodes = int(watched_input['value'])
        contents_divs_index += 1

        # Getting my_score
        score_select = content_rows[contents_divs_index].find(
            name="select", attrs={"name": "score"})
        if score_select is None:
            raise exceptions.FailedToReloadError(content_rows)
        score_selected_option = score_select.find(
            name="option", attrs={"selected": ""})
        if score_selected_option is None:
            raise exceptions.FailedToReloadError(score_select)
        self.__my_score = int(float(score_selected_option['value']))
        contents_divs_index += 1

        # Getting my_tags...
        tag_content = content_rows[contents_divs_index]
        tag_textarea = tag_content.find(
            name="textarea", attrs={"name": "tags"})
        self.__my_tags = frozenset(tag_textarea.text.split(self.__TAG_SEPARATOR))
        contents_divs_index += 1

        # Getting start date
        start_month_date_node = content_rows[contents_divs_index].find(
            name="select", attrs={"name": "startMonth"})
        if start_month_date_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        start_month_date = start_month_date_node.find(
            name="option", attrs={"selected": ""})

        start_day_date_node = content_rows[contents_divs_index].find(
            name="select", attrs={"name": "startDay"})
        if start_day_date_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        start_day_date = start_day_date_node.find(
            name="option", attrs={"selected": ""})

        start_year_date_node = content_rows[contents_divs_index].find(
            name="select", attrs={"name": "startYear"})
        if start_year_date_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        start_year_date = start_year_date_node.find(
            name="option", attrs={"selected": ""})

        # Dates are stored as 'mmddyyyy' strings, zero padded.
        start_month_date = str(start_month_date['value']).zfill(2)
        start_day_date = str(start_day_date['value']).zfill(2)
        start_year_date = str(start_year_date['value']).zfill(2)
        self.__my_start_date = start_month_date + \
            start_day_date + start_year_date
        contents_divs_index += 1

        # Getting end date
        end_month_date_node = content_rows[contents_divs_index].find(
            name="select", attrs={"name": "endMonth"})
        if end_month_date_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        end_month_date = end_month_date_node.find(
            name="option", attrs={"selected": ""})

        end_day_date_node = content_rows[contents_divs_index].find(
            name="select", attrs={"name": "endDay"})
        if end_day_date_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        end_day_date = end_day_date_node.find(
            name="option", attrs={"selected": ""})

        end_year_date_node = content_rows[contents_divs_index].find(
            name="select", attrs={"name": "endYear"})
        if end_year_date_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        end_year_date = end_year_date_node.find(
            name="option", attrs={"selected": ""})

        end_month_date = str(end_month_date['value']).zfill(2)
        end_day_date = str(end_day_date['value']).zfill(2)
        end_year_date = str(end_year_date['value']).zfill(2)
        self.__my_end_date = end_month_date + end_day_date + end_year_date
        contents_divs_index += 1

        # Getting fansub group
        fansub_group_content = content_rows[contents_divs_index]
        fansub_group_input = fansub_group_content.find(
            name="input", attrs={"name": "fansub_group"})
        self.__my_fan_sub_groups = fansub_group_input.text
        contents_divs_index += 1

        # Getting priority
        priority_node = content_rows[contents_divs_index].find(
            name="select", attrs={"name": "priority"})
        if priority_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        selected_priority_node = priority_node.find(
            name="option", attrs={"selected": ""})
        if selected_priority_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        self.__my_priority = int(selected_priority_node['value'])
        contents_divs_index += 1

        # Getting storage
        storage_type_node = content_rows[contents_divs_index].find(
            name="select", attrs={"id": "storage"})
        if storage_type_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        selected_storage_type_node = storage_type_node.find(
            name="option", attrs={"selected": ""})
        if selected_storage_type_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        self.__my_storage_type = int(selected_storage_type_node['value'])

        storage_value_node = content_rows[contents_divs_index].find(
            name="input", attrs={"id": "storageValue"})
        if storage_value_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        self.__my_storage_value = float(storage_value_node['value'])
        contents_divs_index += 1

        # Getting downloaded episodes
        downloaded_episodes_node = content_rows[contents_divs_index].\
            find(name="input", attrs={'id': "epDownloaded",
                                      'name': 'list_downloaded_eps'})
        if downloaded_episodes_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        # Bug fix: original used '==' (a no-op comparison), so the parsed
        # value was silently discarded.
        self.__my_download_episodes = int(downloaded_episodes_node['value'])
        contents_divs_index += 1

        # Getting time rewatched
        times_rewatched_node = content_rows[contents_divs_index].find(
            name="input", attrs={'name': 'list_times_watched'})
        if times_rewatched_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        # Bug fix: original used '==' (a no-op comparison) and accessed the
        # node before the None check above.
        self.__my_times_rewatched = int(times_rewatched_node['value'])
        contents_divs_index += 1

        # Getting rewatched value
        rewatch_value_node = content_rows[contents_divs_index].find(
            name="select", attrs={'name': 'list_rewatch_value'})
        if rewatch_value_node is None:
            raise exceptions.FailedToReloadError(content_rows)
        rewatch_value_option = rewatch_value_node.find(
            name='option', attrs={'selected': ''})
        if rewatch_value_option is None:
            raise exceptions.FailedToReloadError(content_rows)
        self.__my_rewatch_value = int(rewatch_value_option['value'])
        contents_divs_index += 1

        # Getting comments
        comment_content = content_rows[contents_divs_index]
        comment_textarea = comment_content.find(
            name="textarea", attrs={"name": "list_comments"})
        self.__my_comments = comment_textarea.text
        contents_divs_index += 1

        # Getting discuss flag
        # NOTE(review): the node is located but its value is never stored in
        # my_enable_discussion — confirm whether this is intentional.
        discuss_node = content_rows[contents_divs_index].find(
            name='select', attrs={"name": "discuss"})
        if discuss_node is None:
            raise exceptions.FailedToReloadError(content_rows)

        self._is_my_loaded = True

    def to_xml(self):
        """
        :return: the anime as an xml string.
        :rtype: str
        """
        # MY_MAL_XML_TEMPLATE is resolved on the wrapped Anime object via
        # __getattr__ delegation.
        data = self.MY_MAL_XML_TEMPLATE.format(
            self.my_completed_episodes,
            self.my_status,
            self.my_score,
            self.my_download_episodes,
            self.my_storage_type,
            self.my_storage_value,
            self.my_times_rewatched,
            self.my_rewatch_value,
            self.my_start_date,
            self.my_end_date,
            self.my_priority,
            self.my_enable_discussion,
            self.my_is_rewatching,
            self.my_comments,
            self.my_fan_sub_groups,
            self.__TAG_SEPARATOR.join(self.my_tags)
        )
        return data

    def add(self, account):
        """
        Adding the anime to an account.
        If its the same account as this owner returning this.

        :param account: account to connect to the anime.
        :type account: :class:`account.Account`
        :return: anime connected to the account
        :rtype: :class:`account_objects.my_anime.MyAnime`
        """
        if account == self._account:
            return self
        return self.obj.add(account)

    def update(self):
        """
        Updating the anime data on MAL.

        :raises exceptions.MyAnimeListApiUpdateError: if the API does not
            answer 'Updated'.
        """
        # The MAL API requires the XML payload on a single line.
        xml = ''.join(map(lambda x: x.strip(), self.to_xml().splitlines()))
        update_url = self.__MY_MAL_UPDATE_URL.format(self.id)
        ret = self._account.auth_connect(
            update_url,
            data='data=' + xml,
            headers={'Content-Type': 'application/x-www-form-urlencoded'}
        )
        if ret != 'Updated':
            raise exceptions.MyAnimeListApiUpdateError(ret)

    def delete(self):
        """
        Deleting the anime from the list.

        :raises exceptions.MyAnimeListApiDeleteError: if the API does not
            answer 'Deleted'.
        """
        xml = ''.join(map(lambda x: x.strip(), self.to_xml().splitlines()))
        delete_url = self.__MY_MAL_DELETE_URL.format(self.id)
        ret = self._account.auth_connect(
            delete_url,
            data='data=' + xml,
            headers={'Content-Type': 'application/x-www-form-urlencoded'}
        )
        if ret != 'Deleted':
            raise exceptions.MyAnimeListApiDeleteError(ret)

    def increase(self) -> bool:
        """
        Increasing the watched episode.
        If it is completed, setting the flag of rewatching.

        :return: True if succeed to set every.
        :rtype: bool
        """
        if self.my_completed_episodes >= self.obj.episodes:
            return False
        # NOTE(review): this branch flags a rewatch when starting from zero
        # with a non-completed status — confirm the intended condition.
        if 0 == self.my_completed_episodes and 2 != self.my_status:
            self.my_is_rewatching = True
            self.my_times_rewatched += 1
            self.my_completed_episodes = 0
        self.my_completed_episodes += 1
        return True

    def increase_downloaded(self) -> bool:
        """
        Increasing the downloaded episode.

        :return: True if succeed to set every.
        :rtype: bool
        """
        if self.my_download_episodes >= self.obj.episodes:
            return False
        self.my_download_episodes += 1
        return True

    @property
    def is_completed(self) -> bool:
        """
        :return: True if the number of completed episode is equal to number of episode in anime.
        :rtype: bool
        """
        return self.my_completed_episodes == self.obj.episodes

    def set_completed(self) -> bool:
        """
        Setting the anime as completed.

        :return: True if succeed
        :rtype: bool
        """
        # Cannot complete a still-airing anime (unknown episode count).
        if self.obj.episodes == float('inf'):
            return False
        self.my_completed_episodes = self.obj.episodes
        self.my_is_rewatching = False
        self.my_status = 2
        return True

    def set_completed_download(self) -> bool:
        """
        Setting the number of downloaded episodes as completed.

        :return: True if succeed
        :rtype: bool
        """
        if self.obj.episodes == float('inf'):
            return False
        self.my_download_episodes = self.obj.episodes
        return True

    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped Anime object.
        return getattr(self.obj, name)

    def __dir__(self):
        return list(set(dir(type(self)) + list(self.__dict__.keys()) + dir(self.obj)))

    def __eq__(self, other):
        return self.obj == other

    def __hash__(self):
        return hash(self.obj)

    def __repr__(self):
        title = " '{0:s}'".format(self.title) if self.obj._is_loaded else ''
        return "<{0:s}{1:s} of account '{2:s}' id={3:d}>".format(
            self.__class__.__name__, title, self._account.username, self.id)
|
Probing the force-induced dissociation of aptamer-protein complexes. Aptamers are emerging as powerful synthetic bioreceptors for fundamental research, diagnostics, and therapeutics. For further advances, it is important to gain a better understanding of how aptamers interact with their targets. In this work, we have used magnetic force-induced dissociation experiments to study the dissociation process of two different aptamer-protein complexes, namely for hIgE and Ara h 1. The measurements show that both complexes exhibit dissociation with two distinct regimes: the dissociation rate depends weakly on the applied force at high forces but depends stronger on force at low forces. We attribute these observations to the existence of at least one intermediate state and at least two energy barriers in the aptamer-protein interaction. The measured spontaneous dissociation rate constants were validated with SPR using both Biacore and fiber optic technology. This work demonstrates the potential of the magnetic force-induced dissociation approach for an in-depth study of the dissociation kinetics of aptamer-protein bonds, which is not possible with SPR technologies. The results will help in the development and expansion of aptamers as bioaffinity probes. |
<filename>iv/symbol.h
#ifndef IV_SYMBOL_H_
#define IV_SYMBOL_H_
#include <cstddef>
#include <iv/symbol_fwd.h>
#include <iv/default_symbol_provider.h>
namespace iv {
namespace core {
typedef const std::u16string* StringSymbol;
namespace symbol {
// Sentinel symbol built from a null string pointer; used as the "empty"
// key slot value below. NOTE(review): assumes no real symbol is ever
// created from a null pointer — confirm against MakeSymbol's contract.
static const Symbol kDummySymbol =
    MakeSymbol(static_cast<std::u16string*>(nullptr));

// Symbol key traits for QHashMap
struct KeyTraits {
  // Hashes a symbol with the standard library hasher.
  static unsigned hash(Symbol val) {
    return std::hash<Symbol>()(val);
  }

  // Symbols compare by identity (operator== on the handle).
  static bool equals(Symbol lhs, Symbol rhs) {
    return lhs == rhs;
  }

  // because of Array index
  static Symbol null() { return kDummySymbol; }
};
} } } // namespace iv::core::symbol
#endif // IV_SYMBOL_H_
|
<gh_stars>0
/*
* (C) Copyright 2019 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cybernian.ether_io.drivers;
import java.io.IOException;
import com.cybernian.ether_io.core.IO24Core;
/**
 * Provides data communication with the Ether IO24TPC digital I/O Ethernet board.
 *
 * @author <NAME>
 */
public class IO24TPC extends IO24Core {

    /**
     * Creates a Datagram Socket to communicate with the I/O board at the given IP
     * address.
     *
     * @param ipAddress
     *            IP address of the I/O board.
     * @throws IOException
     *             Thrown if the Datagram Socket fails to be created.
     *
     * @see <code>DatagramSocket</code>
     */
    public IO24TPC(String ipAddress) throws IOException {
        super(ipAddress);
    }

    /**
     * Creates a Datagram Socket, with the specified timeout, to communicate with the I/O board at the given IP
     * address.
     *
     * @param ipAddress
     *            IP address of the I/O board.
     * @param datagramSocketTimeout
     *            The time out to be used by the UDP Socket connection.
     *
     * @throws IOException
     *             Thrown if the Datagram Socket fails to be created.
     *
     * @see <code>DatagramSocket</code>
     */
    public IO24TPC(String ipAddress, int datagramSocketTimeout) throws IOException {
        super(ipAddress, datagramSocketTimeout);
    }

    /* (non-Javadoc)
     * @see com.cbt.io24.IO24Core#readPortPullUp(char)
     */
    @Override
    public byte[] readPortPullUp(char ioPort) throws IOException {
        // Number of bytes returned in the response packet for this read command.
        final int BYTES_RETURNED = 3;

        this.isPortLetterValid(ioPort);
        // Convert the port letter into a port-read command and build the request packet.
        byte[] sendData = new byte[] { (byte) '%', (byte) Character.toLowerCase(ioPort) };
        return this.readData(sendData, BYTES_RETURNED);
    }

    /* (non-Javadoc)
     * @see com.cbt.io24.IO24Core#writePortPullUp(char, int)
     */
    @Override
    public void writePortPullUp(char ioPort, int value) throws IOException {
        this.isPortLetterValid(ioPort);
        byte[] data = new byte[] { (byte) '%', (byte) Character.toUpperCase(ioPort), (byte) value };
        this.send(data);
    }

    /**
     * Raises the Pin Value on the specified IO Pin. The corresponding IO Pin must
     * be set to Output for this command to have effect.
     *
     * @param pinNumber
     *            0-23
     *            Port A Pins correspond to 0 - 7
     *            Port B Pins correspond to 8 - 15
     *            Port C Pins correspond to 16 - 23.
     * @throws IOException I/O exception of some sort has occurred.
     */
    public void raiseIO_Pin(int pinNumber) throws IOException {
        byte[] data = new byte[] { (byte) 'H', (byte) pinNumber };
        this.send(data);
    }

    /**
     * Lowers the Pin Value on the specified IO Pin. The corresponding IO Pin must
     * be set to Output for this command to have effect.
     *
     * @param pinNumber
     *            0-23
     *            Port A Pins correspond to 0 - 7
     *            Port B Pins correspond to 8 - 15
     *            Port C Pins correspond to 16 - 23.
     * @throws IOException I/O exception of some sort has occurred.
     */
    public void lowerIO_Pin(int pinNumber) throws IOException {
        byte[] data = new byte[] { (byte) 'L', (byte) pinNumber };
        this.send(data);
    }
}
|
The Teachers Roles in a Student-Centered Audio-Video Speaking Class Abstract The extensive use of modern information technology in Chinese college English teaching has greatly changed the role of the teacher. Through a case study of the authors own audio-video speaking classes, this article aims to discover the appropriate role the teacher should play in a computer-based teaching model, and its positive effects on cultivating students competence in listening and speaking. From the analysis of correlated data and a questionnaire, it was found that the teachers role tends to be multidimensional. These multiple roles include an activity designer and organizer, coordinator, the source of background information, and an assessor. The results of the study show that such roles have a marked impact on students listening and speaking abilities and their language proficiency development. |
<reponame>krisode/dream-traveling
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package huyttd.controllers;
import java.io.File;
import javax.servlet.ServletContext;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import javax.servlet.annotation.WebListener;
import org.apache.log4j.PropertyConfigurator;
@WebListener
public class LogServlet implements ServletContextListener {

    /**
     * Reads the "log4j-config-location" init parameter, resolves it against
     * the web application's root directory, and hands the resulting absolute
     * path to log4j's PropertyConfigurator.
     */
    @Override
    public void contextInitialized(ServletContextEvent sce) {
        ServletContext context = sce.getServletContext();
        String relativePath = context.getInitParameter("log4j-config-location");
        String absolutePath = context.getRealPath("") + File.separator + relativePath;
        PropertyConfigurator.configure(absolutePath);
    }

    /** No resources of our own to release on shutdown. */
    @Override
    public void contextDestroyed(ServletContextEvent sce) {
        // scheduler.shutdownNow();
    }
}
|
<reponame>jcm1024/link-push-job
package org.link.push.job.core.schedule;
import org.link.push.job.core.dto.LinkJobInfo;
import org.link.push.job.core.handler.AbstractJobHandler;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextClosedEvent;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import org.springframework.scheduling.support.CronTrigger;
import org.springframework.stereotype.Component;
import javax.annotation.Resource;
import java.util.Enumeration;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledFuture;
/**
* 定时任务管理器
*
* @author ginger
* @date 2021/1/7 14:04
*/
@Component
public class LinkScheduleManager implements ApplicationListener<ContextClosedEvent> {

    @Resource
    private ThreadPoolTaskScheduler threadPoolTaskScheduler;

    /**
     * Live tasks keyed by job id. Entries are removed again in
     * {@link #cancel(Long)} so the map cannot grow without bound
     * (the previous implementation leaked cancelled entries).
     */
    private final ConcurrentHashMap<Long, ScheduledFuture<?>> TASK_MAP = new ConcurrentHashMap<>();

    /**
     * Schedules a job with the given cron expression and handler. If a task
     * with the same id is already registered, its old trigger is cancelled
     * so the job cannot fire twice.
     *
     * @param linkJobInfo        job definition; must carry a non-null id and cron
     * @param abstractJobHandler runnable that executes the job
     * @throws RuntimeException if any argument is missing or scheduling fails
     */
    public void add(LinkJobInfo linkJobInfo, AbstractJobHandler<?> abstractJobHandler) {
        if (linkJobInfo == null) {
            throw new RuntimeException("linkJobInfo is null.");
        }
        if (linkJobInfo.getId() == null) {
            throw new RuntimeException("job's id is null.");
        }
        if (linkJobInfo.getCorn() == null) {
            throw new RuntimeException("job's corn is null.");
        }
        if (abstractJobHandler == null) {
            throw new RuntimeException("abstractJobHandler is null.");
        }
        CronTrigger cronTrigger = new CronTrigger(linkJobInfo.getCorn());
        ScheduledFuture<?> schedule = threadPoolTaskScheduler.schedule(abstractJobHandler, cronTrigger);
        if (schedule == null) {
            throw new RuntimeException("schedule is null.");
        }
        // Replacing an existing job with the same id: stop the displaced
        // trigger so the old schedule does not keep firing.
        ScheduledFuture<?> previous = TASK_MAP.put(linkJobInfo.getId(), schedule);
        if (previous != null) {
            previous.cancel(Boolean.TRUE);
        }
    }

    /**
     * Cancels the task registered under the given job id, if any, and drops
     * it from the task map.
     *
     * @param jobId id of the job to cancel; unknown ids are ignored
     */
    public void cancel(Long jobId) {
        // TODO: automatically route the cancellation to the server that is
        // executing the job.
        ScheduledFuture<?> schedule = TASK_MAP.remove(jobId);
        if (schedule != null) {
            schedule.cancel(Boolean.TRUE);
        }
    }

    /** Cancels every remaining task when the Spring context shuts down. */
    @Override
    public void onApplicationEvent(ContextClosedEvent contextClosedEvent) {
        for (ScheduledFuture<?> scheduledFuture : TASK_MAP.values()) {
            scheduledFuture.cancel(Boolean.TRUE);
        }
        TASK_MAP.clear();
    }
}
|
<gh_stars>10-100
// Copyright 2018, Oath Inc
// Licensed under the terms of the Apache 2.0 license. See LICENSE file in https://github.com/r2ishiguro/mls for terms.
package ds
import (
"testing"
"net"
"fmt"
"time"
"strings"
"io"
"bufio"
)
// Test configuration: loopback address the server listens on, number of
// messages the writing client sends, and the size of each message.
const (
	testAddr = "localhost:9897"
	numTests = 10
	testBufSize = 1024*1024
)

// Client pairs a client-side connection with a numeric id used in output.
type Client struct {
	conn net.Conn
	id int
}
// TestNet exercises a minimal TCP fan-out server: every testBufSize payload
// received from any client is rebroadcast to all connections seen so far.
// Ten clients connect, the first one writes numTests messages whose first
// byte counts 0..numTests-1, and every client checks that it observes the
// full ordered sequence.
//
// NOTE(review): t.Fatal / t.Fatalf are called from goroutines other than
// the test goroutine; the testing package documents that FailNow (and thus
// Fatal) must run on the test goroutine. These should become t.Error +
// return — confirm before relying on failure reporting here.
//
// NOTE(review): the accept loop appends to `conns` while the per-connection
// handler goroutines range over it — an unsynchronized read/write race.
func TestNet(t *testing.T) {
	l, err := net.Listen("tcp", testAddr)
	if err != nil {
		t.Fatal(err)
	}
	// Server goroutine: accept forever, remember each connection, and
	// rebroadcast every complete payload to all known connections.
	go func(l net.Listener) {
		var conns []net.Conn
		for {
			conn, err := l.Accept()
			if err != nil {
				t.Fatal(err)
			}
			fmt.Printf("accepted %s\n", conn.RemoteAddr().String())
			conns = append(conns, conn)
			go func(conn net.Conn) {
				buf := make([]byte, testBufSize)
				bio := bufio.NewReader(conn)
				for {
					n, err := io.ReadFull(bio, buf)
					if err != nil {
						if err == io.EOF {
							break
						}
						t.Fatalf("server: Read error: %s", err)
					}
					if n != len(buf) {
						t.Errorf("server: got %d bytes", n)
					}
					fmt.Printf("server: got %d\n", buf[0])
					// Fan the payload out to every client seen so far.
					for _, w := range conns {
						n, err = w.Write(buf)
						if err != nil {
							t.Fatalf("server: conn.Write error: %s", err)
						}
						if n != len(buf) {
							t.Errorf("server: sent %d bytes", n)
						}
					}
				}
			}(conn)
		}
	}(l)
	// Give the listener a moment to come up before dialing.
	time.Sleep(1 * time.Second)
	var clients []Client
	for nclients := 0; nclients < 10; nclients++ {
		conn, err := net.Dial("tcp", testAddr)
		if err != nil {
			t.Fatal(err)
		}
		c := Client{conn, nclients}
		// Per-client reader: expects payloads whose first byte increments
		// from 0 through numTests-1, in order, then EOF/close.
		go func(c *Client) {
			var epoch = 0
			buf := make([]byte, testBufSize)
			bio := bufio.NewReader(c.conn)
			for {
				n, err := io.ReadFull(bio, buf)
				if err != nil {
					if err == io.EOF {
						break
					}
					// A read racing conn.Close surfaces as a plain error
					// whose only distinguishing feature is its message text.
					str := err.Error()
					if strings.Contains(str, "use of closed network connection") {
						break
					}
					t.Fatalf("client: conn.Read error: %s", err)
				}
				if n != len(buf) {
					t.Errorf("client: got %d bytes", n)
				}
				fmt.Printf("client[%d]: got %d\n", c.id, buf[0])
				if epoch != int(buf[0]) {
					t.Errorf("client[%d]: got %d, have %d\n", c.id, buf[0], epoch)
				}
				epoch++
			}
			if epoch != numTests {
				t.Errorf("client[%d]: epoch = %d", c.id, epoch)
			}
		}(&c)
		clients = append(clients, c)
	}
	time.Sleep(100 * time.Millisecond)
	// The first client is the sole writer.
	c := clients[0]
	buf := make([]byte, testBufSize)
	for i := 0; i < numTests; i++ {
		buf[0] = byte(i)
		n, err := c.conn.Write(buf)
		if err != nil {
			t.Fatal(err)
		}
		if n != len(buf) {
			t.Errorf("sent %d bytes", n)
		}
	}
	// Let the broadcasts drain, then close everything so the reader
	// goroutines see EOF / closed-connection and run their final checks.
	time.Sleep(1 * time.Second)
	for _, c := range clients {
		c.conn.Close()
	}
}
|
Calculation of the costs of health care services for road accident victims in TDABC: a systematic review of the literature Abstract The purpose of this article is to conduct a systematic literature review on Time-Driven Activity-Based Costing (TDABC) of health care services for traffic accident victims, which could be effective in better understanding how consumption and resource use occur and in capturing the cost of providing care more accurately for relevant decisions. We conducted a systematic search of the international literature in English and French, covering the PubMed/MEDLINE, Web of Science and Scopus databases. These databases were explored in terms of their relevance to health care. The objective is to provide a synthesis of the current state of the literature on modelling the cost of health care services for road traffic victims in TDABC. TDABC is applicable in the field of health care services for road accident victims and can help to make costing processes more efficient. It thus overcomes a major challenge associated with traditional cost accounting methods. Nine studies met the inclusion criteria, spanning health care disciplines such as surgery, home care, and general health care services. The application of TDABC should be progressively integrated into the functional health care systems for road accident victims, following and building on the recommendations described in this study and the problem definition we have formulated. The aim of this work is to provide health care organizations with an overview of stable and reliable cost accounting practices associated with the care cycle of road accident victims.
Tired of all the winning.
On Monday, President Trump took time from his busy schedule of re-declaring national holidays and threatening nuclear war with North Korea to play a little golf in the rain. His companion was South Carolina senator Lindsey Graham, with whom he has had a complicated relationship.
Afterward, Graham, throwing his dignity by the wayside, tweeted that Trump had shot a 73, and that he had beaten the senator soundly.
Really enjoyed a round of golf with President @realDonaldTrump today.
President Trump shot a 73 in windy and wet conditions! — Lindsey Graham (@LindseyGrahamSC) October 9, 2017
How bad did he beat me? I did better in the presidential race than today on the golf course!
Great fun. Great host. — Lindsey Graham (@LindseyGrahamSC) October 9, 2017
President Trump plays a lot of golf (really, a lot), but unless his long game is as brilliant as his ability to polarize the country, Graham’s story is implausible on its face:
That course is a par-72. The Senior PGA was held there this year. Look at the scores of the world's top seniors https://t.co/BLQPH4K7rx pic.twitter.com/JTQSgZGaHr — Daniel Dale (@ddale8) October 9, 2017
Trump shot a 73 the same way Kim Jong Il once hit five holes in one in a single round.
Maybe, rather than muster an attempt at plausibility, Graham should have fully embraced the absurdity of his situation. |
<gh_stars>100-1000
#[cfg(target_arch = "x86")]
use std::arch::x86::{
__m128i,
_mm_and_si128,
_mm_cmpeq_epi8,
_mm_extract_epi32,
_mm_loadu_si128,
_mm_sad_epu8,
_mm_set1_epi8,
_mm_setzero_si128,
_mm_sub_epi8,
_mm_xor_si128,
};
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::{
__m128i,
_mm_and_si128,
_mm_cmpeq_epi8,
_mm_extract_epi32,
_mm_loadu_si128,
_mm_sad_epu8,
_mm_set1_epi8,
_mm_setzero_si128,
_mm_sub_epi8,
_mm_xor_si128,
};
/// Broadcasts the unsigned byte `a` into every lane of a 128-bit vector.
///
/// Thin wrapper around `_mm_set1_epi8`, which only accepts a signed byte;
/// the `as i8` cast preserves the bit pattern.
#[target_feature(enable = "sse2")]
pub unsafe fn _mm_set1_epu8(a: u8) -> __m128i {
    _mm_set1_epi8(a as i8)
}
/// Lane-wise "not equal": each byte lane is 0xFF where `a != b` and 0x00
/// where they match (XOR with all-ones inverts `_mm_cmpeq_epi8`'s result).
#[target_feature(enable = "sse2")]
pub unsafe fn mm_cmpneq_epi8(a: __m128i, b: __m128i) -> __m128i {
    _mm_xor_si128(_mm_cmpeq_epi8(a, b), _mm_set1_epi8(-1))
}
/// 16 zero bytes followed by 16 0xFF bytes. Loading 16 bytes at offset
/// `k` (0..=16) yields a vector whose first `16 - k` lanes are zero and
/// last `k` lanes are 0xFF — used by the chunk functions to mask out the
/// bytes that were already counted when re-reading the slice's final,
/// overlapping 16-byte window.
const MASK: [u8; 32] = [
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
];
/// Loads 16 bytes starting at `slice[offset]` as an unaligned vector.
///
/// Safety: the caller must guarantee `offset + 16 <= slice.len()`.
#[target_feature(enable = "sse2")]
unsafe fn mm_from_offset(slice: &[u8], offset: usize) -> __m128i {
    // `add(offset)` is the idiomatic unsigned form of
    // `offset(offset as isize)` for raw-pointer arithmetic.
    _mm_loadu_si128(slice.as_ptr().add(offset) as *const _)
}
/// Horizontal sum of the 16 unsigned byte lanes of `u8s`.
///
/// `_mm_sad_epu8` against zero produces two partial sums (lanes 0-7 and
/// 8-15) in the low and high 64-bit halves; extracting dwords 0 and 2 and
/// adding them yields the total.
#[target_feature(enable = "sse2")]
unsafe fn sum(u8s: &__m128i) -> usize {
    let sums = _mm_sad_epu8(*u8s, _mm_setzero_si128());
    (_mm_extract_epi32(sums, 0) + _mm_extract_epi32(sums, 2)) as usize
}
/// Counts occurrences of `needle` in `haystack` (length must be >= 16)
/// using SSE2.
///
/// Strategy: `_mm_cmpeq_epi8` yields 0xFF (-1) per matching byte lane, so
/// subtracting the compare result from a per-lane counter increments the
/// matching lanes by one. A lane holds at most 255 before overflowing,
/// which is why the counters are flushed into `count` via `sum` at least
/// every 255 iterations. The final partial chunk is handled by re-reading
/// the last 16 bytes of the slice and masking off the lanes that were
/// already counted (see `MASK`) — hence the `len() >= 16` requirement.
#[target_feature(enable = "sse2")]
pub unsafe fn chunk_count(haystack: &[u8], needle: u8) -> usize {
    assert!(haystack.len() >= 16);
    let mut offset = 0;
    let mut count = 0;
    let needles = _mm_set1_epu8(needle);
    // Bulk loop: 255 iterations * 16 bytes = 4080 bytes per counter flush.
    while haystack.len() >= offset + 16 * 255 {
        let mut counts = _mm_setzero_si128();
        for _ in 0..255 {
            counts = _mm_sub_epi8(
                counts,
                _mm_cmpeq_epi8(mm_from_offset(haystack, offset), needles)
            );
            offset += 16;
        }
        count += sum(&counts);
    }
    // One more bounded pass: up to 128 * 16 = 2048 bytes.
    if haystack.len() >= offset + 16 * 128 {
        let mut counts = _mm_setzero_si128();
        for _ in 0..128 {
            counts = _mm_sub_epi8(
                counts,
                _mm_cmpeq_epi8(mm_from_offset(haystack, offset), needles)
            );
            offset += 16;
        }
        count += sum(&counts);
    }
    // Remaining whole 16-byte chunks.
    let mut counts = _mm_setzero_si128();
    for i in 0..(haystack.len() - offset) / 16 {
        counts = _mm_sub_epi8(
            counts,
            _mm_cmpeq_epi8(mm_from_offset(haystack, offset + i * 16), needles)
        );
    }
    // Tail: re-read the slice's last 16 bytes and keep only the
    // `len % 16` lanes that were not covered by a whole chunk.
    if haystack.len() % 16 != 0 {
        counts = _mm_sub_epi8(
            counts,
            _mm_and_si128(
                _mm_cmpeq_epi8(mm_from_offset(haystack, haystack.len() - 16), needles),
                mm_from_offset(&MASK, haystack.len() % 16)
            )
        );
    }
    count += sum(&counts);
    count
}
/// Per-lane test for "is not a UTF-8 continuation byte": a lane is all
/// ones unless its top two bits are 0b10 (the continuation marker).
#[target_feature(enable = "sse2")]
unsafe fn is_leading_utf8_byte(u8s: __m128i) -> __m128i {
    mm_cmpneq_epi8(_mm_and_si128(u8s, _mm_set1_epu8(0b1100_0000)), _mm_set1_epu8(0b1000_0000))
}
/// Counts Unicode scalar values in `utf8_chars` (assumed to be valid
/// UTF-8, length >= 16) using SSE2, by counting the bytes that are not
/// continuation bytes — every scalar value contributes exactly one such
/// byte. Structure mirrors `chunk_count`: per-lane counters flushed at
/// least every 255 iterations, plus a masked overlapping read for the
/// final partial chunk.
#[target_feature(enable = "sse2")]
pub unsafe fn chunk_num_chars(utf8_chars: &[u8]) -> usize {
    assert!(utf8_chars.len() >= 16);
    let mut offset = 0;
    let mut count = 0;
    // Bulk loop: 255 iterations * 16 bytes = 4080 bytes per counter flush.
    while utf8_chars.len() >= offset + 16 * 255 {
        let mut counts = _mm_setzero_si128();
        for _ in 0..255 {
            counts = _mm_sub_epi8(
                counts,
                is_leading_utf8_byte(mm_from_offset(utf8_chars, offset))
            );
            offset += 16;
        }
        count += sum(&counts);
    }
    // One more bounded pass: up to 128 * 16 = 2048 bytes.
    if utf8_chars.len() >= offset + 16 * 128 {
        let mut counts = _mm_setzero_si128();
        for _ in 0..128 {
            counts = _mm_sub_epi8(
                counts,
                is_leading_utf8_byte(mm_from_offset(utf8_chars, offset))
            );
            offset += 16;
        }
        count += sum(&counts);
    }
    // Remaining whole 16-byte chunks.
    let mut counts = _mm_setzero_si128();
    for i in 0..(utf8_chars.len() - offset) / 16 {
        counts = _mm_sub_epi8(
            counts,
            is_leading_utf8_byte(mm_from_offset(utf8_chars, offset + i * 16))
        );
    }
    // Tail: re-read the last 16 bytes, masking out already-counted lanes.
    if utf8_chars.len() % 16 != 0 {
        counts = _mm_sub_epi8(
            counts,
            _mm_and_si128(
                is_leading_utf8_byte(mm_from_offset(utf8_chars, utf8_chars.len() - 16)),
                mm_from_offset(&MASK, utf8_chars.len() % 16)
            )
        );
    }
    count += sum(&counts);
    count
}
|
<filename>decoder/Cdef.cpp<gh_stars>1-10
/*
* Copyright 2020, av1dec authors
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "Cdef.h"
#include "Parser.h"
#include <limits>
namespace YamiAv1 {
using namespace Yami;
// Binds the filter to one decoded frame, caching references to the
// frame's sequence header and its per-frame CDEF parameters.
Cdef::Cdef(const ConstFramePtr& frame)
    : m_frame(frame)
    , m_sequence(*m_frame->m_sequence)
    , m_cdef(m_frame->m_cdef)
{
}
// Runs CDEF over the whole frame and returns the filtered copy (or null
// when the copy cannot be allocated). Iterates in 8x8-pixel steps (2 mode
// info units); each step looks up the CDEF preset index of its enclosing
// 64x64 unit and filters the block with it.
std::shared_ptr<YuvFrame> Cdef::filter(const std::shared_ptr<YuvFrame>& frame)
{
    std::shared_ptr<YuvFrame> cdef = YuvFrame::create(frame);
    if (!cdef)
        return cdef;
    // Step sizes in 4x4 mode-info units: 2 for an 8x8 block, 16 for 64x64.
    int step4 = Num_4x4_Blocks_Wide[BLOCK_8X8];
    int cdefSize4 = Num_4x4_Blocks_Wide[BLOCK_64X64];
    int cdefMask4 = ~(cdefSize4 - 1);
    for (int r = 0; r < m_frame->MiRows; r += step4) {
        for (int c = 0; c < m_frame->MiCols; c += step4) {
            // Round (r, c) down to the top-left of the enclosing 64x64 unit
            // to look up that unit's CDEF preset index.
            int baseR = r & cdefMask4;
            int baseC = c & cdefMask4;
            int idx = m_cdef.cdef_idx[baseR][baseC];
            cdef_block(cdef, frame, r, c, idx);
        }
    }
    return cdef;
}
// Remaps the luma filter direction to the chroma direction, indexed by
// [subsampling_x][subsampling_y][luma direction].
const int Cdef_Uv_Dir[2][2][8] = {
    { { 0, 1, 2, 3, 4, 5, 6, 7 },
        { 1, 2, 2, 2, 3, 4, 6, 0 } },
    { { 7, 0, 2, 4, 5, 6, 6, 6 },
        { 0, 1, 2, 3, 4, 5, 6, 7 } }
};
// Convenience accessor for the frame's per-4x4 mode info at (row, col).
const ModeInfoBlock& Cdef::getModeInfo(int row, int col)
{
    return m_frame->getModeInfo(row, col);
}
// Filters one 8x8 block (2x2 mode-info units). `idx` selects the CDEF
// strength preset of the enclosing 64x64 unit; -1 means filtering is
// disabled there. Blocks whose four 4x4 sub-blocks are all skip-coded are
// left untouched.
void Cdef::cdef_block(const std::shared_ptr<YuvFrame>& cdef,
    const std::shared_ptr<YuvFrame>& frame,
    int r, int c, int idx)
{
    if (idx == -1)
        return;
    int coeffShift = m_sequence.BitDepth - 8;
    bool skip = (getModeInfo(r, c).Skip && getModeInfo(r + 1, c).Skip
        && getModeInfo(r, c + 1).Skip && getModeInfo(r + 1, c + 1).Skip);
    if (skip)
        return;
    // Estimate the dominant edge direction and its variance for this block.
    int yDir, var;
    cdefDirection(frame, r, c, yDir, var);
    int priStr = m_cdef.cdef_y_pri_strength[idx] << coeffShift;
    int secStr = m_cdef.cdef_y_sec_strength[idx] << coeffShift;
    int dir = (priStr == 0) ? 0 : yDir;
    // Scale the primary strength down in low-variance (flat) areas.
    int varStr = (var >> 6) ? std::min(FloorLog2(var >> 6), 12) : 0;
    priStr = (var ? (priStr * (4 + varStr) + 8) >> 4 : 0);
    int damping = m_cdef.CdefDamping + coeffShift;
    cdefFilter(cdef, frame, 0, r, c, priStr, secStr, damping, dir);
    // NOTE(review): NumPlanes is presumably 1 (monochrome) or 3, which
    // makes this condition always true; the AV1 spec filters chroma only
    // when more than one plane exists — confirm whether this should read
    // `NumPlanes > 1`.
    if (m_sequence.NumPlanes) {
        priStr = m_cdef.cdef_uv_pri_strength[idx] << coeffShift;
        secStr = m_cdef.cdef_uv_sec_strength[idx] << coeffShift;
        // Chroma uses a remapped direction and one less damping.
        dir = (priStr == 0) ? 0 : Cdef_Uv_Dir[m_sequence.subsampling_x][m_sequence.subsampling_y][yDir];
        damping = m_cdef.CdefDamping + coeffShift - 1;
        cdefFilter(cdef, frame, 1, r, c, priStr, secStr, damping, dir);
        cdefFilter(cdef, frame, 2, r, c, priStr, secStr, damping, dir);
    }
}
// Primary and secondary filter tap weights, indexed by the low bit of the
// (unshifted) primary strength and by tap distance (k = 0, 1).
const static int Cdef_Pri_Taps[2][2] = {
    { 4, 2 }, { 3, 3 }
};
const static int Cdef_Sec_Taps[2][2] = {
    { 2, 1 }, { 2, 1 }
};
// Soft-clips a pixel difference: the contribution keeps the sign of
// `diff`, never exceeds |diff|, and shrinks toward zero as |diff| grows
// past the threshold (rate controlled by `damping`). A zero threshold
// disables the tap entirely.
static int constrain(int diff, int threshold, int damping)
{
    if (!threshold)
        return 0;
    const int absDiff = std::abs(diff);
    const int shift = std::max(0, damping - FloorLog2(threshold));
    const int magnitude = CLIP3(0, absDiff, threshold - (absDiff >> shift));
    return diff < 0 ? -magnitude : magnitude;
}
// (row, col) offsets of the two primary taps for each of the 8 filter
// directions; the secondary taps reuse this table at (dir +/- 2) & 7.
const static int Cdef_Directions[8][2][2] = {
    { { -1, 1 }, { -2, 2 } },
    { { 0, 1 }, { -1, 2 } },
    { { 0, 1 }, { 0, 2 } },
    { { 0, 1 }, { 1, 2 } },
    { { 1, 1 }, { 2, 2 } },
    { { 1, 0 }, { 2, 1 } },
    { { 1, 0 }, { 2, 0 } },
    { { 1, 0 }, { 2, -1 } }
};
// True when (candidateR, candidateC), in mode-info units, lies inside the
// frame's mode-info grid.
bool Cdef::is_inside_filter_region(int candidateR, int candidateC)
{
    const bool rowInside = candidateR >= 0 && candidateR < m_frame->MiRows;
    const bool colInside = candidateC >= 0 && candidateC < m_frame->MiCols;
    return rowInside && colInside;
}
// Fetches the pixel `k` steps along direction `dir` (sign = +/-1) from the
// block-relative position (i, j) of plane origin (x0, y0). Sets
// CdefAvailable to false (and returns 0) when the tap lands outside the
// frame's mode-info grid.
uint8_t Cdef::cdef_get_at(const std::shared_ptr<YuvFrame>& frame,
    int plane, int x0, int y0, int i, int j, int dir, int k,
    int sign, int subX, int subY, bool& CdefAvailable)
{
    int y = y0 + i + sign * Cdef_Directions[dir][k][0];
    int x = x0 + j + sign * Cdef_Directions[dir][k][1];
    // Map the (possibly subsampled) pixel position back to mode-info units
    // for the bounds check.
    int candidateR = (y << subY) >> MI_SIZE_LOG2;
    int candidateC = (x << subX) >> MI_SIZE_LOG2;
    if (is_inside_filter_region(candidateR, candidateC)) {
        CdefAvailable = true;
        return frame->getPixel(plane, x, y);
    } else {
        CdefAvailable = false;
        return 0;
    }
}
// Applies the directional CDEF filter to one 8x8 luma block (or the
// co-located, possibly subsampled chroma block) of `frame`, writing the
// filtered pixels into `cdef`. Out-of-frame taps are simply skipped.
void Cdef::cdefFilter(const std::shared_ptr<YuvFrame>& cdef,
    const std::shared_ptr<YuvFrame>& frame,
    int plane, int r, int c, int priStr, int secStr, int damping, int dir)
{
    int coeffShift = m_sequence.BitDepth - 8;
    int subX = (plane > 0) ? m_sequence.subsampling_x : 0;
    int subY = (plane > 0) ? m_sequence.subsampling_y : 0;
    // Top-left pixel of the block in this plane, and the block's extent
    // after chroma subsampling.
    int x0 = (c * MI_SIZE) >> subX;
    int y0 = (r * MI_SIZE) >> subY;
    int w = 8 >> subX;
    int h = 8 >> subY;
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            int sum = 0;
            uint8_t x = frame->getPixel(plane, x0 + j, y0 + i);
            // Track the min/max of contributing pixels to clamp the result.
            uint8_t max = x;
            uint8_t min = x;
            // Two tap distances (k), each applied in both signs, along the
            // primary direction plus two secondary directions (dir +/- 2).
            for (int k = 0; k < 2; k++) {
                for (int sign = -1; sign <= 1; sign += 2) {
                    bool CdefAvailable;
                    uint8_t p = cdef_get_at(frame, plane, x0, y0, i, j, dir, k, sign, subX, subY, CdefAvailable);
                    if (CdefAvailable) {
                        sum += Cdef_Pri_Taps[(priStr >> coeffShift) & 1][k] * constrain(p - x, priStr, damping);
                        max = std::max(p, max);
                        min = std::min(p, min);
                    }
                    for (int dirOff = -2; dirOff <= 2; dirOff += 4) {
                        uint8_t s = cdef_get_at(frame, plane, x0, y0, i, j, (dir + dirOff) & 7, k, sign, subX, subY, CdefAvailable);
                        if (CdefAvailable) {
                            sum += Cdef_Sec_Taps[(priStr >> coeffShift) & 1][k] * constrain(s - x, secStr, damping);
                            max = std::max(s, max);
                            min = std::min(s, min);
                        }
                    }
                }
            }
            // Rounded >> 4 of the tap sum added to the centre pixel, then
            // clamped to the contributing pixels' range to avoid ringing.
            uint8_t pixel = CLIP3(min, max, x + ((8 + sum - (sum < 0)) >> 4));
            cdef->setPixel(plane, x0 + j, y0 + i, pixel);
        }
    }
}
// Normalization factors (840 / n scaled) used by cdefDirection to equalize
// the partial sums, which accumulate different numbers of pixels per line.
static const int Div_Table[9] = {
    0, 840, 420, 280, 210, 168, 140, 120, 105
};
// Estimates the dominant edge direction of the 8x8 luma block at mode-info
// position (r, c). For each of the 8 candidate orientations, pixel values
// (bias-removed, scaled to 8 bits) are accumulated along lines of that
// orientation; the orientation whose normalized sum of squared line totals
// is largest wins. `yDir` receives the winning direction and `var` a
// measure of how strongly it dominates the perpendicular one.
void Cdef::cdefDirection(const std::shared_ptr<YuvFrame>& frame,
    int r, int c, int& yDir, int& var)
{
    int cost[8], partial[8][15];
    for (int i = 0; i < 8; i++) {
        cost[i] = 0;
        for (int j = 0; j < 15; j++) {
            partial[i][j] = 0;
        }
    }
    int bestCost = 0;
    yDir = 0;
    int x0 = c << MI_SIZE_LOG2;
    int y0 = r << MI_SIZE_LOG2;
    uint8_t BitDepth = m_sequence.BitDepth;
    // Accumulate each pixel into the line it belongs to for every
    // orientation (diagonals, verticals, horizontals and their halves).
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            int x = (frame->getPixel(0, x0 + j, y0 + i) >> (BitDepth - 8)) - 128;
            partial[0][i + j] += x;
            partial[1][i + j / 2] += x;
            partial[2][i] += x;
            partial[3][3 + i - j / 2] += x;
            partial[4][7 + i - j] += x;
            partial[5][3 - i / 2 + j] += x;
            partial[6][j] += x;
            partial[7][i / 2 + j] += x;
        }
    }
    // Horizontal (2) and vertical (6): 8 lines of 8 pixels each.
    for (int i = 0; i < 8; i++) {
        cost[2] += partial[2][i] * partial[2][i];
        cost[6] += partial[6][i] * partial[6][i];
    }
    cost[2] *= Div_Table[8];
    cost[6] *= Div_Table[8];
    // Full diagonals (0 and 4): 15 lines of varying length, weighted by
    // Div_Table to compensate for the per-line pixel counts.
    for (int i = 0; i < 7; i++) {
        cost[0] += (partial[0][i] * partial[0][i] + partial[0][14 - i] * partial[0][14 - i]) * Div_Table[i + 1];
        cost[4] += (partial[4][i] * partial[4][i] + partial[4][14 - i] * partial[4][14 - i]) * Div_Table[i + 1];
    }
    cost[0] += partial[0][7] * partial[0][7] * Div_Table[8];
    cost[4] += partial[4][7] * partial[4][7] * Div_Table[8];
    // Half-angle directions (1, 3, 5, 7): 5 full-length middle lines plus
    // shorter edge lines, again normalized via Div_Table.
    for (int i = 1; i < 8; i += 2) {
        for (int j = 0; j < 4 + 1; j++) {
            cost[i] += partial[i][3 + j] * partial[i][3 + j];
        }
        cost[i] *= Div_Table[8];
        for (int j = 0; j < 4 - 1; j++) {
            cost[i] += (partial[i][j] * partial[i][j]
                           + partial[i][10 - j] * partial[i][10 - j])
                * Div_Table[2 * j + 2];
        }
    }
    for (int i = 0; i < 8; i++) {
        if (cost[i] > bestCost) {
            bestCost = cost[i];
            yDir = i;
        }
    }
    // Contrast between the best direction and its perpendicular.
    var = (bestCost - cost[(yDir + 4) & 7]) >> 10;
}
}
|
Political economy, Weltanschauung and the transformation of European security European security changed dramatically over the period 19802000. The first period ended in 1991 with the fall of the Soviet Union. In the second period from 1991 to 1994, the European Union tried to assert leadership. The United States provided leadership in the third period from 1994. The economic fortunes of these three major players and expectations about them played a major role. Moreover the shift in ideological emphasis in the US from neorealism to neoWilsonian and unexpected American economic growth were also important in marking the shift from the second to the third period. |
# region header
# Competitive-programming template: common stdlib imports, fast stdin
# readers and frequently used constants.
import sys
from bisect import bisect_left, bisect_right, insort_left, insort_right
from collections import defaultdict, deque, Counter
from copy import deepcopy
from functools import lru_cache, reduce
from heapq import heappop, heappush
from itertools import accumulate, groupby, product, permutations, combinations, combinations_with_replacement
# gcd lives in math since Python 3.5 and was removed from fractions in 3.9,
# so `from fractions import gcd` breaks on modern interpreters.
from math import ceil, floor, factorial, gcd, log, sqrt, sin, cos, pi, e
from operator import itemgetter

sys.setrecursionlimit(10**6)

# stdin helpers: rs = one stripped line, ri = int, rf = float;
# the *_ variants split one line into a list of tokens.
rs = lambda: sys.stdin.readline().rstrip()
ri = lambda: int(rs())
rf = lambda: float(rs())
rs_ = lambda: [_ for _ in rs().split()]
ri_ = lambda: [int(_) for _ in rs().split()]
rf_ = lambda: [float(_) for _ in rs().split()]

INF = float('inf')
MOD = 10 ** 9 + 7  # standard competitive-programming prime modulus
# endregion
# Read the input string; answer "Yes" iff it consists of exactly two
# distinct characters, each appearing exactly twice (e.g. "ABAB").
S = Counter(rs())
if sorted(S.values()) == [2, 2]:
    print('Yes')
else:
    print('No')
/* eslint-disable no-restricted-exports */
// Re-export next-auth's Battle.net OAuth provider as this module's default
// export; the suppression above allows the `export { default }` form.
export { default } from 'next-auth/providers/battlenet';
|
Alija via Getty Images
Except within the small Muslim and orthodox Jewish communities, people in Denmark wonder why on Earth any parents would want to have their precious newborn child held down to have a part of his healthy, yet immature, penis cut off. According to a nationally representative poll from the summer of 2016, 87 percent of Danes favor a legal ban on non-therapeutic circumcision of boys under the age of 18 years. So far, politicians have been hesitant, but increasingly willing to listen.
Doctors and medical organizations in Denmark, the other Nordic countries and, with one notable exception, elsewhere in the Western world agree that circumcision of healthy boys is ethically problematic. It is considered an operation seriously and patently at odds with the Hippocratic oath (”first do no harm”) and one that is in conflict with a variety of international conventions, most notably the U.N. Declaration of the Rights of the Child.
The one Western country that is out of sync with its international peers is the United States, whose federal health authorities and national associations of pediatricians, obstetricians, family physicians and urologists endorse and perform most of these medically unnecessary operations in the country. Amputation of healthy infant foreskins constitutes the single most common surgical procedure in the United States ― an industry worth several hundred million dollars a year.
Internationally, several medical associations have issued policies and recommendations that contradict the popular belief in the United States that infant male circumcision is a harmless, health-promoting procedure. In fact, not one medical association in the whole world recommends circumcision of healthy boys.
In December of 2016, the Danish Medical Association published its revised policy on circumcision. Speaking on behalf of its 29,185 members, the new policy came out in an unusually clear voice. Its central passage goes like this (my unofficial translation):
Circumcision of boys without a medical indication is ethically unacceptable when the procedure is carried out without informed consent from the person undergoing the surgery. Therefore, circumcision should not be performed before the boy is 18 years old and able to decide whether this is an operation he wants.
Many Americans, who grew up in a culture whose medical authorities and mass media promote the view that an intact penis is dangerous, prone to infection, ugly and difficult to keep clean, may wonder what the penile health situation would be like in a country like Denmark, where few boys undergo circumcision. Of course, occasional intact men will encounter penile problems during their lifetime, just like people with natural teeth or appendices may develop cavities or appendicitis at some point later on. However, removing such healthy body parts on every child to prevent rare conditions in adulthood, that may be easily and effectively treated if and when they occur, is outright bad medical practice and ethics. So, why remove a healthy, functional and sensitive part of a child’s penis?
Indeed, a study published in Pediatrics in 2016 documented that only around one in 200 intact boys will develop a medical condition necessitating a circumcision before the age of 18 years. In other words, the chance is around 99.5 percent that a newborn boy can retain his valuable foreskin throughout infancy, childhood, and adolescence and enter adulthood with an intact penis. Simple information like this should urge parents to abstain from unnecessary infant surgery and let their sons decide for themselves about the size, sensitivity, functionality and appearance of their manhoods once they get old enough to understand the consequences.
In the fairytale “The Emperor’s New Clothes” by Danish author Hans Christian Andersen, a child too young to understand the desirability of keeping up the pretense that the emperor is wearing costly, elegant clothes when, in fact, he has nothing on, blurts out that the emperor is wearing nothing, and the cry is taken up by others. By speaking out frankly and without the usual diplomacy of such position papers, the Danish Medical Association impersonates that uncorrupted child shouting out against the falsehood, vanity and greed that has upheld the Empire of Circumcision for far too long. |
/* tslint:disable */
import { Action } from '@ngrx/store';
import { type } from '../util';
import { BaseLoopbackActionTypesFactory, BaseLoopbackActionsFactory } from './base';
import { LoopBackFilter, SDKToken, Message } from '../models';
// Action type constants for the Message model. Generated LoopBack-SDK-style
// code: extends the shared base CRUD action types with relation accessors
// (device, user, geolocs, organizations) and the Sigfox-specific endpoints.
// Each operation has a trigger type plus success/fail variants.
export const MessageActionTypes =
Object.assign(BaseLoopbackActionTypesFactory('Message'), {
  GET_DEVICE: type('[Message] getDevice'),
  GET_DEVICE_SUCCESS: type('[Message] getDevice success'),
  GET_DEVICE_FAIL: type('[Message] getDevice fail'),

  FIND_BY_ID_GEOLOCS: type('[Message] findByIdGeolocs'),
  FIND_BY_ID_GEOLOCS_SUCCESS: type('[Message] findByIdGeolocs success'),
  FIND_BY_ID_GEOLOCS_FAIL: type('[Message] findByIdGeolocs fail'),

  DESTROY_BY_ID_GEOLOCS: type('[Message] destroyByIdGeolocs'),
  DESTROY_BY_ID_GEOLOCS_SUCCESS: type('[Message] destroyByIdGeolocs success'),
  DESTROY_BY_ID_GEOLOCS_FAIL: type('[Message] destroyByIdGeolocs fail'),

  UPDATE_BY_ID_GEOLOCS: type('[Message] updateByIdGeolocs'),
  UPDATE_BY_ID_GEOLOCS_SUCCESS: type('[Message] updateByIdGeolocs success'),
  UPDATE_BY_ID_GEOLOCS_FAIL: type('[Message] updateByIdGeolocs fail'),

  GET_USER: type('[Message] getUser'),
  GET_USER_SUCCESS: type('[Message] getUser success'),
  GET_USER_FAIL: type('[Message] getUser fail'),

  FIND_BY_ID_ORGANIZATIONS: type('[Message] findByIdOrganizations'),
  FIND_BY_ID_ORGANIZATIONS_SUCCESS: type('[Message] findByIdOrganizations success'),
  FIND_BY_ID_ORGANIZATIONS_FAIL: type('[Message] findByIdOrganizations fail'),

  DESTROY_BY_ID_ORGANIZATIONS: type('[Message] destroyByIdOrganizations'),
  DESTROY_BY_ID_ORGANIZATIONS_SUCCESS: type('[Message] destroyByIdOrganizations success'),
  DESTROY_BY_ID_ORGANIZATIONS_FAIL: type('[Message] destroyByIdOrganizations fail'),

  UPDATE_BY_ID_ORGANIZATIONS: type('[Message] updateByIdOrganizations'),
  UPDATE_BY_ID_ORGANIZATIONS_SUCCESS: type('[Message] updateByIdOrganizations success'),
  UPDATE_BY_ID_ORGANIZATIONS_FAIL: type('[Message] updateByIdOrganizations fail'),

  LINK_ORGANIZATIONS: type('[Message] linkOrganizations'),
  LINK_ORGANIZATIONS_SUCCESS: type('[Message] linkOrganizations success'),
  LINK_ORGANIZATIONS_FAIL: type('[Message] linkOrganizations fail'),

  UNLINK_ORGANIZATIONS: type('[Message] unlinkOrganizations'),
  UNLINK_ORGANIZATIONS_SUCCESS: type('[Message] unlinkOrganizations success'),
  UNLINK_ORGANIZATIONS_FAIL: type('[Message] unlinkOrganizations fail'),

  GET_GEOLOCS: type('[Message] getGeolocs'),
  GET_GEOLOCS_SUCCESS: type('[Message] getGeolocs success'),
  GET_GEOLOCS_FAIL: type('[Message] getGeolocs fail'),

  CREATE_GEOLOCS: type('[Message] createGeolocs'),
  CREATE_GEOLOCS_SUCCESS: type('[Message] createGeolocs success'),
  CREATE_GEOLOCS_FAIL: type('[Message] createGeolocs fail'),

  DELETE_GEOLOCS: type('[Message] deleteGeolocs'),
  DELETE_GEOLOCS_SUCCESS: type('[Message] deleteGeolocs success'),
  DELETE_GEOLOCS_FAIL: type('[Message] deleteGeolocs fail'),

  GET_ORGANIZATIONS: type('[Message] getOrganizations'),
  GET_ORGANIZATIONS_SUCCESS: type('[Message] getOrganizations success'),
  GET_ORGANIZATIONS_FAIL: type('[Message] getOrganizations fail'),

  CREATE_ORGANIZATIONS: type('[Message] createOrganizations'),
  CREATE_ORGANIZATIONS_SUCCESS: type('[Message] createOrganizations success'),
  CREATE_ORGANIZATIONS_FAIL: type('[Message] createOrganizations fail'),

  DELETE_ORGANIZATIONS: type('[Message] deleteOrganizations'),
  DELETE_ORGANIZATIONS_SUCCESS: type('[Message] deleteOrganizations success'),
  DELETE_ORGANIZATIONS_FAIL: type('[Message] deleteOrganizations fail'),

  PUT_SIGFOX__OLD_TO_REMOVE: type('[Message] putSigfox_OldToRemove'),
  PUT_SIGFOX__OLD_TO_REMOVE_SUCCESS: type('[Message] putSigfox_OldToRemove success'),
  PUT_SIGFOX__OLD_TO_REMOVE_FAIL: type('[Message] putSigfox_OldToRemove fail'),

  PUT_SIGFOX: type('[Message] putSigfox'),
  PUT_SIGFOX_SUCCESS: type('[Message] putSigfox success'),
  PUT_SIGFOX_FAIL: type('[Message] putSigfox fail'),

  PUT_SIGFOX_ACKNOWLEDGE: type('[Message] putSigfoxAcknowledge'),
  PUT_SIGFOX_ACKNOWLEDGE_SUCCESS: type('[Message] putSigfoxAcknowledge success'),
  PUT_SIGFOX_ACKNOWLEDGE_FAIL: type('[Message] putSigfoxAcknowledge fail'),

  POST_SIGFOX_STATUS: type('[Message] postSigfoxStatus'),
  POST_SIGFOX_STATUS_SUCCESS: type('[Message] postSigfoxStatus success'),
  POST_SIGFOX_STATUS_FAIL: type('[Message] postSigfoxStatus fail'),

  CREATE_MANY_GEOLOCS: type('[Message] createManyGeolocs'),
  CREATE_MANY_GEOLOCS_SUCCESS: type('[Message] createManyGeolocs success'),
  CREATE_MANY_GEOLOCS_FAIL: type('[Message] createManyGeolocs fail'),

  CREATE_MANY_ORGANIZATIONS: type('[Message] createManyOrganizations'),
  CREATE_MANY_ORGANIZATIONS_SUCCESS: type('[Message] createManyOrganizations success'),
  CREATE_MANY_ORGANIZATIONS_FAIL: type('[Message] createManyOrganizations fail'),
});
/**
 * NgRx action creators for the `Message` model, combining the generic
 * LoopBack CRUD actions (from `BaseLoopbackActionsFactory`) with the
 * model-specific relation and remote-method actions defined below.
 *
 * NOTE(review): this block appears to be auto-generated
 * (loopback-sdk-builder style); prefer regenerating from the model
 * definition over hand-editing.
 *
 * Every operation comes as a triple: the request action (payload built from
 * the constructor arguments), a `...Success` action carrying `{id, data}`
 * (or `{id, fk}` for destroy/unlink), and a `...Fail` action whose payload
 * is the error.  All constructors accept an optional trailing `meta`
 * argument; request actions also accept an optional `customHeaders`
 * function that is not stored in the payload.
 */
export const MessageActions =
Object.assign(BaseLoopbackActionsFactory<Message>(MessageActionTypes), {
  // --- belongsTo relation: Device ---
  /** getDevice: fetch the belongsTo relation Device. Payload: {id: Message id, refresh}. */
  getDevice: class implements Action {
    public readonly type = MessageActionTypes.GET_DEVICE;
    public payload: {id: any, refresh: any};
    constructor(id: any, refresh: any = {}, customHeaders?: Function, public meta?: any) {
      this.payload = {id, refresh};
    }
  },
  /** getDeviceSuccess: success follow-up carrying {id, data}. */
  getDeviceSuccess: class implements Action {
    public readonly type = MessageActionTypes.GET_DEVICE_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** getDeviceFail: failure follow-up; payload is the error. */
  getDeviceFail: class implements Action {
    public readonly type = MessageActionTypes.GET_DEVICE_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  // --- hasMany relation: Geolocs (single item by foreign key) ---
  /** findByIdGeolocs: find a related Geoloc by id. Payload: {id: Message id, fk}. */
  findByIdGeolocs: class implements Action {
    public readonly type = MessageActionTypes.FIND_BY_ID_GEOLOCS;
    public payload: {id: any, fk: any};
    constructor(id: any, fk: any, customHeaders?: Function, public meta?: any) {
      this.payload = {id, fk};
    }
  },
  /** findByIdGeolocsSuccess: success follow-up carrying {id, data}. */
  findByIdGeolocsSuccess: class implements Action {
    public readonly type = MessageActionTypes.FIND_BY_ID_GEOLOCS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** findByIdGeolocsFail: failure follow-up; payload is the error. */
  findByIdGeolocsFail: class implements Action {
    public readonly type = MessageActionTypes.FIND_BY_ID_GEOLOCS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** destroyByIdGeolocs: delete a related Geoloc by id. Payload: {id: Message id, fk}. */
  destroyByIdGeolocs: class implements Action {
    public readonly type = MessageActionTypes.DESTROY_BY_ID_GEOLOCS;
    public payload: {id: any, fk: any};
    constructor(id: any, fk: any, customHeaders?: Function, public meta?: any) {
      this.payload = {id, fk};
    }
  },
  /** destroyByIdGeolocsSuccess: success follow-up; no data, carries {id, fk}. */
  destroyByIdGeolocsSuccess: class implements Action {
    public readonly type = MessageActionTypes.DESTROY_BY_ID_GEOLOCS_SUCCESS;
    public payload: {id: any, fk: any};
    constructor(id: any, fk: any, public meta?: any) {
      this.payload = {id, fk};
    }
  },
  /** destroyByIdGeolocsFail: failure follow-up; payload is the error. */
  destroyByIdGeolocsFail: class implements Action {
    public readonly type = MessageActionTypes.DESTROY_BY_ID_GEOLOCS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** updateByIdGeolocs: update a related Geoloc by id. Payload: {id: Message id, fk, data}. */
  updateByIdGeolocs: class implements Action {
    public readonly type = MessageActionTypes.UPDATE_BY_ID_GEOLOCS;
    public payload: {id: any, fk: any, data: any};
    constructor(id: any, fk: any, data: any = {}, customHeaders?: Function, public meta?: any) {
      this.payload = {id, fk, data};
    }
  },
  /** updateByIdGeolocsSuccess: success follow-up carrying {id, data}. */
  updateByIdGeolocsSuccess: class implements Action {
    public readonly type = MessageActionTypes.UPDATE_BY_ID_GEOLOCS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** updateByIdGeolocsFail: failure follow-up; payload is the error. */
  updateByIdGeolocsFail: class implements Action {
    public readonly type = MessageActionTypes.UPDATE_BY_ID_GEOLOCS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  // --- belongsTo relation: user ---
  /** getUser: fetch the belongsTo relation user. Payload: {id: Message id, refresh}. */
  getUser: class implements Action {
    public readonly type = MessageActionTypes.GET_USER;
    public payload: {id: any, refresh: any};
    constructor(id: any, refresh: any = {}, customHeaders?: Function, public meta?: any) {
      this.payload = {id, refresh};
    }
  },
  /** getUserSuccess: success follow-up carrying {id, data}. */
  getUserSuccess: class implements Action {
    public readonly type = MessageActionTypes.GET_USER_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** getUserFail: failure follow-up; payload is the error. */
  getUserFail: class implements Action {
    public readonly type = MessageActionTypes.GET_USER_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  // --- hasMany relation: Organizations (single item by foreign key, incl. link/unlink) ---
  /** findByIdOrganizations: find a related Organization by id. Payload: {id: Message id, fk}. */
  findByIdOrganizations: class implements Action {
    public readonly type = MessageActionTypes.FIND_BY_ID_ORGANIZATIONS;
    public payload: {id: any, fk: any};
    constructor(id: any, fk: any, customHeaders?: Function, public meta?: any) {
      this.payload = {id, fk};
    }
  },
  /** findByIdOrganizationsSuccess: success follow-up carrying {id, data}. */
  findByIdOrganizationsSuccess: class implements Action {
    public readonly type = MessageActionTypes.FIND_BY_ID_ORGANIZATIONS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** findByIdOrganizationsFail: failure follow-up; payload is the error. */
  findByIdOrganizationsFail: class implements Action {
    public readonly type = MessageActionTypes.FIND_BY_ID_ORGANIZATIONS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** destroyByIdOrganizations: delete a related Organization by id. Payload: {id: Message id, fk}. */
  destroyByIdOrganizations: class implements Action {
    public readonly type = MessageActionTypes.DESTROY_BY_ID_ORGANIZATIONS;
    public payload: {id: any, fk: any};
    constructor(id: any, fk: any, customHeaders?: Function, public meta?: any) {
      this.payload = {id, fk};
    }
  },
  /** destroyByIdOrganizationsSuccess: success follow-up; no data, carries {id, fk}. */
  destroyByIdOrganizationsSuccess: class implements Action {
    public readonly type = MessageActionTypes.DESTROY_BY_ID_ORGANIZATIONS_SUCCESS;
    public payload: {id: any, fk: any};
    constructor(id: any, fk: any, public meta?: any) {
      this.payload = {id, fk};
    }
  },
  /** destroyByIdOrganizationsFail: failure follow-up; payload is the error. */
  destroyByIdOrganizationsFail: class implements Action {
    public readonly type = MessageActionTypes.DESTROY_BY_ID_ORGANIZATIONS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** updateByIdOrganizations: update a related Organization by id. Payload: {id, fk, data}. */
  updateByIdOrganizations: class implements Action {
    public readonly type = MessageActionTypes.UPDATE_BY_ID_ORGANIZATIONS;
    public payload: {id: any, fk: any, data: any};
    constructor(id: any, fk: any, data: any = {}, customHeaders?: Function, public meta?: any) {
      this.payload = {id, fk, data};
    }
  },
  /** updateByIdOrganizationsSuccess: success follow-up carrying {id, data}. */
  updateByIdOrganizationsSuccess: class implements Action {
    public readonly type = MessageActionTypes.UPDATE_BY_ID_ORGANIZATIONS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** updateByIdOrganizationsFail: failure follow-up; payload is the error. */
  updateByIdOrganizationsFail: class implements Action {
    public readonly type = MessageActionTypes.UPDATE_BY_ID_ORGANIZATIONS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** linkOrganizations: add a related Organization by id. Payload: {id, fk, data}. */
  linkOrganizations: class implements Action {
    public readonly type = MessageActionTypes.LINK_ORGANIZATIONS;
    public payload: {id: any, fk: any, data: any};
    constructor(id: any, fk: any, data: any = {}, customHeaders?: Function, public meta?: any) {
      this.payload = {id, fk, data};
    }
  },
  /** linkOrganizationsSuccess: success follow-up carrying {id, data}. */
  linkOrganizationsSuccess: class implements Action {
    public readonly type = MessageActionTypes.LINK_ORGANIZATIONS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** linkOrganizationsFail: failure follow-up; payload is the error. */
  linkOrganizationsFail: class implements Action {
    public readonly type = MessageActionTypes.LINK_ORGANIZATIONS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** unlinkOrganizations: remove the Organizations relation to an item. Payload: {id, fk}. */
  unlinkOrganizations: class implements Action {
    public readonly type = MessageActionTypes.UNLINK_ORGANIZATIONS;
    public payload: {id: any, fk: any};
    constructor(id: any, fk: any, customHeaders?: Function, public meta?: any) {
      this.payload = {id, fk};
    }
  },
  /** unlinkOrganizationsSuccess: success follow-up; no data, carries {id, fk}. */
  unlinkOrganizationsSuccess: class implements Action {
    public readonly type = MessageActionTypes.UNLINK_ORGANIZATIONS_SUCCESS;
    public payload: {id: any, fk: any};
    constructor(id: any, fk: any, public meta?: any) {
      this.payload = {id, fk};
    }
  },
  /** unlinkOrganizationsFail: failure follow-up; payload is the error. */
  unlinkOrganizationsFail: class implements Action {
    public readonly type = MessageActionTypes.UNLINK_ORGANIZATIONS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  // --- Relation collection queries / create / delete-all ---
  /** getGeolocs: query the Geolocs of a Message. Payload: {id, filter}. */
  getGeolocs: class implements Action {
    public readonly type = MessageActionTypes.GET_GEOLOCS;
    public payload: {id: any, filter: LoopBackFilter};
    constructor(id: any, filter: LoopBackFilter = {}, customHeaders?: Function, public meta?: any) {
      this.payload = {id, filter};
    }
  },
  /** getGeolocsSuccess: success follow-up carrying {id, data} (data is an array). */
  getGeolocsSuccess: class implements Action {
    public readonly type = MessageActionTypes.GET_GEOLOCS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** getGeolocsFail: failure follow-up; payload is the error. */
  getGeolocsFail: class implements Action {
    public readonly type = MessageActionTypes.GET_GEOLOCS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** createGeolocs: create a new instance in Geolocs of this model. Payload: {id, data}. */
  createGeolocs: class implements Action {
    public readonly type = MessageActionTypes.CREATE_GEOLOCS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any = {}, customHeaders?: Function, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** createGeolocsSuccess: success follow-up carrying {id, data}. */
  createGeolocsSuccess: class implements Action {
    public readonly type = MessageActionTypes.CREATE_GEOLOCS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** createGeolocsFail: failure follow-up; payload is the error. */
  createGeolocsFail: class implements Action {
    public readonly type = MessageActionTypes.CREATE_GEOLOCS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** deleteGeolocs: delete all Geolocs of this model; payload is the Message id. */
  deleteGeolocs: class implements Action {
    public readonly type = MessageActionTypes.DELETE_GEOLOCS;
    constructor(public payload: any, public meta?: any) {}
  },
  /** deleteGeolocsSuccess: success follow-up; no data returned. */
  deleteGeolocsSuccess: class implements Action {
    public readonly type = MessageActionTypes.DELETE_GEOLOCS_SUCCESS;
    constructor(public payload: any, public meta?: any) {}
  },
  /** deleteGeolocsFail: failure follow-up; payload is the error. */
  deleteGeolocsFail: class implements Action {
    public readonly type = MessageActionTypes.DELETE_GEOLOCS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** getOrganizations: query the Organizations of a Message. Payload: {id, filter}. */
  getOrganizations: class implements Action {
    public readonly type = MessageActionTypes.GET_ORGANIZATIONS;
    public payload: {id: any, filter: LoopBackFilter};
    constructor(id: any, filter: LoopBackFilter = {}, customHeaders?: Function, public meta?: any) {
      this.payload = {id, filter};
    }
  },
  /** getOrganizationsSuccess: success follow-up carrying {id, data} (data is an array). */
  getOrganizationsSuccess: class implements Action {
    public readonly type = MessageActionTypes.GET_ORGANIZATIONS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** getOrganizationsFail: failure follow-up; payload is the error. */
  getOrganizationsFail: class implements Action {
    public readonly type = MessageActionTypes.GET_ORGANIZATIONS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** createOrganizations: create a new instance in Organizations of this model. Payload: {id, data}. */
  createOrganizations: class implements Action {
    public readonly type = MessageActionTypes.CREATE_ORGANIZATIONS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any = {}, customHeaders?: Function, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** createOrganizationsSuccess: success follow-up carrying {id, data}. */
  createOrganizationsSuccess: class implements Action {
    public readonly type = MessageActionTypes.CREATE_ORGANIZATIONS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** createOrganizationsFail: failure follow-up; payload is the error. */
  createOrganizationsFail: class implements Action {
    public readonly type = MessageActionTypes.CREATE_ORGANIZATIONS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** deleteOrganizations: delete all Organizations of this model; payload is the Message id. */
  deleteOrganizations: class implements Action {
    public readonly type = MessageActionTypes.DELETE_ORGANIZATIONS;
    constructor(public payload: any, public meta?: any) {}
  },
  /** deleteOrganizationsSuccess: success follow-up; no data returned. */
  deleteOrganizationsSuccess: class implements Action {
    public readonly type = MessageActionTypes.DELETE_ORGANIZATIONS_SUCCESS;
    constructor(public payload: any, public meta?: any) {}
  },
  /** deleteOrganizationsFail: failure follow-up; payload is the error. */
  deleteOrganizationsFail: class implements Action {
    public readonly type = MessageActionTypes.DELETE_ORGANIZATIONS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  // --- Sigfox remote methods (server-defined endpoints; payload: {req, data}) ---
  /** putSigfox_OldToRemove: legacy Sigfox PUT endpoint (no remote description provided). */
  putSigfox_OldToRemove: class implements Action {
    public readonly type = MessageActionTypes.PUT_SIGFOX__OLD_TO_REMOVE;
    public payload: {req: any, data: any};
    constructor(req: any = {}, data: any, customHeaders?: Function, public meta?: any) {
      this.payload = {req, data};
    }
  },
  /** putSigfox_OldToRemoveSuccess: success follow-up carrying {id, data}. */
  putSigfox_OldToRemoveSuccess: class implements Action {
    public readonly type = MessageActionTypes.PUT_SIGFOX__OLD_TO_REMOVE_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** putSigfox_OldToRemoveFail: failure follow-up; payload is the error. */
  putSigfox_OldToRemoveFail: class implements Action {
    public readonly type = MessageActionTypes.PUT_SIGFOX__OLD_TO_REMOVE_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** putSigfox: Sigfox PUT endpoint (no remote description provided). */
  putSigfox: class implements Action {
    public readonly type = MessageActionTypes.PUT_SIGFOX;
    public payload: {req: any, data: any};
    constructor(req: any = {}, data: any, customHeaders?: Function, public meta?: any) {
      this.payload = {req, data};
    }
  },
  /** putSigfoxSuccess: success follow-up carrying {id, data}. */
  putSigfoxSuccess: class implements Action {
    public readonly type = MessageActionTypes.PUT_SIGFOX_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** putSigfoxFail: failure follow-up; payload is the error. */
  putSigfoxFail: class implements Action {
    public readonly type = MessageActionTypes.PUT_SIGFOX_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** putSigfoxAcknowledge: Sigfox acknowledge endpoint (no remote description provided). */
  putSigfoxAcknowledge: class implements Action {
    public readonly type = MessageActionTypes.PUT_SIGFOX_ACKNOWLEDGE;
    public payload: {req: any, data: any};
    constructor(req: any = {}, data: any, customHeaders?: Function, public meta?: any) {
      this.payload = {req, data};
    }
  },
  /** putSigfoxAcknowledgeSuccess: success follow-up carrying {id, data}. */
  putSigfoxAcknowledgeSuccess: class implements Action {
    public readonly type = MessageActionTypes.PUT_SIGFOX_ACKNOWLEDGE_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** putSigfoxAcknowledgeFail: failure follow-up; payload is the error. */
  putSigfoxAcknowledgeFail: class implements Action {
    public readonly type = MessageActionTypes.PUT_SIGFOX_ACKNOWLEDGE_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** postSigfoxStatus: Sigfox status POST endpoint (no remote description provided). */
  postSigfoxStatus: class implements Action {
    public readonly type = MessageActionTypes.POST_SIGFOX_STATUS;
    public payload: {req: any, data: any};
    constructor(req: any = {}, data: any, customHeaders?: Function, public meta?: any) {
      this.payload = {req, data};
    }
  },
  /** postSigfoxStatusSuccess: success follow-up carrying {id, data}. */
  postSigfoxStatusSuccess: class implements Action {
    public readonly type = MessageActionTypes.POST_SIGFOX_STATUS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** postSigfoxStatusFail: failure follow-up; payload is the error. */
  postSigfoxStatusFail: class implements Action {
    public readonly type = MessageActionTypes.POST_SIGFOX_STATUS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  // --- Bulk createMany helpers ---
  /** createManyGeolocs: create several instances in Geolocs. Payload: {id, data[]}. */
  createManyGeolocs: class implements Action {
    public readonly type = MessageActionTypes.CREATE_MANY_GEOLOCS;
    public payload: {id: any, data: any[]};
    constructor(id: any, data: any[] = [], customHeaders?: Function, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** createManyGeolocsSuccess: success follow-up carrying {id, data} (data is an array). */
  createManyGeolocsSuccess: class implements Action {
    public readonly type = MessageActionTypes.CREATE_MANY_GEOLOCS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** createManyGeolocsFail: failure follow-up; payload is the error. */
  createManyGeolocsFail: class implements Action {
    public readonly type = MessageActionTypes.CREATE_MANY_GEOLOCS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
  /** createManyOrganizations: create several instances in Organizations. Payload: {id, data[]}. */
  createManyOrganizations: class implements Action {
    public readonly type = MessageActionTypes.CREATE_MANY_ORGANIZATIONS;
    public payload: {id: any, data: any[]};
    constructor(id: any, data: any[] = [], customHeaders?: Function, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** createManyOrganizationsSuccess: success follow-up carrying {id, data} (data is an array). */
  createManyOrganizationsSuccess: class implements Action {
    public readonly type = MessageActionTypes.CREATE_MANY_ORGANIZATIONS_SUCCESS;
    public payload: {id: any, data: any};
    constructor(id: any, data: any, public meta?: any) {
      this.payload = {id, data};
    }
  },
  /** createManyOrganizationsFail: failure follow-up; payload is the error. */
  createManyOrganizationsFail: class implements Action {
    public readonly type = MessageActionTypes.CREATE_MANY_ORGANIZATIONS_FAIL;
    constructor(public payload: any, public meta?: any) { }
  },
});
# repo: d4l3k/tsml
import json
import numpy
import gzip
import tensorflow as tf
# Load the pre-processed training set (written by an earlier data-prep step).
with gzip.open('tmp/data.json.gz', 'rb') as f:
    data = json.load(f)
# Names of the indicators being predicted.
indicators = data['indicators']
# Feature matrix: one row per example, two input columns per indicator
# (see num_inputs below).
trainX = numpy.asarray(data['trainX'])
# Targets: JSON object keys arrive as strings, so re-key each example's
# {indicator index -> target value} mapping with int keys.
trainy = [
    {
        int(k): v
        for k, v in item.items()
    }
    for item in data['trainy']
]
print(indicators)
print('Num training examples = {}'.format(len(trainy)))
print('Building network...')
# Parameters
learning_rate = 0.01
num_steps = 1000  # NOTE(review): unused below — the loop runs `iterations` steps instead
batch_size = 128  # NOTE(review): unused — training performs single-example updates
display_step = 100  # NOTE(review): unused — the loop hard-codes `k % 100`
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_inputs = len(indicators) * 2
num_outputs = len(indicators)
def neural_net(x):
regularizer = tf.contrib.layers.l2_regularizer(0.1)
# Hidden fully connected layer with 256 neurons
layer_1 = tf.layers.dense(x, n_hidden_1, kernel_regularizer=regularizer)
# Hidden fully connected layer with 256 neurons
layer_2 = tf.layers.dense(layer_1, n_hidden_2, kernel_regularizer=regularizer)
# Output fully connected layer with a neuron for each indicator
return [
tf.layers.dense(layer_2, 1, kernel_regularizer=regularizer)
for indicator in indicators
]
# Per-feature statistics used to normalize inputs (and, below, targets).
means = numpy.mean(trainX, axis=0)
variances = numpy.var(trainX, axis=0)
print(means, variances)
# Build the neural network
# Single-example placeholders: one feature row and one scalar target.
x = tf.placeholder(tf.float32, [1, num_inputs])
y = tf.placeholder(tf.float32)
# NOTE(review): this divides by the *variance*, not the standard deviation
# (numpy.sqrt(variances)) — confirm intentional.  The target normalization
# in the training loop uses the same convention, so the two are at least
# consistent with each other.
inputs = (x - means) / variances
outputs = neural_net(inputs)
# One MSE loss per indicator head; each loss also carries the full
# regularization term collected from all dense layers.
losses = [
    tf.losses.mean_squared_error(output, y) +
    tf.losses.get_regularization_loss()
    for i, output in enumerate(outputs)
]
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# One training op per indicator so heads can be updated independently.
trains = [
    optimizer.minimize(l)
    for l in losses
]
def print_errors(sum, num):
    """Print the mean accumulated loss per indicator.

    Args:
        sum: per-indicator accumulated loss values (indexable by indicator
            position).
        num: per-indicator update counts; indicators with zero updates are
            skipped, which also avoids division by zero.

    Note: the parameter name ``sum`` shadows the builtin of the same name
    within this function body.
    """
    for i, n in enumerate(num):
        if n > 0:
            print(' - {}: {}'.format(indicators[i], sum[i]/n))
print("Training...")
iterations = 20000
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
for k in range(iterations):
if k % 100 == 0:
if k > 0:
print('iteration {} of {} examples'.format(k, len(trainX)))
print_errors(errors, error_counts)
errors = numpy.zeros(len(indicators))
error_counts = numpy.zeros(len(indicators))
i = numpy.random.randint(0, len(trainy))
yi = trainy[i]
xi = numpy.resize(trainX[i], [1, num_inputs])
for j, v in yi.items():
train = trains[j]
loss = losses[j]
feed_dict = {
x: xi,
y: numpy.float32((v-means[j*2])/variances[j*2])
}
val = sess.run([train, loss], feed_dict=feed_dict)
"""
print(j, val[1], feed_dict)
if k > 10:
exit()
"""
errors[j] += float(val[1])
error_counts[j] += 1
print_errors(errors, error_counts)
|
package com.bxd.day07;
/** Small driver that exercises the methods of {@code AbstractClassTest}. */
public class AbsTest {

    public static void main(String[] args) {
        // Instantiate the concrete test class and invoke each of its
        // methods in turn, in the same order as before.
        AbstractClassTest test = new AbstractClassTest();
        test.doPrint();
        test.study();
        test.show();
        test.work();
    }
}
|
Pandora previewed its long-awaited Apple Music and Spotify competitor today at a special event with select publications. The new service is called Pandora Premium, and will launch in early 2017 with a likely price of $9.99 per month, reports Engadget. The new app offers on-demand access to a large music library and looks a lot like Rdio. In November, Pandora announced that it had acquired "key assets" and employees from Rdio. Like other services, Pandora Premium will also allow users to save music offline and experience ad-free listening, reports The Verge. Pandora CEO Tim Westergren thinks the company has created the "first truly premium music service." For Pandora, a premium music service means a personal music service, and the company hopes to leverage its trove of listening data and the Music Genome Project to offer each customer a personalized music service. For instance, Pandora Premium features personalized search, which means each user will get different music results based on their listening history rather than overall popularity. The browse and new release sections of the app will also be personalized based on user taste. Smart playlists will allow users to easily add new songs with a touch of a button, and in some cases Pandora will automatically add songs for you. The app will also change color based on the album artwork of the song you're currently listening to, and every song you like will be added to a giant playlist made up of every song you've ever liked on Pandora. When a user has reached the end of a playlist or album, Pandora Premium will offer a radio station based on the finished playlist or album to keep the music going. 
Pandora says the service will begin rolling out in the first quarter of 2017, but won't commit to whether the service will cost $9.99 per month like similar music streaming services. Pandora Premium gives Pandora three music offerings at different price points: the basic, ad-supported radio streaming service, the $4.99 per month Pandora Plus, an ad-free streaming service, and the on-demand newly announced Pandora Premium.
/**
 * Parse image name
 *
 * @v text		Text
 * @ret image		Image
 * @ret rc		Return status code
 */
int parse_image ( const char *text, struct image **image ) {

	assert ( text != NULL );

	/* Look up image by name; *image is always written, even on failure */
	*image = find_image ( text );
	if ( *image )
		return 0;

	printf ( "\"%s\": no such image\n", text );
	return -ENOENT;
}
// src/slogo/model/ASTNodes/ASTCommand.java
package slogo.model.ASTNodes;
import java.util.List;
import slogo.exceptions.IncorrectParameterCountException;
import slogo.model.InfoBundle;
/**
 * Base class of commands. Subclasses need to implement
 * {@link ASTCommand#doEvaluate(InfoBundle, List)}. Subclasses can also override
 * {@link ASTCommand#preEvaluate(InfoBundle)} to include pre-evaluation checks.
 *
 * @author <NAME>, <NAME>
 */
public abstract class ASTCommand extends ASTNamed {
  // Number of parameters (children) this command expects before it is runnable.
  protected int numParams;
  /**
   * Creates a new instance that contains the name and the number of parameters.
   *
   * @param name      display name of the command
   * @param numParams number of parameters the command expects
   */
  public ASTCommand(String name, int numParams) {
    super(name);
    this.numParams = numParams;
  }
  /** @return the number of parameters this command expects */
  @Override
  public int getNumParams() {
    return numParams;
  }
  /** Hook for subclasses to run checks before evaluation; no-op by default. */
  @Override
  protected void preEvaluate(InfoBundle info) {
  }
  /**
   * Appends a child (parameter) node.
   *
   * @param newChild the node to append
   * @return the number of children after insertion
   * @throws IncorrectParameterCountException declared by the superclass
   *         contract when the child cannot be accepted
   */
  @Override
  public int addChild(ASTNode newChild) throws IncorrectParameterCountException {
    super.addChild(newChild);
    return getNumChildren();
  }
  /** @return true once at least {@code numParams} children have been added */
  @Override
  public boolean isDone() {
    return numParams <= getNumChildren();
  }
}
|
Anna Bergendahl
Idol 2008
Bergendahl successfully applied for the TV4 talent show Idol 2008 with her rendition of Bonnie Raitt's Have a Heart which received praises from the jury which consisted of Laila Bagge, Anders Bagge and Andreas Carlsson. She later sang songs like ABBA's Mamma Mia, Py Bäckman's Stad i ljus, Save Up All Your Tears, Bleeding Love by Leona Lewis, The Best and Over the Rainbow.
Bergendahl finished fifth in the Idol season which was later won by Kevin Borg.
In 2009, Bergendahl was approached by Kristian Lagerström and Bobby Ljunggren and asked if she wanted to record a song for Melodifestivalen 2010.
Melodifestivalen and Eurovision
Anna Bergendahl participated in Melodifestivalen 2010 with the song "This Is My Life", written and produced by Kristian Lagerström (lyrics) and Bobby Ljunggren (music). The song won the final in Ericsson Globe on 13 March 2010, with 214 points. Bergendahl represented Sweden at the Eurovision Song Contest 2010 in Oslo, Norway, where she became the first Swedish singer not to qualify for the final since the introduction of the semifinals in 2004 (although she placed 11th, just outside the top 10 qualifiers by a margin of 5 points). "This Is My Life'" was the first ballad to win Melodifestivalen since 1998 when Kärleken är won and was also Sweden's 50th entry in the Eurovision Song Contest.
2010: After Eurovision
After Eurovision, Bergendahl participated in Allsång på Skansen, Sommarkrysset, and Lotta på Liseberg. She also went on a tour and sang songs from her debut album. In 2012, Bergendahl released her new album Something to Believe In; its debut single was "Live and Let Go". She participated in Melodifestivalen 2019 with the song "Ashes to Ashes".
// repo: fes300/DefinitelyTyped
import { CallbackType, CharsetType, LoadCallbackType } from '../types';
import { DecodedString } from './StringUtils';
/**
 * Base reader providing byte-level access to a media file.
 *
 * Subclasses implement the concrete data source and report whether they can
 * handle a given input via {@link MediaFileReader.canReadFile}.  Multi-byte
 * integer getters take an `isBigEndian` flag to select byte order.
 */
export default class MediaFileReader {
  // True once initialization has completed (see init/_init).
  _isInitialized: boolean;
  // Size of the underlying media, in bytes.
  _size: number;
  constructor(path?: any);
  /** Whether this reader class can handle the given file reference. */
  static canReadFile(file: any): boolean;
  /** Initializes the reader, invoking the callbacks on success/failure. */
  init(callbacks: LoadCallbackType): void;
  /** Subclass hook performing the actual initialization. */
  _init(callbacks: LoadCallbackType): void;
  /** Ensures the given [start, end] byte range is available for reading. */
  loadRange(range: [number, number], callbacks: LoadCallbackType): void;
  /** Size of the media in bytes. */
  getSize(): number;
  /** Unsigned byte at `offset`. */
  getByteAt(offset: number): number;
  /** `length` bytes starting at `offset`. */
  getBytesAt(offset: number, length: number): number[];
  /** Whether bit `bit` of the byte at `offset` is set. */
  isBitSetAt(offset: number, bit: number): boolean;
  /** Signed 8-bit read. */
  getSByteAt(offset: number): number;
  /** Unsigned / signed 16-bit reads. */
  getShortAt(offset: number, isBigEndian: boolean): number;
  getSShortAt(offset: number, isBigEndian: boolean): number;
  /** Unsigned / signed 32-bit reads. */
  getLongAt(offset: number, isBigEndian: boolean): number;
  getSLongAt(offset: number, isBigEndian: boolean): number;
  /** 24-bit integer read. */
  getInteger24At(offset: number, isBigEndian: boolean): number;
  /** Reads `length` bytes as a string (charset unspecified here; see
   *  getStringWithCharsetAt for explicit charset handling). */
  getStringAt(offset: number, length: number): string;
  /** Reads and decodes a string using the given (optional) charset. */
  getStringWithCharsetAt(
    offset: number,
    length: number,
    charset?: CharsetType,
  ): DecodedString;
  /** Single character at `offset`. */
  getCharAt(offset: number): string;
  /** Reads a 32-bit "synchsafe" integer (the name suggests the ID3v2
   *  encoding — confirm against the implementation). */
  getSynchsafeInteger32At(offset: number): number;
}
|
/* -*- Mode: C++; tab-width: 8; c-basic-offset: 2; indent-tabs-mode: nil; -*- */

#ifndef RR_MAIN_H_
#define RR_MAIN_H_

#include <cstdio> // for FILE, used by print_usage(); previously relied on a transitive include
#include <string>
#include <vector>

// Verifies that the host system provides everything required to run;
// when `use_syscall_buffer` is true, syscall-buffer prerequisites are
// checked as well.
void assert_prerequisites(bool use_syscall_buffer = false);

// Checks performance-related system settings.
void check_performance_settings();

// Prints usage/help text to the given stream.
void print_usage(FILE*);

// Consumes one recognized global option from the front of `args`,
// returning true if an option was parsed (and removed).
bool parse_global_option(std::vector<std::string>& args);

#endif // RR_MAIN_H_
|
Efficacy of anticoagulants and platelet inhibitors in cancer-induced thrombosis The efficacy of anticoagulants, low-molecular-weight heparins (LMWHs), the antiplatelet glycoprotein IIb/IIIa antagonist, or combinations on cancer-activated thrombosis was determined using thromboelastography. The LMWHs tinzaparin and enoxaparin (0.179, 1.79, 17.9 g) were incubated in human citrated whole blood (n = 4) and then activated by calcium chloride (11 mmol/l) or Colo205 (cell count 105). Concentrations of 9.9, 17.9 and 179 g glycoprotein IIb/IIIa antagonist, XV454, and combinations with each LMWH were carried out and activated under the same conditions. The experiment was repeated with tissue factor substituting for the Colo205 to induce platelet/fibrin clot formation. Parameters tested in the thrombelastography analysis included clotting time, rate of clot formation due to fibrin formation, clot kinetics, and clot strength related to platelet count (maximum amplitude). Tinzaparin (1.79 g), enoxaparin (1.79 g), and XV454 (17.9 g) significantly reduced the angle by 64, 26 and 27%, respectively, in cancer-induced clotting. Significant reductions in the maximum amplitude occurred in tinzaparin 1.79 g (31%), enoxaparin 1.79 g (11%), and XV454 17.9 g (59%). An overall antithrombotic additive effect occurred when each LMWH (1.79 g) was combined with XV454 (17.9 g). The results between cancer-activated and tissue factor-activated blood were similar. The study concludes that an additive effect is present between LMWHs and a glycoprotein IIb/IIIa antagonist in reducing cancer-mediated thrombosis. |
package org.colocation.bestEffort;
import org.cloudbus.cloudsim.Log;
import org.cloudbus.cloudsim.core.CloudSim;
import org.colocation.Program;
import org.workflowsim.Task;
import java.util.*;
/**
* Created by wkj on 2019/3/12.
*/
public class ColocationJob extends ColocationTask{
private double jobStartTime;
private double jobEndTime;
private List<ColocationTask> rootTask;
private Map<String, ColocationTask> taskMap;
public ColocationJob(String jobName, final int jobId, double cpuQuota, int ramQuota, double memBWQuota, String
DatacenterName, int userID) {
super(jobName, jobName, jobId, cpuQuota, ramQuota, memBWQuota, DatacenterName, userID, -1, new Program(), null);
this.taskMap = new HashMap<>();
this.rootTask = new ArrayList<>();
}
public List<ColocationTask> getRootTaskList() {
return rootTask;
}
public void addRootTask(ColocationTask t) {
this.rootTask.add(t);
}
public void setJobName(String jobName) {
super.setTaskName(jobName);
super.setJobName(jobName);
//reset all tasks' name
for (Map.Entry<String, ColocationTask> entry : taskMap.entrySet()) {
ColocationTask task = entry.getValue();
task.setJobName(jobName);
}
}
public String getJobName(){
return super.getJobName();
}
public void addTask(ColocationTask t) {
//Log.printLine("add task:"+t.getTaskName());
this.taskMap.put(t.getTaskName(), t);
}
public ColocationTask getTask(String taskName) {
return this.taskMap.get(taskName);
}
@Override
public void setProgressReporter(int progressReporter) {
super.setProgressReporter(progressReporter);
taskMap.entrySet();
for (Map.Entry<String, ColocationTask> entry : taskMap.entrySet()) {
ColocationTask task = entry.getValue();
task.setProgressReporter(progressReporter);
}
}
public double getJobStartTime() {
return jobStartTime;
}
public void setJobStartTime(double jobStartTime) {
this.jobStartTime = jobStartTime;
}
public double getJobEndTime() {
return jobEndTime;
}
public void setJobEndTime(double jobEndTime) {
this.jobEndTime = jobEndTime;
}
@Override
public List getParentList() {
return super.getParentList();
}
public List<Task> getAllTasks(){
List<Task> res = new ArrayList<>();
Queue<Task> queue = new LinkedList<>();
for (int i = 0; i < this.rootTask.size(); i++) {
queue.add(this.rootTask.get(i));
}
while (queue.size()>0) {
Task i = queue.remove();
if (!res.contains(i)) {
res.add(i);
}
for (Task t : i.getChildList()){
if ( !queue.contains(t) ){
queue.add(t);
}
}
}
return res;
}
public void updateStatus(){
List<Task> tasks = this.getAllTasks() ;
boolean allSuccess = true;
boolean hasTaskFailed = false;
boolean hasExec = false;
try {
for( Task t : tasks) {
switch (t.getStatus()) {
case FAILED:
allSuccess = false;
hasTaskFailed = true;
break;
case INEXEC:
allSuccess = false;
hasExec = true;
break;
case SUCCESS:
break;
default:
allSuccess = false;
}
}
if (hasTaskFailed) {
this.setCloudletStatus(FAILED);
}
if (allSuccess) {
this.setCloudletStatus(SUCCESS);
double now = CloudSim.clock();
this.setJobEndTime(now);
}
if (hasExec){
this.setCloudletStatus(INEXEC);
}
} catch (Exception e) {
Log.printLine("update job status error:"+ e.toString());
e.printStackTrace();
}
}
}
|
Cops from the 63rd Precinct will be offering residents a chance to better secure their cars, bicycles and computers on April 11 during a special crime prevention day at Marine Park.
From 10 am to 3 pm, police will be available to enroll residents to etch codes on to cars, bikes, laptops, cell phones and other electronics and valuables.
Each item will be given a special number that will be helpful in getting the item back if it's ever stolen and found by police, officials said.
Crime Prevention Day at Marine Park [Avenue U parking lot between Stuart and East 33rd streets in Marine Park, (718) 258-4411] on April 11, 10 am to 3 pm. |
Corporate branding and brand architecture: a conceptual framework This paper examines the relationships between product and corporate brands with a view to clarifying the role and function of corporate branding in the context of different brand architectures. Through the prism of rebranding decisions, brand architecture is analysed as an evolutionary strategy decision. Two broad strategies are identified: an integration strategy which seeks to achieve image alignment between corporate and product brands; and a separation strategy which seeks to shape different images for different stakeholders. Implementing these strategies within the context of the brand architecture, we introduce the concept of `ascending and descending brand extension', which leverages the strong image of the corporate brand to enhance the image and credibility of the product brand and vice versa. Based on this analysis, we propose three types of corporate branding strategy within the brand architecture framework: the `trade name', which is a basic identity over a house of brands; the `business brand', which is consciously nurtured and is aimed primarily at stakeholders other than consumers; and, finally, the `holistic corporate brand' is a fully developed corporate brand, extending across all target audiences. |
<filename>src/org/ddogleg/util/PrimitiveArrays.java
/*
* Copyright (c) 2012-2020, <NAME>. All Rights Reserved.
*
* This file is part of DDogleg (http://ddogleg.org).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ddogleg.util;
import org.ddogleg.struct.DogArray_I32;
import org.ddogleg.struct.DogArray_I8;
import org.jetbrains.annotations.Nullable;
import java.util.Random;
/**
* Various functions for manipulating primitive arrays
*
* @author <NAME>
*/
public class PrimitiveArrays {
	/**
	 * Finds the intersection of two sets. Uses an algorithm that requires linear time and memory. Manually
	 * determines the min and max values contained in both sets.
	 *
	 * @param setA Set A of integers. Unsorted.
	 * @param sizeA Number of elements in set A
	 * @param setB Set B of integers. Unsorted.
	 * @param sizeB Number of elements in set B
	 * @param results Output set that is the intersection. Sorted from least to greatest
	 * @param work Work space. If null an array is created internally.
	 */
	public static void intersection( int[] setA, int sizeA,
									 int[] setB, int sizeB,
									 DogArray_I32 results,
									 @Nullable DogArray_I8 work ) {
		// Handle the pathological empty-set case. This also guarantees that accessing setA[0] below is safe.
		if (sizeA == 0 || sizeB == 0) {
			results.reset();
			return;
		}

		// Set the min/max to an actual element. This enables if else to be used below.
		int min = setA[0];
		int max = min;

		// Exhaustively search to find the minimum and maximum values
		for (int i = 1; i < sizeA; i++) {
			int v = setA[i];
			if (v < min)
				min = v;
			else if (v > max)
				max = v;
		}
		for (int i = 0; i < sizeB; i++) {
			int v = setB[i];
			if (v < min)
				min = v;
			else if (v > max)
				max = v;
		}

		intersection(setA, sizeA, setB, sizeB, min, max, results, work);
	}

	/**
	 * Finds the intersection of two sets. Uses an algorithm that requires linear time and memory.
	 *
	 * @param setA Set A of integers. Unsorted.
	 * @param sizeA Number of elements in set A
	 * @param setB Set B of integers. Unsorted.
	 * @param sizeB Number of elements in set B
	 * @param valueMin Minimum value in either set
	 * @param valueMax Maximum value in either set
	 * @param results Output set that is the intersection. Sorted from least to greatest
	 * @param work Work space. If null an array is created internally.
	 */
	public static void intersection( int[] setA, int sizeA,
									 int[] setB, int sizeB,
									 int valueMin, int valueMax,
									 DogArray_I32 results,
									 @Nullable DogArray_I8 work ) {
		work = countOccurrences(setA, sizeA, setB, sizeB, valueMin, valueMax, results, work);

		// A count of exactly 2 means the value appeared in both sets (assumes sets contain no duplicates)
		for (int i = 0; i < work.size; i++) {
			if (work.data[i] != 2)
				continue;
			results.add(i + valueMin);
		}
	}

	/**
	 * Finds the union of two sets. Uses an algorithm that requires linear time and memory.
	 *
	 * @param setA Set A of integers. Unsorted.
	 * @param sizeA Number of elements in set A
	 * @param setB Set B of integers. Unsorted.
	 * @param sizeB Number of elements in set B
	 * @param valueMin Minimum value in either set
	 * @param valueMax Maximum value in either set
	 * @param results Output set that is the union. Sorted from least to greatest
	 * @param work Work space. If null an array is created internally.
	 */
	public static void union( int[] setA, int sizeA,
							  int[] setB, int sizeB,
							  int valueMin, int valueMax,
							  DogArray_I32 results,
							  @Nullable DogArray_I8 work ) {
		work = countOccurrences(setA, sizeA, setB, sizeB, valueMin, valueMax, results, work);

		// A non-zero count means the value appeared in at least one of the sets
		for (int i = 0; i < work.size; i++) {
			if (work.data[i] == 0)
				continue;
			results.add(i + valueMin);
		}
	}

	/**
	 * Counts how many times each value in the range [valueMin, valueMax] occurs across both sets.
	 * Index i of the returned array corresponds to value i + valueMin. Also resets 'results' and
	 * reserves space for the expected output size.
	 *
	 * @return the work array holding the occurrence counts
	 */
	private static DogArray_I8 countOccurrences( int[] setA, int sizeA,
												 int[] setB, int sizeB,
												 int valueMin, int valueMax,
												 DogArray_I32 results,
												 @Nullable DogArray_I8 work ) {
		results.reset();
		results.reserve(Math.min(sizeA, sizeB));

		if (work == null)
			work = new DogArray_I8(valueMax - valueMin + 1);
		work.reset();
		work.resize(valueMax - valueMin + 1, (byte)0);

		for (int i = 0; i < sizeA; i++) {
			work.data[setA[i] - valueMin]++;
		}
		for (int i = 0; i < sizeB; i++) {
			work.data[setB[i] - valueMin]++;
		}
		return work;
	}

	/**
	 * Sets each element within range to a number counting up, starting at 0
	 */
	public static void fillCounting( int[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		for (int i = 0; i < length; i++) {
			array[i + offset] = i;
		}
	}

	/**
	 * Creates a new array of the given length filled with counting numbers, starting at 0
	 */
	public static int[] fillCounting( int length ) {
		int[] array = new int[length];
		fillCounting(array, 0, length);
		return array;
	}

	/**
	 * Randomly shuffles the elements in the specified range using Fisher-Yates style swaps
	 */
	public static void shuffle( byte[] array, int offset, int length, Random rand ) {
		sanityCheckShuffle(array.length, offset, length);

		for (int i = 0; i < length; i++) {
			int src = rand.nextInt(length - i);
			byte tmp = array[offset + src + i];
			array[offset + src + i] = array[offset + i];
			array[offset + i] = tmp;
		}
	}

	/**
	 * Randomly shuffles the elements in the specified range using Fisher-Yates style swaps
	 */
	public static void shuffle( short[] array, int offset, int length, Random rand ) {
		sanityCheckShuffle(array.length, offset, length);

		for (int i = 0; i < length; i++) {
			int src = rand.nextInt(length - i);
			short tmp = array[offset + src + i];
			array[offset + src + i] = array[offset + i];
			array[offset + i] = tmp;
		}
	}

	/**
	 * Randomly shuffles the elements in the specified range using Fisher-Yates style swaps
	 */
	public static void shuffle( int[] array, int offset, int length, Random rand ) {
		sanityCheckShuffle(array.length, offset, length);

		for (int i = 0; i < length; i++) {
			int src = rand.nextInt(length - i);
			int tmp = array[offset + src + i];
			array[offset + src + i] = array[offset + i];
			array[offset + i] = tmp;
		}
	}

	/**
	 * Randomly shuffles the elements in the specified range using Fisher-Yates style swaps
	 */
	public static void shuffle( long[] array, int offset, int length, Random rand ) {
		sanityCheckShuffle(array.length, offset, length);

		for (int i = 0; i < length; i++) {
			int src = rand.nextInt(length - i);
			long tmp = array[offset + src + i];
			array[offset + src + i] = array[offset + i];
			array[offset + i] = tmp;
		}
	}

	/**
	 * Randomly shuffles the elements in the specified range using Fisher-Yates style swaps
	 */
	public static void shuffle( float[] array, int offset, int length, Random rand ) {
		sanityCheckShuffle(array.length, offset, length);

		for (int i = 0; i < length; i++) {
			int src = rand.nextInt(length - i);
			float tmp = array[offset + src + i];
			array[offset + src + i] = array[offset + i];
			array[offset + i] = tmp;
		}
	}

	/**
	 * Randomly shuffles the elements in the specified range using Fisher-Yates style swaps
	 */
	public static void shuffle( double[] array, int offset, int length, Random rand ) {
		sanityCheckShuffle(array.length, offset, length);

		for (int i = 0; i < length; i++) {
			int src = rand.nextInt(length - i);
			double tmp = array[offset + src + i];
			array[offset + src + i] = array[offset + i];
			array[offset + i] = tmp;
		}
	}

	/**
	 * Returns the value of the element with the minimum value
	 */
	public static int min( byte[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		byte min = array[offset];
		for (int i = 1; i < length; i++) {
			byte tmp = array[offset + i];
			if (tmp < min) {
				min = tmp;
			}
		}
		return min;
	}

	/**
	 * Returns the value of the element with the minimum value
	 */
	public static int min( short[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		short min = array[offset];
		for (int i = 1; i < length; i++) {
			short tmp = array[offset + i];
			if (tmp < min) {
				min = tmp;
			}
		}
		return min;
	}

	/**
	 * Returns the value of the element with the minimum value
	 */
	public static int min( int[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		int min = array[offset];
		for (int i = 1; i < length; i++) {
			int tmp = array[offset + i];
			if (tmp < min) {
				min = tmp;
			}
		}
		return min;
	}

	/**
	 * Returns the value of the element with the minimum value
	 */
	public static long min( long[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		long min = array[offset];
		for (int i = 1; i < length; i++) {
			long tmp = array[offset + i];
			if (tmp < min) {
				min = tmp;
			}
		}
		return min;
	}

	/**
	 * Returns the value of the element with the minimum value
	 */
	public static float min( float[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		float min = array[offset];
		for (int i = 1; i < length; i++) {
			float tmp = array[offset + i];
			if (tmp < min) {
				min = tmp;
			}
		}
		return min;
	}

	/**
	 * Returns the value of the element with the minimum value
	 */
	public static double min( double[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		double min = array[offset];
		for (int i = 1; i < length; i++) {
			double tmp = array[offset + i];
			if (tmp < min) {
				min = tmp;
			}
		}
		return min;
	}

	/**
	 * Returns the index of the element with the minimum value
	 */
	public static int minIdx( byte[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		byte min = array[offset];
		int index = 0;
		for (int i = 1; i < length; i++) {
			byte tmp = array[offset + i];
			if (tmp < min) {
				min = tmp;
				index = i;
			}
		}
		return offset + index;
	}

	/**
	 * Returns the index of the element with the minimum value
	 */
	public static int minIdx( int[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		int min = array[offset];
		int index = 0;
		for (int i = 1; i < length; i++) {
			int tmp = array[offset + i];
			if (tmp < min) {
				min = tmp;
				index = i;
			}
		}
		return offset + index;
	}

	/**
	 * Returns the index of the element with the minimum value
	 */
	public static int minIdx( float[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		float min = array[offset];
		int index = 0;
		for (int i = 1; i < length; i++) {
			float tmp = array[offset + i];
			if (tmp < min) {
				min = tmp;
				index = i;
			}
		}
		return offset + index;
	}

	/**
	 * Returns the index of the element with the minimum value
	 */
	public static int minIdx( double[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		double min = array[offset];
		int index = 0;
		for (int i = 1; i < length; i++) {
			double tmp = array[offset + i];
			if (tmp < min) {
				min = tmp;
				index = i;
			}
		}
		return offset + index;
	}

	/**
	 * Returns the value of the element with the maximum value
	 */
	public static int max( byte[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		byte max = array[offset];
		for (int i = 1; i < length; i++) {
			byte tmp = array[offset + i];
			if (tmp > max) {
				max = tmp;
			}
		}
		return max;
	}

	/**
	 * Returns the value of the element with the maximum value
	 */
	public static int max( short[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		short max = array[offset];
		for (int i = 1; i < length; i++) {
			short tmp = array[offset + i];
			if (tmp > max) {
				max = tmp;
			}
		}
		return max;
	}

	/**
	 * Returns the value of the element with the maximum value
	 */
	public static int max( int[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		int max = array[offset];
		for (int i = 1; i < length; i++) {
			int tmp = array[offset + i];
			if (tmp > max) {
				max = tmp;
			}
		}
		return max;
	}

	/**
	 * Returns the value of the element with the maximum value
	 */
	public static long max( long[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		long max = array[offset];
		for (int i = 1; i < length; i++) {
			long tmp = array[offset + i];
			if (tmp > max) {
				max = tmp;
			}
		}
		return max;
	}

	/**
	 * Returns the value of the element with the maximum value
	 */
	public static float max( float[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		float max = array[offset];
		for (int i = 1; i < length; i++) {
			float tmp = array[offset + i];
			if (tmp > max) {
				max = tmp;
			}
		}
		return max;
	}

	/**
	 * Returns the value of the element with the maximum value
	 */
	public static double max( double[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		double max = array[offset];
		for (int i = 1; i < length; i++) {
			double tmp = array[offset + i];
			if (tmp > max) {
				max = tmp;
			}
		}
		return max;
	}

	/**
	 * Returns the index of the element with the maximum value
	 */
	public static int maxIdx( byte[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		byte max = array[offset];
		int index = 0;
		for (int i = 1; i < length; i++) {
			byte tmp = array[offset + i];
			if (tmp > max) {
				max = tmp;
				index = i;
			}
		}
		return offset + index;
	}

	/**
	 * Returns the index of the element with the maximum value
	 */
	public static int maxIdx( int[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		int max = array[offset];
		int index = 0;
		for (int i = 1; i < length; i++) {
			int tmp = array[offset + i];
			if (tmp > max) {
				max = tmp;
				index = i;
			}
		}
		return offset + index;
	}

	/**
	 * Returns the index of the element with the maximum value
	 */
	public static int maxIdx( float[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		float max = array[offset];
		int index = 0;
		for (int i = 1; i < length; i++) {
			float tmp = array[offset + i];
			if (tmp > max) {
				max = tmp;
				index = i;
			}
		}
		return offset + index;
	}

	/**
	 * Returns the index of the element with the maximum value
	 */
	public static int maxIdx( double[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		double max = array[offset];
		int index = 0;
		for (int i = 1; i < length; i++) {
			double tmp = array[offset + i];
			if (tmp > max) {
				max = tmp;
				index = i;
			}
		}
		return offset + index;
	}

	/**
	 * Finds the index of the first element in 'array' which is not less than 'val', i.e. the lower
	 * bound. Assumes the range is sorted in ascending order.
	 *
	 * @param offset First index in the array
	 * @param length Number of elements in the array
	 * @param val The value for which the lower bound is being searched for.
	 * @return lower bound index
	 */
	public static int lowerBound( byte[] array, int offset, int length, int val ) {
		sanityCheckRange(array.length, offset, length);

		int count = length;
		int first = offset;
		while (count > 0) {
			int step = count/2;
			int idx = first + step;
			if (array[idx] < val) {
				first = idx + 1;
				count -= step + 1;
			} else {
				count = step;
			}
		}
		return first;
	}

	/**
	 * Finds the index of the first element in 'array' which is not less than 'val', i.e. the lower
	 * bound. Array values are treated as unsigned bytes. Assumes the range is sorted in ascending order.
	 *
	 * @param array unsigned byte array
	 * @param offset First index in the array
	 * @param length Number of elements in the array
	 * @param val The value for which the lower bound is being searched for.
	 * @return lower bound index
	 */
	public static int lowerBoundU( byte[] array, int offset, int length, int val ) {
		sanityCheckRange(array.length, offset, length);

		int count = length;
		int first = offset;
		while (count > 0) {
			int step = count/2;
			int idx = first + step;
			if ((array[idx] & 0xFF) < val) {
				first = idx + 1;
				count -= step + 1;
			} else {
				count = step;
			}
		}
		return first;
	}

	/**
	 * Finds the index of the first element in 'array' which is not less than 'val', i.e. the lower
	 * bound. Assumes the range is sorted in ascending order.
	 *
	 * @param offset First index in the array
	 * @param length Number of elements in the array
	 * @param val The value for which the lower bound is being searched for.
	 * @return lower bound index
	 */
	public static int lowerBound( short[] array, int offset, int length, int val ) {
		sanityCheckRange(array.length, offset, length);

		int count = length;
		int first = offset;
		while (count > 0) {
			int step = count/2;
			int idx = first + step;
			if (array[idx] < val) {
				first = idx + 1;
				count -= step + 1;
			} else {
				count = step;
			}
		}
		return first;
	}

	/**
	 * Finds the index of the first element in 'array' which is not less than 'val', i.e. the lower
	 * bound. Assumes the range is sorted in ascending order.
	 *
	 * @param offset First index in the array
	 * @param length Number of elements in the array
	 * @param val The value for which the lower bound is being searched for.
	 * @return lower bound index
	 */
	public static int lowerBound( int[] array, int offset, int length, int val ) {
		sanityCheckRange(array.length, offset, length);

		int count = length;
		int first = offset;
		while (count > 0) {
			int step = count/2;
			int idx = first + step;
			if (array[idx] < val) {
				first = idx + 1;
				count -= step + 1;
			} else {
				count = step;
			}
		}
		return first;
	}

	/**
	 * Finds the index of the first element in 'array' which is not less than 'val', i.e. the lower
	 * bound. Assumes the range is sorted in ascending order.
	 *
	 * @param offset First index in the array
	 * @param length Number of elements in the array
	 * @param val The value for which the lower bound is being searched for.
	 * @return lower bound index
	 */
	public static int lowerBound( float[] array, int offset, int length, float val ) {
		sanityCheckRange(array.length, offset, length);

		int count = length;
		int first = offset;
		while (count > 0) {
			int step = count/2;
			int idx = first + step;
			if (array[idx] < val) {
				first = idx + 1;
				count -= step + 1;
			} else {
				count = step;
			}
		}
		return first;
	}

	/**
	 * Finds the index of the first element in 'array' which is not less than 'val', i.e. the lower
	 * bound. Assumes the range is sorted in ascending order.
	 *
	 * @param offset First index in the array
	 * @param length Number of elements in the array
	 * @param val The value for which the lower bound is being searched for.
	 * @return lower bound index
	 */
	public static int lowerBound( double[] array, int offset, int length, double val ) {
		sanityCheckRange(array.length, offset, length);

		int count = length;
		int first = offset;
		while (count > 0) {
			int step = count/2;
			int idx = first + step;
			if (array[idx] < val) {
				first = idx + 1;
				count -= step + 1;
			} else {
				count = step;
			}
		}
		return first;
	}

	/**
	 * Computes the sum of the array and stores the result in a double
	 */
	public static double sumD( byte[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		double sum = 0.0;
		for (int i = 0; i < length; i++) {
			sum += array[offset + i];
		}
		return sum;
	}

	/**
	 * Computes the sum of the array and stores the result in a double
	 */
	public static double sumD( short[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		double sum = 0.0;
		for (int i = 0; i < length; i++) {
			sum += array[offset + i];
		}
		return sum;
	}

	/**
	 * Computes the sum of the array and stores the result in a double
	 */
	public static double sumD( int[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		double sum = 0.0;
		for (int i = 0; i < length; i++) {
			sum += array[offset + i];
		}
		return sum;
	}

	/**
	 * Computes the sum of the array and stores the result in a double
	 */
	public static double sumD( long[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		double sum = 0.0;
		for (int i = 0; i < length; i++) {
			sum += array[offset + i];
		}
		return sum;
	}

	/**
	 * Computes the sum of the array and stores the result in a double
	 */
	public static double sumD( float[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		double sum = 0.0;
		for (int i = 0; i < length; i++) {
			sum += array[offset + i];
		}
		return sum;
	}

	/**
	 * Computes the sum of the array and stores the result in a double
	 */
	public static double sumD( double[] array, int offset, int length ) {
		sanityCheckRange(array.length, offset, length);

		double sum = 0.0;
		for (int i = 0; i < length; i++) {
			sum += array[offset + i];
		}
		return sum;
	}

	/**
	 * Recursively computes a result from an array. Previous results are fed back into the current value being
	 * considered.
	 */
	public static double feedbackIdxDOp( byte[] array, int offset, int length, FeedbackIdxD op ) {
		sanityCheckRange(array.length, offset, length);

		double result = 0.0;
		for (int i = 0; i < length; i++) {
			result = op.process(i, array[i + offset], result);
		}
		return result;
	}

	/**
	 * Recursively computes a result from an array. Previous results are fed back into the current value being
	 * considered.
	 */
	public static double feedbackIdxDOp( short[] array, int offset, int length, FeedbackIdxD op ) {
		sanityCheckRange(array.length, offset, length);

		double result = 0.0;
		for (int i = 0; i < length; i++) {
			result = op.process(i, array[i + offset], result);
		}
		return result;
	}

	/**
	 * Recursively computes a result from an array. Previous results are fed back into the current value being
	 * considered.
	 */
	public static double feedbackIdxDOp( int[] array, int offset, int length, FeedbackIdxD op ) {
		sanityCheckRange(array.length, offset, length);

		double result = 0.0;
		for (int i = 0; i < length; i++) {
			result = op.process(i, array[i + offset], result);
		}
		return result;
	}

	/**
	 * Recursively computes a result from an array. Previous results are fed back into the current value being
	 * considered.
	 */
	public static double feedbackIdxDOp( long[] array, int offset, int length, FeedbackIdxD op ) {
		sanityCheckRange(array.length, offset, length);

		double result = 0.0;
		for (int i = 0; i < length; i++) {
			result = op.process(i, array[i + offset], result);
		}
		return result;
	}

	/**
	 * Recursively computes a result from an array. Previous results are fed back into the current value being
	 * considered.
	 */
	public static double feedbackIdxDOp( float[] array, int offset, int length, FeedbackIdxD op ) {
		sanityCheckRange(array.length, offset, length);

		double result = 0.0;
		for (int i = 0; i < length; i++) {
			result = op.process(i, array[i + offset], result);
		}
		return result;
	}

	/**
	 * Recursively computes a result from an array. Previous results are fed back into the current value being
	 * considered.
	 */
	public static double feedbackIdxDOp( double[] array, int offset, int length, FeedbackIdxD op ) {
		sanityCheckRange(array.length, offset, length);

		double result = 0.0;
		for (int i = 0; i < length; i++) {
			result = op.process(i, array[i + offset], result);
		}
		return result;
	}

	/**
	 * Validates that 'length' is positive and 'offset' is a valid index.
	 * NOTE(review): does not verify offset + length <= arrayLength, so an
	 * out-of-bounds range surfaces as ArrayIndexOutOfBoundsException in the
	 * caller rather than IllegalArgumentException here — confirm if intended.
	 */
	private static void sanityCheckRange( int arrayLength, int offset, int length ) {
		if (length <= 0)
			throw new IllegalArgumentException("length must be positive. length=" + length);
		if (offset < 0 || offset >= arrayLength)
			throw new IllegalArgumentException("offset is invalid. offset=" + offset);
	}

	/**
	 * Validates that 'length' is not negative and 'offset' lies in [0, arrayLength].
	 * Unlike sanityCheckRange(), a zero-length (empty) range is allowed here.
	 */
	private static void sanityCheckShuffle( int arrayLength, int offset, int length ) {
		if (length < 0)
			throw new IllegalArgumentException("length must not be negative. length=" + length);
		if (offset < 0 || offset > arrayLength)
			throw new IllegalArgumentException("offset is invalid. offset=" + offset);
	}

	/**
	 * Operation applied to each element in sequence; the previous result is fed back into
	 * each invocation. Used by the feedbackIdxDOp() family of functions.
	 */
	@FunctionalInterface
	public interface FeedbackIdxD {
		/**
		 * @param idx Index of the element within the processed range (0-based)
		 * @param value Value of the current element
		 * @param previous Result returned by the previous invocation (0.0 for the first element)
		 * @return the new accumulated result
		 */
		double process( int idx, double value, double previous );
	}
}
|
Basic Tools for Fuzzy Modeling The lesson will begin with the basics of fuzzy set theory. Fuzzy set theory was first introduced in 1965 by Lotfi A. Zadeh. It may be regarded both as a generalization of classical set theory and as a generalization of dual logic. In knowledge-based methods, fuzzy sets are employed primarily to carry out the formal, content-defined mapping of human knowledge. This makes it possible to process human empirical knowledge with electronic data-processing systems. Clustering procedures belong to the algorithmic methods of data analysis. The first aim of clustering is to find structures contained within groups of data. These structures are usually classes to which objects from the data set are assigned. The result of the classification process is usually used as a classifier. Classical clustering assigns each object to exactly one class, whereas in fuzzy clustering the objects are assigned different degrees of membership to the different classes. Traffic data (speed and traffic volume) are measured by inductive loops on the motorway and forwarded to the Traffic Control Center TCC. The major goal of the TCC is to analyse the traffic situation as a basis for control and traffic information services. For that goal the detected traffic data are fuzzified and aggregated by fuzzy knowledge-based methods to classify the traffic situation by linguistic variables like congested, dense or free flow.
Defense Mechanisms and Major Depressive Disorder in African American Women This study explored differences in defense use between a group of predominantly African American women diagnosed with Major Depressive Disorder (MDD; n = 20) and a healthy control sample (n = 20), both from a primary care medical clinic. Patients completed the Patient Health Questionnaire to assess DSM-IV diagnoses and underwent video-recorded interviews, which were assessed for defenses using the Defensive Functioning Scale from the DSM-IV. Groups were compared for differences in overall defensive functioning, defense levels, and individual defenses using independent samples t tests. Results showed that the MDD group scored higher on mental inhibition, minor image distorting, and major image distorting defense levels as well as the individual defenses devaluation, dissociation, and isolation. The control group scored higher on the overall defensive functioning and the individual defense anticipation. The results also showed a trend toward the MDD group scoring higher on the disavowal defense level and the individual defense splitting. |
<filename>ExtractedJars/iRobot_com.irobot.home/javafiles/com/mixpanel/android/b/o$j.java
// Decompiled by Jad v1.5.8g. Copyright 2001 <NAME>.
// Jad home page: http://www.kpdus.com/jad.html
// Decompiler options: packimports(3) annotate safe
package com.mixpanel.android.b;
import android.graphics.Bitmap;
import android.graphics.drawable.BitmapDrawable;
import android.view.View;
import java.util.*;
// Referenced classes of package com.mixpanel.android.b:
// o, a
// NOTE(review): Jad-decompiled Mixpanel class; identifiers are obfuscated and
// the interleaved "// n m:op" lines are the decompiler's bytecode listing
// (kept verbatim).  All behavioural notes below are inferred from the visible
// code only — confirm against the original mixpanel-android sources.
//
// o$j appears to be a view "property mutator": field `a` applies a target
// value to a View (and exposes the target arguments), field `b` reads a
// view's current value, and WeakHashMap `c` caches each view's original
// value so a() can undo the applied change.
public static class o$j extends o
{

    // Undo pass: re-apply every cached original value to its (still-live) view.
    // Entries whose cached value is null (uncacheable originals, see a(View))
    // are skipped.
    public void a()
    {
        Iterator iterator = c.entrySet().iterator();
        // 0 0:aload_0
        // 1 1:getfield #33 <Field WeakHashMap c>
        // 2 4:invokevirtual #40 <Method Set WeakHashMap.entrySet()>
        // 3 7:invokeinterface #46 <Method Iterator Set.iterator()>
        // 4 12:astore_1
        do
        {
            if(!iterator.hasNext())
                break;
            // 5 13:aload_1
            // 6 14:invokeinterface #52 <Method boolean Iterator.hasNext()>
            // 7 19:ifeq 76
            Object obj = ((Object) ((java.util.Entry)iterator.next()));
            // 8 22:aload_1
            // 9 23:invokeinterface #56 <Method Object Iterator.next()>
            // 10 28:checkcast #58 <Class java.util.Map$Entry>
            // 11 31:astore_3
            View view = (View)((java.util.Entry) (obj)).getKey();
            // 12 32:aload_3
            // 13 33:invokeinterface #61 <Method Object java.util.Map$Entry.getKey()>
            // 14 38:checkcast #63 <Class View>
            // 15 41:astore_2
            obj = ((java.util.Entry) (obj)).getValue();
            // 16 42:aload_3
            // 17 43:invokeinterface #66 <Method Object java.util.Map$Entry.getValue()>
            // 18 48:astore_3
            if(obj != null)
            //* 19 49:aload_3
            //* 20 50:ifnull 13
            {
                // Re-apply the original value through the mutator's
                // single-element argument array `d`.
                d[0] = obj;
                // 21 53:aload_0
                // 22 54:getfield #26 <Field Object[] d>
                // 23 57:iconst_0
                // 24 58:aload_3
                // 25 59:aastore
                a.a(view, d);
                // 26 60:aload_0
                // 27 61:getfield #20 <Field a a>
                // 28 64:aload_2
                // 29 65:aload_0
                // 30 66:getfield #26 <Field Object[] d>
                // 31 69:invokevirtual #71 <Method Object a.a(View, Object[])>
                // 32 72:pop
            }
        } while(true);
        // 33 73:goto 13
        // 34 76:return
    }

    // Apply the configured change to `view`.  When a getter (`b`) is
    // available and the change has exactly one target argument, the current
    // value is read first so that (1) a no-op update can be skipped and
    // (2) the original value can be cached in `c` for later undo via a().
    public void a(View view)
    {
        if(b != null)
        //* 0 0:aload_0
        //* 1 1:getfield #22 <Field a b>
        //* 2 4:ifnull 199
        {
            // a.a() presumably returns the target argument values; the
            // compare-and-cache logic only handles the single-argument case.
            Object aobj[] = a.a();
            // 3 7:aload_0
            // 4 8:getfield #20 <Field a a>
            // 5 11:invokevirtual #75 <Method Object[] a.a()>
            // 6 14:astore_2
            if(1 == aobj.length)
            //* 7 15:iconst_1
            //* 8 16:aload_2
            //* 9 17:arraylength
            //* 10 18:icmpne 199
            {
                Object obj1 = aobj[0];
                // 11 21:aload_2
                // 12 22:iconst_0
                // 13 23:aaload
                // 14 24:astore_3
                Object obj = b.a(view);
                // 15 25:aload_0
                // 16 26:getfield #22 <Field a b>
                // 17 29:aload_1
                // 18 30:invokevirtual #78 <Method Object a.a(View)>
                // 19 33:astore_2
                // Skip the update entirely when the current value already
                // equals the target.  Bitmaps and BitmapDrawables need
                // content comparison (sameAs) rather than reference/equals.
                if(obj1 == obj)
                //* 20 34:aload_3
                //* 21 35:aload_2
                //* 22 36:if_acmpne 40
                    return;
                // 23 39:return
                if(obj1 != null)
                //* 24 40:aload_3
                //* 25 41:ifnull 127
                    if((obj1 instanceof Bitmap) && (obj instanceof Bitmap))
                    //* 26 44:aload_3
                    //* 27 45:instanceof #80 <Class Bitmap>
                    //* 28 48:ifeq 73
                    //* 29 51:aload_2
                    //* 30 52:instanceof #80 <Class Bitmap>
                    //* 31 55:ifeq 73
                    {
                        if(((Bitmap)obj1).sameAs((Bitmap)obj))
                        //* 32 58:aload_3
                        //* 33 59:checkcast #80 <Class Bitmap>
                        //* 34 62:aload_2
                        //* 35 63:checkcast #80 <Class Bitmap>
                        //* 36 66:invokevirtual #84 <Method boolean Bitmap.sameAs(Bitmap)>
                        //* 37 69:ifeq 127
                            return;
                        // 38 72:return
                    } else
                    if((obj1 instanceof BitmapDrawable) && (obj instanceof BitmapDrawable))
                    //* 39 73:aload_3
                    //* 40 74:instanceof #86 <Class BitmapDrawable>
                    //* 41 77:ifeq 118
                    //* 42 80:aload_2
                    //* 43 81:instanceof #86 <Class BitmapDrawable>
                    //* 44 84:ifeq 118
                    {
                        obj1 = ((Object) (((BitmapDrawable)obj1).getBitmap()));
                        // 45 87:aload_3
                        // 46 88:checkcast #86 <Class BitmapDrawable>
                        // 47 91:invokevirtual #90 <Method Bitmap BitmapDrawable.getBitmap()>
                        // 48 94:astore_3
                        Bitmap bitmap = ((BitmapDrawable)obj).getBitmap();
                        // 49 95:aload_2
                        // 50 96:checkcast #86 <Class BitmapDrawable>
                        // 51 99:invokevirtual #90 <Method Bitmap BitmapDrawable.getBitmap()>
                        // 52 102:astore 4
                        if(obj1 != null && ((Bitmap) (obj1)).sameAs(bitmap))
                        //* 53 104:aload_3
                        //* 54 105:ifnull 127
                        //* 55 108:aload_3
                        //* 56 109:aload 4
                        //* 57 111:invokevirtual #84 <Method boolean Bitmap.sameAs(Bitmap)>
                        //* 58 114:ifeq 127
                            return;
                        // 59 117:return
                    } else
                    if(obj1.equals(obj))
                    //* 60 118:aload_3
                    //* 61 119:aload_2
                    //* 62 120:invokevirtual #94 <Method boolean Object.equals(Object)>
                    //* 63 123:ifeq 127
                        return;
                    // 64 126:return
                // First time this view is touched: cache its current value so
                // a() can restore it.  Bitmap-valued originals are deliberately
                // not cached — presumably to avoid retaining large images.
                if(!(obj instanceof Bitmap) && !(obj instanceof BitmapDrawable) && !c.containsKey(((Object) (view))))
                //* 65 127:aload_2
                //* 66 128:instanceof #80 <Class Bitmap>
                //* 67 131:ifne 199
                //* 68 134:aload_2
                //* 69 135:instanceof #86 <Class BitmapDrawable>
                //* 70 138:ifne 199
                //* 71 141:aload_0
                //* 72 142:getfield #33 <Field WeakHashMap c>
                //* 73 145:aload_1
                //* 74 146:invokevirtual #97 <Method boolean WeakHashMap.containsKey(Object)>
                //* 75 149:ifeq 155
                //* 76 152:goto 199
                {
                    d[0] = obj;
                    // 77 155:aload_0
                    // 78 156:getfield #26 <Field Object[] d>
                    // 79 159:iconst_0
                    // 80 160:aload_2
                    // 81 161:aastore
                    // a.a(Object[]) appears to test whether the value is
                    // acceptable for caching; a null marker is stored when
                    // it is not, so the view is still remembered as visited.
                    if(a.a(d))
                    //* 82 162:aload_0
                    //* 83 163:getfield #20 <Field a a>
                    //* 84 166:aload_0
                    //* 85 167:getfield #26 <Field Object[] d>
                    //* 86 170:invokevirtual #100 <Method boolean a.a(Object[])>
                    //* 87 173:ifeq 189
                        c.put(((Object) (view)), obj);
                        // 88 176:aload_0
                        // 89 177:getfield #33 <Field WeakHashMap c>
                        // 90 180:aload_1
                        // 91 181:aload_2
                        // 92 182:invokevirtual #104 <Method Object WeakHashMap.put(Object, Object)>
                        // 93 185:pop
                    else
                    //* 94 186:goto 199
                        c.put(((Object) (view)), ((Object) (null)));
                        // 95 189:aload_0
                        // 96 190:getfield #33 <Field WeakHashMap c>
                        // 97 193:aload_1
                        // 98 194:aconst_null
                        // 99 195:invokevirtual #104 <Method Object WeakHashMap.put(Object, Object)>
                        // 100 198:pop
                }
            }
        }
        // Apply the mutation via the accessor's single-View overload —
        // presumably this writes the pre-configured target value.
        a.a(view);
        // 101 199:aload_0
        // 102 200:getfield #20 <Field a a>
        // 103 203:aload_1
        // 104 204:invokevirtual #78 <Method Object a.a(View)>
        // 105 207:pop
        // 106 208:return
    }

    // Synthetic bridge method (hence "volatile" in the decompile): simply
    // delegates to the superclass implementation.
    public volatile void b(View view)
    {
        super.b(view);
        // 0 0:aload_0
        // 1 1:aload_1
        // 2 2:invokespecial #106 <Method void o.b(View)>
        // 3 5:return
    }

    private final a a;    // accessor that applies the new value (and exposes target args)
    private final a b;    // accessor used to read a view's current value; may be null
    private final WeakHashMap c = new WeakHashMap();    // View -> original value for undo; weak keys avoid leaking views
    private final Object d[] = new Object[1];           // reusable single-element argument array

    // list is forwarded to the superclass (presumably a view path/selector);
    // a1 is the setter accessor, a2 the getter accessor.
    public o$j(List list, a a1, a a2)
    {
        super(list);
        // 0 0:aload_0
        // 1 1:aload_1
        // 2 2:invokespecial #18 <Method void o(List)>
        a = a1;
        // 3 5:aload_0
        // 4 6:aload_2
        // 5 7:putfield #20 <Field a a>
        b = a2;
        // 6 10:aload_0
        // 7 11:aload_3
        // 8 12:putfield #22 <Field a b>
        // 9 15:aload_0
        // 10 16:iconst_1
        // 11 17:anewarray Object[]
        // 12 20:putfield #26 <Field Object[] d>
        // 13 23:aload_0
        // 14 24:new #28 <Class WeakHashMap>
        // 15 27:dup
        // 16 28:invokespecial #31 <Method void WeakHashMap()>
        // 17 31:putfield #33 <Field WeakHashMap c>
        // 18 34:return
    }
}
|
Masochistic Personality Disorder: A Prototype Analysis of Diagnosis and Sex Bias The Work Group of the American Psychiatric Association to revise DSM-III in 1985 proposed a new personality disorder titled Masochistic Personality Disorder (MPD). This study concerns the clinical relevance and possible sex bias of MPD. The study was performed with clinicians who analyzed 15 case histories, five of which represented masochistic personality. The results led to the rejection of two hypotheses: a) masochistic personalities can be subsumed under existing DSM-III categories and b) there is a sex bias in the diagnostic use of MPD. |
Don’t get me wrong: in a purely substantive sense, the mockery is well deserved. But in a political sense, it’s not. Cantor’s tweet is almost comically shameless, but it’s also one of the reasons that Republicans continue to get credit for their economic policies even though their economic policies are routinely disastrous. It’s because they’re willing to be shameless and they don’t really care if anyone calls them on it.
Paul Ryan’s plan to shrink the federal government and gut Medicare is called…..”The Path to Prosperity.” Of course it is. Every Republican plan is called something like that. It’s shameless! The Reagan boom? All due to lower marginal tax rates, just like they predicted. The Clinton boom years? A delayed reaction to the Reagan era. Healthy corporate earnings in the aughts? All due to Republican reductions in capital gains taxes. Privatizing Social Security? It’s all about encouraging capital formation and growing the economy. Fighting bank regulation? They just want to reduce regulatory uncertainty and allow the economy to boom. Etc. etc. And there are always plenty of think tank analyses to back this stuff up with hard numbers.
It seems laughable, but it’s not. If you say that your policies are responsible for economic growth enough times, people will believe it. Nobody really understands this stuff, after all. And the more confidently and shamelessly you say it, the more believers you’ll have. So why shouldn’t Cantor claim that Republicans are responsible for all the job growth since January? Liberal bloggers will mock, but that’s nothing to be afraid of. Not as long as the steady stream of shamelessness keeps convincing people that Republican policies are putting us back on the right economic track. And it does. |
What Does it Take to Implement a Telemedicine Program in Your Home Care Agency? Using telemedicine — remote, two-way audio/video visits — to augment traditional home care visits provides an added way to case manage patients in an efficient manner and often contributes to cost savings. This article will discuss many of the key points necessary to implement and case manage patients using telemedicine technology. Throughout this article, key tips will be provided that will assist your agency in getting through the labyrinth of initiating telemedicine technology.
import sys
input=sys.stdin.readline
mod = 10**9+7
def nCr(fact, inv, n, r):
return fact[n] * inv[r] * inv[n-r] % mod
def main():
    """Read H, W, A, B from stdin and print the answer modulo 10**9 + 7.

    NOTE(review): the summation below looks like the classic "count lattice
    paths across an H x W grid that avoid a forbidden A-row by B-column
    corner block" problem, splitting each path at the column where it crosses
    the boundary — confirm against the original problem statement.
    """
    H,W,A,B = map(int,input().split())
    # fact[i] = i! mod p, for i in [0, H+W]
    fact = [1]
    for i in range(1, H+W+1):
        fact.append(fact[i-1] * i % mod)
    # inv[i] = (i!)^-1 mod p, filled top-down after one modular exponentiation
    inv = [0] * (H+W+1)
    inv[H+W] = pow(fact[H+W], mod-2, mod) # by Fermat's little theorem, a**(mod-2) is the modular inverse of a
    for i in range(H+W, 0, -1):
        inv[i-1] = inv[i] * i % mod
    ans = 0
    for i in range(W-B):
        # Two independent binomials count the two halves of each path,
        # joined at the i-th admissible crossing point.
        n0 = H-A-1+B+i
        r0 = H-A-1
        n1 = A-1+W-B-1-i
        r1 = A-1
        ans += nCr(fact, inv, n0, r0) * nCr(fact, inv, n1, r1) % mod
    print(ans%mod)
if __name__ == '__main__':
    main()  # entry point: run the solver only when executed as a script
|
<reponame>AldrichMascarenhas/SIH2017-Chemical-Inv-ADMIN
import { NgModule, Type } from '@angular/core';
import { BrowserModule, Title } from '@angular/platform-browser';
import { CovalentCoreModule } from '@covalent/core';
import { CovalentHttpModule, IHttpInterceptor } from '@covalent/http';
import { CovalentHighlightModule } from '@covalent/highlight';
import { CovalentMarkdownModule } from '@covalent/markdown';
import { CovalentChartsModule } from '@covalent/charts';
import { AppComponent } from './app.component';
import { MainComponent } from './main/main.component';
import { DashboardComponent } from './dashboard/dashboard.component';
import { TemplatesComponent } from './templates/templates.component';
import { DashboardTemplateComponent } from './templates/dashboard/dashboard.component';
import { EmailTemplateComponent } from './templates/email/email.component';
import { EditorTemplateComponent } from './templates/editor/editor.component';
import { appRoutes, appRoutingProviders } from './app.routes';
import { ChartComponent } from '../components/chart/chart.component';
import { RequestInterceptor } from '../config/interceptors/request.interceptor';
import { NgxChartsModule } from '@swimlane/ngx-charts';
import { ProducersComponent } from './producers/producers.component';
import { ProductTypesComponent } from './product-types/product-types.component';
import { ProductsComponent } from './products/products.component';
import { PackagesComponent } from './packages/packages.component';
import { LogisticsComponent } from './logistics/logistics.component';
import { TransportsComponent } from './transports/transports.component';
import { LogisticsServicesComponent } from './logistics-services/logistics-services.component';
import { WarehousesComponent } from './warehouses/warehouses.component';
import { ShipmentsComponent } from './shipments/shipments.component';
import { EndUsersComponent } from './end-users/end-users.component';
import { LoginComponent } from './login/login.component';
import {APIServiceService} from "./services/apiservice.service";
import { AgmCoreModule } from 'angular2-google-maps/core';
import { ProducersByStateComponent } from './producers-by-state/producers-by-state.component';
import { ProductsStateComponent } from './products-state/products-state.component';
// Interceptor classes registered with Angular's injector; RequestInterceptor
// is additionally wired into CovalentHttpModule below so it runs on every
// ('**') request path.
const httpInterceptorProviders: Type<any>[] = [
  RequestInterceptor,
];
/**
 * Root Angular module of the chemical-inventory admin UI.
 *
 * Declares every page/feature component, imports the Covalent UI, HTTP,
 * highlight, markdown and charts modules, the application routes,
 * ngx-charts and the Google Maps module, and provides the routing helpers,
 * HTTP interceptors, page Title service and the REST API service.
 */
@NgModule({
  declarations: [
    AppComponent,
    MainComponent,
    DashboardComponent,
    ChartComponent,
    TemplatesComponent,
    DashboardTemplateComponent,
    EmailTemplateComponent,
    EditorTemplateComponent,
    ProducersComponent,
    ProductTypesComponent,
    ProductsComponent,
    PackagesComponent,
    LogisticsComponent,
    TransportsComponent,
    LogisticsServicesComponent,
    WarehousesComponent,
    ShipmentsComponent,
    EndUsersComponent,
    LoginComponent,
    ProducersByStateComponent,
    ProductsStateComponent,
  ], // directives, components, and pipes owned by this NgModule
  imports: [
    BrowserModule,
    CovalentCoreModule.forRoot(),
    CovalentChartsModule.forRoot(),
    // Route the RequestInterceptor through every HTTP call ('**' = all paths).
    CovalentHttpModule.forRoot({
      interceptors: [{
        interceptor: RequestInterceptor, paths: ['**'],
      }],
    }),
    CovalentHighlightModule.forRoot(),
    CovalentMarkdownModule.forRoot(),
    appRoutes,
    NgxChartsModule,
    AgmCoreModule.forRoot({
      apiKey: '<KEY>'
    })
  ], // modules needed to run this module
  providers: [
    appRoutingProviders,
    httpInterceptorProviders,
    Title,
    APIServiceService
  ], // additional providers needed for this module
  entryComponents: [ ],
  bootstrap: [ AppComponent ],
})
export class AppModule {}
|
<filename>java-utils-elasticsearch/src/main/java/ca/nrc/dtrc/elasticsearch/request/_Source.java
package ca.nrc.dtrc.elasticsearch.request;
import org.json.JSONArray;
import org.json.JSONObject;
/**
 * Builds the {@code _source} element of an Elasticsearch request body,
 * which restricts the document fields returned by a search.
 */
public class _Source extends RequestBodyElement {

	String[] fields = null;

	/**
	 * @param _fields names of the document fields to include in the
	 *        {@code _source} of search hits.
	 */
	public _Source(String... _fields) {
		super();
		fields = _fields;
	}

	/**
	 * @return a JSON object of the form {@code {"_source": ["field1", ...]}}.
	 */
	@Override
	public JSONObject jsonObject() {
		// Collect the field names into a JSON array.
		// (An unused local JSONObject was previously allocated here; removed.)
		JSONArray jFieldsArr = new JSONArray();
		for (String aField : fields) {
			jFieldsArr.put(aField);
		}
		return new JSONObject().put("_source", jFieldsArr);
	}
}
|
Originally drafted in 1962, the city’s development ordinance is ready for a new millennium upgrade.
Jennifer Clark, the city’s director for community development, said a new and improved unified development ordinance will provide clear guidelines for land use in the city and evaluate the impact of the uses in existing neighborhoods.
In a study session presentation Monday to the Independence City Council, Clark said the ordinance would also reduce conflicting land uses and increase compatible ones.
“The (original) ordinance considers zoning and subdivisions (developments) separately, which creates a disjointed process for review in land use proposals,” Clark said.
Clark added the new ordinance is necessary to meet the needs of today’s market, keep up with current trends and technology, improve the quality of new development and enhance and preserve existing neighborhoods.
“This is... certainly keeping with the desires of the community,” she said.
The main purpose of the ordinance, Clark said, is to entice higher quality developments and clarify the development process. For example, Clark said in residential zoning districts, the current ordinance – which was revised in 1974 and 1999 – has no current design guidelines in 12 residential districts, allows 1-43 units per acre and requires open space in up to 25 percent of developmental sites.
The proposed ordinance would apply citywide residential design guidelines, reduce residential districts to eight, allow 1-30 units per acre and require open space at 30 percent of all sites; 60 percent for conservation development, which would reduce lot sizes.
“Many of the changes simply identify current practices more carefully to eliminate subjectivity,” Clark said.
A public hearing on the UDO is scheduled for May 7 during a Planning Commission hearing. The ordinance will be introduced to the City Council June 1, voted on two weeks later and, if approved, implemented July 1.
A public hearing on the adoption of a new Unified Development Ordinance to replace the current Independence zoning code will be before the Planning Commission at 6:30 p.m. May 7 in the City Council Chambers on the lower level at City Hall, 111 E. Maple.
A review draft has been completed and is available for viewing at the Community Development Department located on the 2nd Floor of City Hall. It is also on the city’s Web site www.independencemo.org on the Community Development Department page.
The current zoning ordinance was originally adopted in 1962 with major revisions in 1974 and 1999. |
/*
* Copyright 2014-2017 Real Logic Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.agrona.concurrent.broadcast;
import org.agrona.concurrent.MessageHandler;
import org.agrona.concurrent.UnsafeBuffer;
import java.nio.ByteBuffer;
/**
* Receiver that copies messages that have been broadcast to enable a simpler API for the client.
*/
/**
 * Receiver that copies messages that have been broadcast to enable a simpler API for the client.
 */
public class CopyBroadcastReceiver
{
    /**
     * Default length for the scratch buffer for copying messages into.
     */
    public static final int SCRATCH_BUFFER_LENGTH = 4096;

    private final BroadcastReceiver receiver;
    // Private copy target: each message is copied out of the broadcast buffer
    // so the handler can read it without racing the broadcaster.
    private final UnsafeBuffer scratchBuffer;

    /**
     * Wrap a {@link BroadcastReceiver} to simplify the API for receiving messages.
     *
     * @param receiver to be wrapped.
     * @param scratchBufferLength is the maximum length of a message to be copied when receiving.
     */
    public CopyBroadcastReceiver(final BroadcastReceiver receiver, final int scratchBufferLength)
    {
        this.receiver = receiver;
        scratchBuffer = new UnsafeBuffer(ByteBuffer.allocateDirect(scratchBufferLength));

        while (receiver.receiveNext())
        {
            // If we're reconnecting to a broadcast buffer then we need to
            // scan ourselves up to date, otherwise we risk "falling behind"
            // the buffer due to the time taken to catchup.
        }
    }

    /**
     * Wrap a {@link BroadcastReceiver} to simplify the API for receiving messages.
     *
     * @param receiver to be wrapped.
     */
    public CopyBroadcastReceiver(final BroadcastReceiver receiver)
    {
        this(receiver, SCRATCH_BUFFER_LENGTH);
    }

    /**
     * Receive one message from the broadcast buffer.
     *
     * @param handler to be called for each message received.
     * @return the number of messages that have been received.
     */
    public int receive(final MessageHandler handler)
    {
        int messagesReceived = 0;
        final BroadcastReceiver receiver = this.receiver;
        // Snapshot the lapped count before polling: if the broadcaster laps
        // this receiver while it is reading, the count changes and the data
        // can no longer be trusted.
        final long lastSeenLappedCount = receiver.lappedCount();

        if (receiver.receiveNext())
        {
            if (lastSeenLappedCount != receiver.lappedCount())
            {
                throw new IllegalStateException("Unable to keep up with broadcast buffer");
            }

            final int length = receiver.length();
            final int capacity = scratchBuffer.capacity();
            if (length > capacity)
            {
                throw new IllegalStateException(String.format(
                    "Buffer required length of %d but only has %d", length, capacity));
            }

            final int msgTypeId = receiver.typeId();
            scratchBuffer.putBytes(0, receiver.buffer(), receiver.offset(), length);

            // validate() must run *after* the copy: it confirms the
            // broadcaster did not wrap and overwrite the message while it
            // was being copied into the scratch buffer.
            if (!receiver.validate())
            {
                throw new IllegalStateException("Unable to keep up with broadcast buffer");
            }

            handler.onMessage(msgTypeId, scratchBuffer, 0, length);

            messagesReceived = 1;
        }

        return messagesReceived;
    }
}
|
<filename>src/modules/school/useCases/createQuestion/tests/createQuestionController.spec.ts<gh_stars>1-10
import jwt from 'jsonwebtoken';
import request from 'supertest';
import { Connection } from 'typeorm';
import { app } from '@shared/infra/http/app';
import { connection } from '@shared/infra/typeorm/index';
import authConfig from '../../../../../config/auth';
// TypeORM connection shared by every test in this suite.
let db: Connection;

// Integration tests for POST /school/question.  Each test provisions its own
// user through the public endpoints, logs in to obtain a JWT, creates a parent
// school test, then exercises question creation against the real HTTP app.
describe('Create Question Controller', () => {
  beforeAll(async () => {
    db = await connection();
    await db.runMigrations();
  });

  // Drop everything the migrations created so the schema is clean for other suites.
  afterAll(async () => {
    await db.query(
      'drop table users; drop table alternatives; drop table questions; drop table school_tests; drop table migrations'
    );
    await db.close();
  });

  // Happy path: a teacher creates a school test and attaches a question to it.
  it('should be able to create new question', async () => {
    const newUser = await request(app).post('/school/users').send({
      name: 'example <NAME>',
      email: '<EMAIL>',
      password: '<PASSWORD>',
      teacher: true,
    });

    const authenticateUser = await request(app).post('/school/login').send({
      email: '<EMAIL>',
      password: '<PASSWORD>',
    });

    const schoolTest = await request(app)
      .post('/school/schoolTests')
      .send({
        title: 'title test',
        subjects: 'test subject',
      })
      .set({
        authorization: `Bearer ${authenticateUser.body.token}`,
      });

    const { id } = schoolTest.body;

    const response = await request(app)
      .post('/school/question')
      .send({
        question: 'question test',
        test_id: id,
      })
      .set({
        authorization: `Bearer ${authenticateUser.body.token}`,
      });

    expect(newUser.status).toBe(201);
    expect(authenticateUser.status).toBe(200);
    expect(schoolTest.status).toBe(201);
    expect(response.status).toBe(201);
  });

  // Validation: omitting the `question` field must be rejected with 400.
  it('should not be able create a new question with empty question', async () => {
    const newUser = await request(app).post('/school/users').send({
      name: '<NAME>',
      email: '<EMAIL>',
      password: '<PASSWORD>',
      teacher: true,
    });

    const authenticateUser = await request(app).post('/school/login').send({
      email: '<EMAIL>',
      password: '<PASSWORD>',
    });

    const schoolTest = await request(app)
      .post('/school/schoolTests')
      .send({
        title: 'title test',
        subjects: 'test subject',
      })
      .set({
        authorization: `Bearer ${authenticateUser.body.token}`,
      });

    const { id } = schoolTest.body;

    // Body deliberately omits `question`.
    const response = await request(app)
      .post('/school/question')
      .send({
        test_id: id,
      })
      .set({
        authorization: `Bearer ${authenticateUser.body.token}`,
      });

    expect(newUser.status).toBe(201);
    expect(authenticateUser.status).toBe(200);
    expect(schoolTest.status).toBe(201);
    expect(response.status).toBe(400);
  });

  // Authorization: a non-teacher (teacher: false) may neither create school
  // tests nor questions — both endpoints respond 401.
  it('should not be able create a new question with not authorized user', async () => {
    const newUser = await request(app).post('/school/users').send({
      name: 'example user 1',
      email: '<EMAIL>',
      password: '<PASSWORD>',
      teacher: false,
    });

    const authenticateUser = await request(app).post('/school/login').send({
      email: '<EMAIL>',
      password: '<PASSWORD>',
    });

    const schoolTest = await request(app)
      .post('/school/schoolTests')
      .send({
        title: 'title test',
        subjects: 'test subject',
      })
      .set({
        authorization: `Bearer ${authenticateUser.body.token}`,
      });

    const { id } = schoolTest.body;

    const response = await request(app)
      .post('/school/question')
      .send({
        question: 'question test',
        test_id: id,
      })
      .set({
        authorization: `Bearer ${authenticateUser.body.token}`,
      });

    expect(newUser.status).toBe(201);
    expect(authenticateUser.status).toBe(200);
    expect(schoolTest.status).toBe(401);
    expect(response.status).toBe(401);
  });
});
|
<filename>src/add-ons/accelerants/neomagic/engine/nm_globals.h
/* Global state shared across the NeoMagic accelerant's compilation units
 * (defined in the engine sources).  NOTE(review): descriptions below are
 * inferred from common Be/Haiku accelerant structure — confirm against the
 * driver sources. */
extern int fd;                        /* file descriptor of the opened kernel-driver device */
extern shared_info *si;               /* info area shared with the kernel driver */
extern area_id shared_info_area;      /* area backing the shared_info mapping */
extern area_id regs_area, regs2_area; /* areas backing the two register mappings */
extern vuint32 *regs, *regs2;         /* mapped MMIO register banks */
extern display_mode *my_mode_list;    /* list of supported display modes */
extern area_id my_mode_list_area;     /* area backing my_mode_list */
extern int accelerantIsClone;         /* nonzero when this accelerant instance is a clone */
extern nm_get_set_pci nm_pci_access;  /* hook structure for PCI config-space access */
extern nm_in_out_isa nm_isa_access;   /* hook structure for ISA port I/O */
extern nm_bes_data nm_bes_access;     /* back-end scaler (overlay) state — presumably; verify */
|
Timedependent insulin oligomer reaction pathway prior to fibril formation: Cooling and seeding The difficulty in identifying the toxic agents in all amyloidrelated diseases is likely due to the complicated kinetics and thermodynamics of the nucleation process and subsequent fibril formation. The slow progression of these diseases suggests that the formation, incorporation, and/or action of toxic agents are possibly rate limiting. Candidate toxic agents include precursors (some at very low concentrations), also called oligomers and protofibrils, and the fibrils. Here, we investigate the kinetic and thermodynamic behavior of human insulin oligomers (imaged by cryoEM) under fibrilforming conditions (pH 1.6 and 65°C) by probing the reaction pathway to insulin fibril formation using two different types of experimentscooling and seedingand confirm the validity of the nucleation model and its effect on fibril growth. The results from both the cooling and seeding studies confirm the existence of a timechanging oligomer reaction process prior to fibril formation that likely involves a ratelimiting nucleation process followed by structural rearrangements of intermediates (into sheet rich entities) to form oligomers that then form fibrils. The latter structural rearrangement step occurs even in the absence of nuclei (i.e., with added heterologous seeds). Nuclei are formed at the fibrillation conditions (pH 1.6 and 65°C) but are also continuously formed during cooling at pH 1.6 and 25°C. Within the timescale of the experiments, only after increasing the temperature to 65°C are the trapped insulin nuclei and resultant structures able to induce the structural rearrangement step and overcome the energy barrier to form fibrils. This delay in fibrillation and accumulation of nuclei at low temperature (25°C) result in a decrease in the mean length of the fibers when placed at 65°C. 
Fits of an empirical model to the data provide quantitative measures of the delay in the lag time during the nucleation process and subsequent reduction in fibril growth rate resulting from the cooling. Also, the seeding experiments, within the timescale of the measurements, demonstrate that fibers can initiate fast fibrillation with dissolved insulin (fresh or taken during the lag period) but not with other fibers. Qualitatively this is explained with a conjectural free-energy space plot. Proteins 2009. © 2009 Wiley-Liss, Inc.
Q:
Is it possible to multiplayer Project Zomboid with only one copy of the game?
See, I have a few friends coming over for a LAN party, and I want a game to play together, but I'm a poor student. Is it possible to use the GOG copy of the game to play together on LAN? It's really not possible for us to shell out the money needed for 6 copies and we only want to play it for one night. In addition, I'm not going to share the game with the friends, and I'll delete it after the night.
I'm more concerned with technical feasibility, rather than legal feasibility, since I live out of US jurisdiction in a country that doesn't tend to prosecute such crimes. I've nothing but respect for the creators of Project Zomboid, but financially, I don't have the money to purchase 6 copies, and I think that playing a single night with friends, would fall under the "watching movies together" metaphor outlined in GOG's FAQ.
A:
While the game itself may be free of DRM, GOG.COM policies do not allow it
Can I enjoy my purchases both on my laptop and desktop computer at
home?
Yes. We do not limit the number of installations or reinstallations,
as long as you install your purchased games on computers in your
household. So yeah, if you've got a render-farm in the basement, you
might actually break the world record for the number of legal Witcher
installations in one household. However, if you think about installing
your game on a friend's machine or sharing it with others then please
don't do it, okay? The same principle applies to movies - you're free
to watch them anywhere you want, with anyone you want, as long as you
don't share them with people who haven't purchased them.
Emphasis mine |
Mountain View, CA. In a presentation at the 7th RISC-V Workshop, Esperanto Technologies announced that it plans to develop energy-efficient computing solutions for artificial intelligence (AI) and machine learning based on the open standard RISC-V Instruction Set Architecture (ISA). The company’s first RISC-V AI system on chips (SoCs) will leverage 16 ET-Maxion 64-bit RISC-V cores, 4,096 energy-efficient ET-Minion RISC-V cores (each with a vector floating point unit), and be designed on 7 nm CMOS process technology.
“By designing in leading-edge 7 nm CMOS and with the simplicity of the RISC-V architecture, we can fit over 4,000 full 64-bit cores each with vector accelerators on a single chip,” Dave Ditzel, President and CEO of Esperanto Technologies said during a presentation entitled Industrial-Strength, High-Performance RISC-V Processors for Energy-Efficient Computing. “By basing our chip on RISC-V we can take advantage of the growing software base of operating systems, compilers, and applications. RISC-V is so simple and extensible that we can deliver world-class TeraFlop levels of computing without needing to resort to proprietary instruction sets, thereby greatly increasing software availability.”
Esperanto Technologies also plans to license its ET-Maxion and ET-Minion cores to help proliferate the RISC-V architecture.
For more information on Esperanto Technologies, visit www.esperanto.ai. For more on the RISC-V Foundation or 7th RISC-V Workshop, go to https://riscv.org/.
eletter-11-28-2017 |
Shout It Out Loud (Kiss song)
Overview
The title of the song was taken from British beat group The Hollies' song "We Want to Shout It Out Loud", which Wicked Lester (pre-Kiss) recorded for their only unreleased album.
The song was edited several times for singles and compilation albums. The single version of the song is 12 seconds shorter than the studio due to the last "Shout it, shout it, shout it out loud" line being excluded. The Alive II single, which was described by Billboard magazine as a "raucous rocker" that "catches the excitement and energy of the live show," is 23 seconds shorter than the album song due to the removal of audience chant "We want Kiss". The Killers version is the shortest with the duration of 2:35 as a result of the fade-out of the song starting during the first repetition of the "Shout it, shout it, shout it out loud" lyric following Gene Simmons' "Oh yeah". The version found on Smashes, Thrashes & Hits is 16 seconds longer, as the chorus repeats and the song ends with "Shout it!".
Released as a single in 1976, the band and their record company, Casablanca Records, were trying to cash in on the success of their previous single, the live version of "Rock and Roll All Nite", by releasing another anthem. While the song would break into the American Billboard Top 40, peaking at #31, it would prove to not be as successful as its predecessor was but would remain in the band's concert set lists for almost every tour from that point on. In Canada, the single was far more successful, reaching #1 on the RPM national singles chart on May 22, 1976. The song is also one of few to be sung by both Gene Simmons and Paul Stanley. A music video was made for the live version of the song from the 1996 concert in Tiger Stadium, from the Alive/Worldwide Tour. It was directed by Wayne Isham.
Legacy
The song was performed on almost every tour since its release, being dropped for Lick It Up Tour, Animalize Tour and Asylum Tour. It was listed as the 27th single of '76 in Canada.
A 2007 re-recording of the song by the band is featured on the music/rhythm video game Guitar Hero 5. The song is featured in the movie Detroit Rock City and appears on the soundtrack for the movie. The cast of Glee covered the song in the episode "Theatricality". The male part of the group (excluding Kurt Hummel) were also dressed as Kiss members. The song was also featured in a What's New, Scooby-Doo? episode "A Scooby-Doo Halloween".
The song was covered several times. In 1978 by James Last, in 1998 by Pretty Boy Floyd and Zeke. The all-female band Crucified Barbara covered the song in 2006 and Stryper released a cover of the song on their 2011 cover album The Covering. Erik Grönwall, the 2009 winner of the Swedish Idol covered the song on his eponymous debut album. While it was not released as a single, the song charted on Sverigetopplistan, reaching number 49. Grönwall also performed the song on Swedish Idol. It was covered by The Yellow Monkey for the Jigoku no Shōsan: Kiss Tribute in Japan (地獄の賞賛-KISS TRIBUTE IN JAPAN-) album in 1998, Lemmy's cover appears on 2004's Spin the Bottle: An All-Star Tribute to Kiss, Sack Trick with Iron Maiden's Bruce Dickinson covered the song for the 2005 Sheep in KISS Make Up album, and Keri Kelli's cover appears on Lick It Up – A Millennium Tribute to Kiss from 2008. |
/* eslint-disable @typescript-eslint/no-explicit-any,@typescript-eslint/explicit-module-boundary-types */
export class RGBColor {
  /** True when the input string was successfully parsed as a color. */
  public ok: boolean
  /** Optional alpha channel; undefined means "not specified" (treated as 1 in toRGBA). */
  public a?: number = undefined
  /** Red channel, clamped to [0, 255]. */
  public r = 0
  /** Green channel, clamped to [0, 255]. */
  public g = 0
  /** Blue channel, clamped to [0, 255]. */
  public b = 0
  /** CSS named colors mapped to their 6-digit hex values. */
  private readonly simpleColors: { [key: string]: string } = {}
  /** Parsers tried in order: rgb(), 6-digit hex, 3-digit hex. */
  private colorDefs: {
    re: RegExp
    example: string[]
    process: (bits: RegExpExecArray) => number[]
  }[] = []
  /**
   * Parse a CSS color string: a named color, "#rrggbb", "rrggbb",
   * "#rgb", "rgb", or "rgb(r, g, b)". On failure `ok` stays false and
   * the channels keep their defaults (black).
   */
  constructor(colorString?: string) {
    this.ok = false
    if (!colorString) {
      return
    }
    // strip any leading #
    if (colorString.charAt(0) == '#') {
      // keep at most the six hex digits after the '#'
      colorString = colorString.slice(1, 7)
    }
    colorString = colorString.replace(/ /g, '')
    colorString = colorString.toLowerCase()
    // before getting into regexps, try simple matches
    // and overwrite the input
    this.simpleColors = {
      aliceblue: 'f0f8ff',
      antiquewhite: 'faebd7',
      aqua: '00ffff',
      aquamarine: '7fffd4',
      azure: 'f0ffff',
      beige: 'f5f5dc',
      bisque: 'ffe4c4',
      black: '000000',
      blanchedalmond: 'ffebcd',
      blue: '0000ff',
      blueviolet: '8a2be2',
      brown: 'a52a2a',
      burlywood: 'deb887',
      cadetblue: '5f9ea0',
      chartreuse: '7fff00',
      chocolate: 'd2691e',
      coral: 'ff7f50',
      cornflowerblue: '6495ed',
      cornsilk: 'fff8dc',
      crimson: 'dc143c',
      cyan: '00ffff',
      darkblue: '00008b',
      darkcyan: '008b8b',
      darkgoldenrod: 'b8860b',
      darkgray: 'a9a9a9',
      darkgrey: 'a9a9a9',
      darkgreen: '006400',
      darkkhaki: 'bdb76b',
      darkmagenta: '8b008b',
      darkolivegreen: '556b2f',
      darkorange: 'ff8c00',
      darkorchid: '9932cc',
      darkred: '8b0000',
      darksalmon: 'e9967a',
      darkseagreen: '8fbc8f',
      darkslateblue: '483d8b',
      darkslategray: '2f4f4f',
      darkslategrey: '2f4f4f',
      darkturquoise: '00ced1',
      darkviolet: '9400d3',
      deeppink: 'ff1493',
      deepskyblue: '00bfff',
      dimgray: '696969',
      dimgrey: '696969',
      dodgerblue: '1e90ff',
      feldspar: 'd19275',
      firebrick: 'b22222',
      floralwhite: 'fffaf0',
      forestgreen: '228b22',
      fuchsia: 'ff00ff',
      gainsboro: 'dcdcdc',
      ghostwhite: 'f8f8ff',
      gold: 'ffd700',
      goldenrod: 'daa520',
      gray: '808080',
      grey: '808080',
      green: '008000',
      greenyellow: 'adff2f',
      honeydew: 'f0fff0',
      hotpink: 'ff69b4',
      indianred: 'cd5c5c',
      indigo: '4b0082',
      ivory: 'fffff0',
      khaki: 'f0e68c',
      lavender: 'e6e6fa',
      lavenderblush: 'fff0f5',
      lawngreen: '7cfc00',
      lemonchiffon: 'fffacd',
      lightblue: 'add8e6',
      lightcoral: 'f08080',
      lightcyan: 'e0ffff',
      lightgoldenrodyellow: 'fafad2',
      lightgray: 'd3d3d3',
      lightgrey: 'd3d3d3',
      lightgreen: '90ee90',
      lightpink: 'ffb6c1',
      lightsalmon: 'ffa07a',
      lightseagreen: '20b2aa',
      lightskyblue: '87cefa',
      lightslateblue: '8470ff',
      lightslategray: '778899',
      lightslategrey: '778899',
      lightsteelblue: 'b0c4de',
      lightyellow: 'ffffe0',
      lime: '00ff00',
      limegreen: '32cd32',
      linen: 'faf0e6',
      magenta: 'ff00ff',
      maroon: '800000',
      mediumaquamarine: '66cdaa',
      mediumblue: '0000cd',
      mediumorchid: 'ba55d3',
      mediumpurple: '9370d8',
      mediumseagreen: '3cb371',
      mediumslateblue: '7b68ee',
      mediumspringgreen: '00fa9a',
      mediumturquoise: '48d1cc',
      mediumvioletred: 'c71585',
      midnightblue: '191970',
      mintcream: 'f5fffa',
      mistyrose: 'ffe4e1',
      moccasin: 'ffe4b5',
      navajowhite: 'ffdead',
      navy: '000080',
      oldlace: 'fdf5e6',
      olive: '808000',
      olivedrab: '6b8e23',
      orange: 'ffa500',
      orangered: 'ff4500',
      orchid: 'da70d6',
      palegoldenrod: 'eee8aa',
      palegreen: '98fb98',
      paleturquoise: 'afeeee',
      palevioletred: 'd87093',
      papayawhip: 'ffefd5',
      peachpuff: 'ffdab9',
      peru: 'cd853f',
      pink: 'ffc0cb',
      plum: 'dda0dd',
      powderblue: 'b0e0e6',
      purple: '800080',
      red: 'ff0000',
      rosybrown: 'bc8f8f',
      royalblue: '4169e1',
      saddlebrown: '8b4513',
      salmon: 'fa8072',
      sandybrown: 'f4a460',
      seagreen: '2e8b57',
      seashell: 'fff5ee',
      sienna: 'a0522d',
      silver: 'c0c0c0',
      skyblue: '87ceeb',
      slateblue: '6a5acd',
      slategray: '708090',
      slategrey: '708090',
      snow: 'fffafa',
      springgreen: '00ff7f',
      steelblue: '4682b4',
      tan: 'd2b48c',
      teal: '008080',
      thistle: 'd8bfd8',
      tomato: 'ff6347',
      turquoise: '40e0d0',
      violet: 'ee82ee',
      violetred: 'd02090',
      wheat: 'f5deb3',
      white: 'ffffff',
      whitesmoke: 'f5f5f5',
      yellow: 'ffff00',
      yellowgreen: '9acd32'
    }
    // direct lookup replaces the original linear scan; same result
    const namedHex = this.simpleColors[colorString]
    if (namedHex) {
      colorString = namedHex
    }
    // end of simple type-in colors
    // array of color definition objects, tried in order
    this.colorDefs = [
      {
        re: /^rgb\((\d{1,3}),\s*(\d{1,3}),\s*(\d{1,3})\)$/,
        example: ['rgb(123, 234, 45)', 'rgb(255,234,245)'],
        process: (bits: RegExpExecArray) => {
          return [parseInt(bits[1]), parseInt(bits[2]), parseInt(bits[3])]
        }
      },
      {
        re: /^(\w{2})(\w{2})(\w{2})$/,
        example: ['#00ff00', '336699'],
        process: (bits: RegExpExecArray) => {
          return [parseInt(bits[1], 16), parseInt(bits[2], 16), parseInt(bits[3], 16)]
        }
      },
      {
        re: /^(\w{1})(\w{1})(\w{1})$/,
        example: ['#fb0', 'f0f'],
        process: (bits: RegExpExecArray) => {
          // duplicate each nibble: 'f' -> 'ff'
          return [
            parseInt(bits[1] + bits[1], 16),
            parseInt(bits[2] + bits[2], 16),
            parseInt(bits[3] + bits[3], 16)
          ]
        }
      }
    ]
    // search through the definitions to find a match
    for (let i = 0; i < this.colorDefs.length; i++) {
      const re = this.colorDefs[i].re
      const processor = this.colorDefs[i].process
      const bits = re.exec(colorString)
      if (bits) {
        const channels = processor(bits)
        this.r = channels[0]
        this.g = channels[1]
        this.b = channels[2]
        this.ok = true
        // the patterns are mutually exclusive, so the first match is final
        break
      }
    }
    // validate/cleanup values: clamp to [0, 255], NaN becomes 0
    this.r = RGBColor.clampChannel(this.r)
    this.g = RGBColor.clampChannel(this.g)
    this.b = RGBColor.clampChannel(this.b)
  }
  /** Clamp a parsed channel to [0, 255]; NaN and negatives become 0. */
  private static clampChannel(value: number): number {
    if (isNaN(value) || value < 0) {
      return 0
    }
    return value > 255 ? 255 : value
  }
  /** @returns the color as a CSS "rgb(r, g, b)" string. */
  toRGB() {
    return 'rgb(' + this.r + ', ' + this.g + ', ' + this.b + ')'
  }
  /** @returns the color as a CSS "rgba(r, g, b, a)" string; alpha defaults to 1. */
  toRGBA() {
    // Explicit undefined check: the previous `this.a || '1'` coerced a
    // legitimate alpha of 0 into 1.
    const alpha = this.a === undefined ? 1 : this.a
    return 'rgba(' + this.r + ', ' + this.g + ', ' + this.b + ', ' + alpha + ')'
  }
  /** @returns the color as a lowercase "#rrggbb" hex string. */
  toHex() {
    let r = this.r.toString(16)
    let g = this.g.toString(16)
    let b = this.b.toString(16)
    if (r.length == 1) r = '0' + r
    if (g.length == 1) g = '0' + g
    if (b.length == 1) b = '0' + b
    return '#' + r + g + b
  }
  // help
  /**
   * Build a <ul> DOM element listing every accepted input format with a
   * colored swatch. Requires a DOM (`document`); intended for debugging.
   */
  getHelpXML() {
    const examples = []
    // add regexps
    for (let i = 0; i < this.colorDefs.length; i++) {
      const example = this.colorDefs[i].example
      for (let j = 0; j < example.length; j++) {
        examples[examples.length] = example[j]
      }
    }
    // add type-in colors
    for (const sc in this.simpleColors) {
      examples[examples.length] = sc
    }
    const xml = document.createElement('ul')
    xml.setAttribute('id', 'rgbcolor-examples')
    for (let i = 0; i < examples.length; i++) {
      try {
        const listItem = document.createElement('li')
        const listColor = new RGBColor(examples[i])
        const exampleDiv = document.createElement('div')
        exampleDiv.style.cssText =
          'margin: 3px; ' +
          'border: 1px solid black; ' +
          'background:' +
          listColor.toHex() +
          '; ' +
          'color:' +
          listColor.toHex()
        exampleDiv.appendChild(document.createTextNode('test'))
        const listItemValue = document.createTextNode(
          ' ' + examples[i] + ' -> ' + listColor.toRGB() + ' -> ' + listColor.toHex()
        )
        listItem.appendChild(exampleDiv)
        listItem.appendChild(listItemValue)
        xml.appendChild(listItem)
      } catch (e) {
        // skip examples that fail to parse; this list is best-effort
      }
    }
    return xml
  }
}
|
import requests
from bs4 import BeautifulSoup

# Fetch the demo page; fail fast on HTTP errors instead of parsing an
# error body (the original ignored the status code entirely).
r = requests.get('https://python123.io/ws/demo.html')
r.raise_for_status()
demo = r.text
soup = BeautifulSoup(demo, 'html.parser')
#print(soup.prettify())
print(soup.title)            # first <title> tag
print(soup.a)                # first <a> tag in the document
tag = soup.a
print(tag.attrs)             # all attributes of that tag as a dict
print(tag.attrs['class'])    # its 'class' attribute (a list of CSS classes)
Kinetics of carbonation of light lanthanides The article provides the problem of extracting rare earth metals from technogenic raw materials. The analysis showed that there is no effective technology. It is required to consider the thermodynamic and kinetic parameters of the carbonization process. The process of carbonization of sediments of light lanthanides is considered. The work determines the effect of temperature and degree of mixing on the extraction process. The activation energy of the process was calculated. The equation for the dependence of the reaction rate on temperature is given It was shown that the process of carbonization of light lanthanides proceeds in the diffusion region. Introduction Rare earth metals represent great potential for the development of modern technologies, particularly for the production of materials for high-tech consumption areas. So, rare earth elements are used for the synthesis of block oxide catalysts for the CO conversion, for the production of phosphors on the basis of rare earth metals (REM), for the production of neodymium magnets, which constitute power units of hybrid and electric cars, as well as samarium-cobalt magnets are used in the defense and space industries. With an increase in the rate of production, the amount of REM-containing wastes increases significantly, while traditional sources are depleted. Therefore, the most urgent task is to involve technogenic raw materials in the production process as an alternative source of valuable components, thus reducing the amount of emissions of harmful substances, thereby improving the environmental performance. As a result, methods for processing waste products and industrial waste are being developed. They include extraction, sorption and biosorption, methods for extracting rare earth metals, as well as methods of acid and bioleaching. 
In addition to the considered secondary sources of rare earth metals, waste from alumina production -red mud, as well as waste from the production of extraction phosphoric acid -phosphogypsum have a great potential. The rare earth metals present in them are contained in the form of insoluble precipitates in oxide, carbonate, hydroxide and phosphate forms. Red mud (RM) is formed during the processing of bauxite ores by the Bayer method and poses a serious threat to the environment due to the high alkali content, from about 2 to 10 wt. % in terms of Na2O. Annually, more than 120 million tons of red mud accumulate all over the world resulting from the alumina production. RM is stored in tailings as a solid residue. Existing red mud processing technologies include pyrometallurgical and hydrometallurgical methods. The developed pyrometallurgical technologies, considering methods of low-temperature IOP Conf. Series: Earth and Environmental Science 677 052063 IOP Publishing doi:10.1088/1755-1315/677/5/052063 2 reduction at 1050-1200 °C and methods of reduction smelting, make it possible to extract iron from red mud, the content of which varies from 20 to 60 wt. % in terms of Fe2O3. The authors of the work raise the issue of processing red mud, which is so relevant in Russia. They propose a scheme for producing alumina, cast iron and cement. The resulting slags can be used to obtain alumina, titanium, rare earth metals (REM), as well as to obtain building materials. The article describes the main possible ways of using red mud. A detailed technological scheme and marketable products based on red mud were proposed. However, the existing pyrometallurgical technologies do not permit the use of the RM itself due to the high alkali content. As a result, such a process must be accompanied by a stage of alkali removal, which leads to an increase in the cost of processing. 
There is a developed domestic technology for extracting scandium and other valuable components from red mud, in which the process is intensified by using carbon dioxide, however, the efficiency of this process for the extraction of rare earth metals has yet to be assessed. Phosphogypsum is 80 % CaSO42H2O waste, which is formed during the production of extraction phosphoric acid from apatite concentrate. All over the world, up to 280 Mt of phosphogypsum are produced annually, most of which is sent to dumps without pretreatment and is not implented anywhere in the future. This waste is a safe secondary source of lanthanides, since the total radioactive background is not exceeded. The content of rare earth metals in phosphogypsum is in the range of 0.4 -0.6%. The co-extraction of rare earth metals during the complex processing of phosphogypsum or phosphohydrate, for example, by liquid conversion, is a fundamentally possible and promising method due to the simultaneous formation of several important products: phosphomel, which is widely used in construction and synthesis of slaked lime, and calcium sulfate (sodium, ammonium) which is used in agriculture as a fertilizer. The main method of hydrometallurgical processing of ores is leaching of solid residues of production with mineral acids or alkaline solutions. The use of acids in ores opening is impractical from the point of view of material and energy costs. Other important aspects which have a negative impact on the environment are the formation of toxic gases during leaching and the acidification of the soil. Since red mud is characterized with high alkali content, and its pH is in the range from 10 to 12.5 pH units, it is advisable to consider the method of carbonate-alkaline leaching of insoluble rare earth compounds. 
Despite the fact that phosphogypsum has a weak acid medium with pH≈5, the use of the acidic leaching method is ineffective in comparison with the alkaline method, since the process indicators are not high enough, and there is also a dissolution of the present impurities, which complicates the further process of extracting rare earth metals, as well as their separation. The analysis of thermodynamic data of carbonate-alkaline leaching of rare earth metals from their carbonates and hydroxides showed that this method is promising and is accompanied by rather high process parameters, namely the degree of extraction of rare earth metals into solution. To select the optimal technological parameters of the process, it is necessary to study the kinetics of the process of dissolution of rare-earth metals precipitates in carbonate-alkaline solutions. The study of the dissolution rate, that is, the kinetics of the process, is reduced to determining the rate-limiting step of the process, which can be in the diffusion, mixed or kinetic regions, and determining the order of the reaction. This study is aimed to determine the rate-controlling step of the carbonation process. This requires the determination of the activation energy. If its values are less than 40 kJ, the process takes place in the diffusion region, if the value of the activation energy is more than 60 kJ, the process is in the kinetic one. It is also necessary to show graphically the effect of the pulp stirring intensity on the degree of extraction of rare earth metals into the solution during leaching of their sediments. Materials and methods Sediments of rare earth metals belonging to the cerium group (light lanthanides), namely, cerium and neodymium sediments, were selected as objects of study. This choice is due to the high content of these elements in red mud and phosphogypsum. The experiment was carried out for neodymium and cerium 3 phosphates and carbonates. 
According to the obtained data of X-ray phase analysis, cerium can be found only in phosphate and carbonate forms, hydroxide form transforms into cerium dioxide. The study of the kinetics of the carbonization process consisted of the following stages: 1. Carrying out an experiment on the HEL Auto-MATE Reactor System according to the process parameters presented in tables 1 and 2. To determine the influence of the pulp stirring intensity, the experiment was carried out in accordance with the process parameters indicated in table 1. The effect of temperature on the degree of extraction of rare earth metals from the insoluble precipitates into solution was determined for different temperatures. The parameters of the experiments are shown in table 2. 2. Determination of the content of Ln 3+ in the solution after leaching by photometric and trilometric methods of analysis in the presence of Arsenazo III indicator; 3. Mathematical processing of the obtained experimental data. The determination of the apparent activation energy of the carbonization process of Ln 3+ sediments is carried out by establishing a functional dependence of the form lnk=f(1/T) (figures 2, 3). It can be seen that up to 600 rpm there is a significant effect of mixing intensity on carbonation. In the area of higher rotation values there is no significant increase in the recovery. Thus, two conclusions can be drawn: 1) in the technology of extracting rare earth metals from technogenic raw materials, the number of revolutions in the reactor should not exceed 600; 2) the obtained values of the activation energy indicate the occurrence of the process of carbonization of carbonates and phosphates of neodymium and cerium at the junction of the diffusion and transition regions. |
#include<bits/stdc++.h>
using namespace std;
#define ll long long
#define pb push_back
#define loopi for(int i=0;i<n;i++)
int solve(){
    // One test case: read n weights and pair players into two-person
    // teams of equal total weight s, maximising the team count over all
    // candidate sums s in [2*min, 2*max].
    int n;
    std::cin >> n;
    int lo = 51, hi = 0;  // min / max weight seen (weights assumed <= 50)
    // cnt[w] = number of players of weight w. A std::vector replaces the
    // original variable-length array (a non-standard GCC extension); it is
    // sized so that every partner weight s - i is in range, which makes the
    // old `s > n + i` bounds guard unnecessary (out-of-range partners
    // simply have count 0).
    std::vector<int> cnt(std::max(n, 51) + 1, 0);
    for (int i = 0; i < n; i++) {
        int w;
        std::cin >> w;
        cnt[w]++;
        lo = std::min(lo, w);
        hi = std::max(hi, w);
    }
    int best = 0;
    for (int s = 2 * lo; s <= 2 * hi; s++) {
        int teams = 0;
        // Pair distinct weights i < s - i.
        for (int i = lo; i < (s + 1) / 2; i++) {
            int j = s - i;
            if (j >= static_cast<int>(cnt.size())) continue;
            teams += std::min(cnt[i], cnt[j]);
        }
        // For an even sum, players of weight s/2 pair among themselves.
        if (s % 2 == 0) teams += cnt[s / 2] / 2;
        best = std::max(best, teams);
    }
    std::cout << best << std::endl;
    return 0;
}
int main() {
    // Read the number of test cases and solve each one independently.
    int cases;
    std::cin >> cases;
    for (; cases > 0; --cases) {
        solve();
    }
    return 0;
}
|
Full Progress of Digital Signal Processing in Open Loop-IFOG The fiber optic gyroscope, which is a solid-state sensor, represents a highly successful impact of fiber optics on rotation sensing. It has the advantages of small size, low weight, and low cost. In the construction process, there are two things that reduce accuracy: the quality of the optical components and the DSP program. In this paper we review the structure and operation of the interferometric fiber optic gyro and then describe the process of deriving the rotation rate from the output current and simulate this approach |
Church Services in a Complex Continuing Care Hospital: Why Bother? This research aimed to explore patient motivation for attending hospital-run church services in a complex continuing care hospital setting, as well as the perceived spiritual benefits as categorized by Fitchett's 7x7 Model for Spiritual Assessment. Invitations to participate in one-to-one interviews were offered to all patient attendees at both an ecumenical and a Roman Catholic service over the course of several weeks. We collected 20 interviews before performing a qualitative analysis, at which point we determined that saturation of content had been reached. The key findings were that participants identified the strongest perceived benefits in Experiences and Emotions, and Rituals and Practice, suggesting that access to the ritual of Sunday church services contributes meaningfully to participants' coping strategies and overall quality of life. |
# Author: <NAME>
# Date: 11/08/2018
# Git-Hub: Data-is-Life
import numpy as np
from ast import literal_eval
from pandas import DataFrame, to_datetime, to_timedelta, to_numeric
'''All functions used to clean up game log from Chess.com'''
def custom_round(x, base=20):
    '''Round x to the nearest multiple of base (Python 3 banker's
    rounding applies at exact midpoints) and return it as an int.'''
    multiples = round(float(x) / base)
    return int(base * multiples)
def initial_chess_data(filename):
    '''First function:
    Input:
        filename = Game log from Chess.com
    Reads the log, drops every line shorter than 5 characters (including
    the trailing newline) -- these are blank/noise lines in the export --
    and joins the remaining lines with single spaces.
    Returns:
        icd_t = All game information as a text'''
    # Open read-only: the original 'r+' mode needlessly required write
    # permission and would fail on read-only files.
    with open(filename, 'r') as file:
        icd_l = file.readlines()
    icd_t = " ".join(num for num in icd_l if len(num) > 4)
    return icd_t
def chess_data_cleanup(chess_text):
    '''Second function:
    Input:
        chess_text = All game information as a text
    Rewrites the raw Chess.com/PGN-style text into '~'-separated
    pseudo-dict records: one record per game header, followed by one
    record holding that game's moves. Each record is later parsed with
    ast.literal_eval in data_cleaning_1, so the replace chain below is
    building Python-dict syntax by hand. The order of the replaces is
    significant -- do not reorder them.
    Returns:
        df = dataframe with game information and moves (one column 'a',
             one record per row)'''
    # Strip PGN tag brackets and flatten everything onto one line.
    chess_text = chess_text.replace('[', '')
    chess_text = chess_text.replace(']', '')
    chess_text = chess_text.replace('\n', ' ')
    # NOTE(review): the next two replaces look like no-ops (space -> space);
    # the intent was presumably to collapse runs of multiple spaces -- verify
    # against the original source.
    chess_text = chess_text.replace(' ', ' ').replace(' ', ' ')
    # Turn move numbers into dict keys: '12. ' -> '12w":"' (white's move),
    # '12... ' -> '12b":"' (black's move).
    chess_text = chess_text.replace('... ', 'b":"').replace('. ', 'w":"')
    # '^' separates a move from its clock reading ('{%clk H:MM:SS.s}').
    chess_text = chess_text.replace('", ', '", "').replace(' {%clk ', '^')
    chess_text = chess_text.replace(' {%clk', '^')
    chess_text = chess_text.replace('}', '",').replace('", ', '", "')
    # The Site tag carries no information; drop it.
    chess_text = chess_text.replace(' Site "Chess.com" D', ', D')
    # '}~{' marks a record boundary; every game starts with its Event tag.
    chess_text = chess_text.replace('Event ', '}~{"Event":')
    # Quote the remaining PGN tag names so they become dict keys.
    chess_text = chess_text.replace('", Date ', '", "Date": ')
    chess_text = chess_text.replace('" Result ', '", "Result": ')
    chess_text = chess_text.replace('" Round ', '", "Round": ')
    chess_text = chess_text.replace('" White ', '", "White": ')
    chess_text = chess_text.replace('" Black ', '", "Black": ')
    chess_text = chess_text.replace('" WhiteElo ', '", "WhiteElo": ')
    chess_text = chess_text.replace('" TimeControl ', '", "TimeControl": ')
    chess_text = chess_text.replace('" EndTime ', '", "EndTime": ')
    chess_text = chess_text.replace('" BlackElo ', '", "BlackElo": ')
    chess_text = chess_text.replace('" Termination ', '", "Termination": ')
    # End times are always PST/PDT in the export; strip the timezone suffix.
    chess_text = chess_text.replace(' PST', '')
    chess_text = chess_text.replace(' PDT', '')
    # NOTE(review): same apparent no-op space replaces as above -- verify.
    chess_text = chess_text.replace(' ', ' ').replace(' ', ' ')
    chess_text = chess_text.replace(' ', ' ')
    # The first move key ('1w') starts a new record: split header from moves.
    chess_text = chess_text.replace('" 1w":[', '"}~{"1w":[')
    chess_text = chess_text.replace('" 1w":"', '"}~{"1w":"')
    # Drop trailing game results (1-0 / 0-1 / 1/2-1/2) left at record ends.
    chess_text = chess_text.replace(', "1/2-1/2 }~{', '}~{')
    chess_text = chess_text.replace(', "1-0 }~{', '}~{')
    chess_text = chess_text.replace(', "0-1 }~{', '}~{')
    chess_text = chess_text.replace(', "1-0 ', '}').replace(', "}', '}')
    chess_text = chess_text.replace(', "1-0', '}').replace(', "0-1', '}')
    # Using '~' as a separator
    cl = ''.join([num for num in chess_text]).split("~")
    # Named the only column "a", so it is easier in the next function
    df = DataFrame(cl, columns=['a'])
    # If length of any string is less than 3, it is not needed
    df = df[df['a'].str.len() > 3]
    return df
def data_cleaning_1(df):
    '''Third function:
    Input:
        df = single-column ('a') df of record strings built by
             chess_data_cleanup; each row is a Python-dict literal
    Parses every record with literal_eval and splits the result into two
    dataframes: one row per game of header information, and one row per
    game of moves.
    Output:
        m_df = moves df with all the moves (columns '1w', '1b', ...)
        d_df = information df with all the game information'''
    # Each row of 'a' is a dict literal; literal_eval turns it into a dict
    # and DataFrame() aligns the keys into columns. Header rows and move
    # rows end up interleaved with NaNs in each other's columns.
    c_df = DataFrame(data=list(df['a'].apply(literal_eval)))
    # Date/EndTime only exist on header rows; forward-fill copies them onto
    # the move row that follows each header so both can be sorted together.
    c_df['Date'].fillna(method='ffill', inplace=True)
    c_df['EndTime'].fillna(method='ffill', inplace=True)
    # Convert all the dates and time to dates and times
    c_df.loc[:, 'date_time'] = to_datetime(
        c_df['Date'] + ' ' + c_df['EndTime'])
    c_df.loc[:, 'Date'] = to_datetime(c_df['Date'])
    c_df.loc[:, 'EndTime'] = to_timedelta(c_df['EndTime'])
    # Split moves to a new df drop columns not needed
    # (move rows are the ones with no 'White' header value)
    m_df = c_df[c_df['White'].isnull()].copy()
    m_df.sort_values('date_time', inplace=True)
    m_df.reset_index(inplace=True)
    m_df.drop(columns=[
        'index', 'Date', 'White', 'Black', 'Result', 'WhiteElo',
        'BlackElo', 'TimeControl', 'EndTime', 'Termination', 'date_time',
        'Round', 'Event'], inplace=True)
    # Split game information to a new df
    # (header rows are the ones with no '1w' first-move value)
    d_df = c_df[c_df['1w'].isnull()].copy()
    d_df = d_df[['Date', 'White', 'Black', 'Result', 'WhiteElo', 'BlackElo',
                 'TimeControl', 'EndTime', 'Termination', 'date_time']]
    d_df.sort_values('date_time', inplace=True)
    d_df.reset_index(inplace=True)
    d_df.drop(columns=['index', 'date_time'], inplace=True)
    # Rename all columns to lower case and insert "_" to split words
    d_df.rename(columns={
        'Date': 'date', 'White': 'white', 'Black': 'black',
        'Result': 'result', 'WhiteElo': 'white_elo', 'BlackElo': 'black_elo',
        'TimeControl': 'game_time', 'EndTime': 'end_time',
        'Termination': 'termination'}, inplace=True)
    # Both frames are sorted by date_time, so row i of m_df is the move row
    # of game i in d_df; count(axis=1) = number of recorded half-move slots.
    d_df.loc[:, 'num_moves'] = m_df.count(axis=1)
    d_df.loc[:, 'white_elo'] = to_numeric(d_df['white_elo'])
    d_df.loc[:, 'black_elo'] = to_numeric(d_df['black_elo'])
    # 'TrueMoeG' is the log owner's Chess.com username: color = 1 when the
    # owner played white, 0 when they played black.
    d_df.loc[:, 'color'] = np.where(d_df['white'] == 'TrueMoeG', 1, 0)
    # Drop duplicate rows
    d_df.drop_duplicates(inplace=True)
    m_df.drop_duplicates(inplace=True)
    m_df.reset_index(inplace=True)
    m_df.drop(columns=['index'], inplace=True)
    d_df.reset_index(inplace=True)
    d_df.drop(columns=['index'], inplace=True)
    return m_df, d_df
def data_cleaning_2(m_df):
    '''Fourth function:
    Input:
        m_df = Moves df (columns like '1w', '1b', '10w', ...)
    Zero-pads the move-number columns to four characters (so that a plain
    lexicographic sort orders them numerically), renames white's 'w'
    suffix to 'a' so white sorts before black within a move, then splits
    every cell into the move text and the clock reading.
    Output:
        m_df = Moves df - renamed columns, cells reduced to the move text
        t_df = Moves time df - same columns, cells reduced to clock times'''
    padded_names = []
    for name in m_df.columns:
        # '1w' -> '001w', '10w' -> '010w'; longer names pass through.
        if len(name) == 2:
            name = '00' + name
        elif len(name) == 3:
            name = '0' + name
        # white's 'w' becomes 'a' so it sorts ahead of black's 'b'
        padded_names.append(name.replace('w', 'a'))
    m_df.columns = padded_names
    m_df = m_df[sorted(padded_names)]
    # Copy BEFORE extraction: both frames start from the raw 'move^clock'
    # cells and each keeps a different half.
    t_df = m_df.copy()
    for name in m_df.columns:
        m_df[name] = m_df[name].str.extract(r'(^\w+-?\w+?-?\w?\+?)')
    for name in t_df.columns:
        t_df[name] = t_df[name].str.extract(r'(\d\:\d+:\d+\.?\d)')
    m_df.reset_index(drop=True, inplace=True)
    t_df.reset_index(drop=True, inplace=True)
    return m_df, t_df
def data_cleaning_3(t_df, d_df):
    '''Fifth function:
    Input:
        t_df = Move times df (clock strings like '0:02:59.9')
    d_df = Game information df (provides the 'game_time' control string)
    Converts every clock reading to seconds (float) and adds two columns:
    'extra_time' (the per-move increment for '180+2' / '300+5' controls,
    otherwise 0) and a normalised numeric-friendly 'game_time'.
    Returns:
        t_df = Moves time df - values in seconds, plus the new columns'''
    # clock string -> Timedelta -> nanoseconds -> seconds
    t_df = t_df.apply(to_timedelta, errors='coerce')
    t_df = t_df.apply(to_numeric, errors='coerce') / 1_000_000_000
    t_df.loc[:, 'game_time'] = d_df['game_time']
    # Increment controls ('180+2', '300+5') grant extra seconds per move.
    increment = t_df['game_time'].replace(
        ['300', '600', '180', '180+2', '300+5'], ['0', '0', '0', '2', '5'])
    t_df.loc[:, 'extra_time'] = to_numeric(increment)
    # Strip the '+N' suffix so the base time is usable as a number.
    t_df.loc[:, 'game_time'] = d_df['game_time'].replace(
        ['180+2', '300+5'], [180, 300])
    return t_df
def data_cleaning_4(m_df, t_df, d_df):
    '''Sixth function:
    Input:
        m_df = Moves df
        t_df = Move times df
        d_df = Game information df
    Creates four new df moves and move times for white and black pieces.
    Columns alternate white/black ('001a', '001b', '002a', ...), so the
    even-indexed columns belong to white and the odd-indexed to black.
    Returns:
        wh_m_df = All moves by player with white pieces
        wh_t_df = All moves time by player with white pieces
        bl_m_df = All moves by player with black pieces
        bl_t_df = All moves time by player with black pieces
        d_df = Game information df - Corrected game times + number of moves
        t_df = Move times df'''
    # game_time to numeric and copy those to moves time df
    t_df.loc[:, 'game_time'] = to_numeric(t_df['game_time'])
    d_df.loc[:, 'game_time'] = t_df['game_time']
    # Create four new df moves and move times for white and black pieces
    wh_m_df = m_df[m_df.columns[::2]].copy()
    bl_m_df = m_df[m_df.columns[1::2]].copy()
    wh_t_df = t_df[t_df.columns[::2]].copy()
    bl_t_df = t_df[t_df.columns[1::2]].copy()
    # Drop the helper columns that landed in the slices: 'game_time' /
    # 'extra_time' sit at the end of t_df (added by data_cleaning_3).
    wh_t_df.drop(columns=[wh_t_df.columns[-1]], inplace=True)
    bl_t_df.drop(columns=[bl_t_df.columns[-1]], inplace=True)
    # Get number of moves per game
    d_df.loc[:, 'white_num_moves'] = wh_m_df.count(axis=1)
    d_df.loc[:, 'black_num_moves'] = bl_m_df.count(axis=1)
    '''Go through all the columns in the move times df for white and black
    pieces and subtract the time left with the total allowed time. This gives
    time per move'''
    for num in wh_t_df.columns:
        wh_t_df.loc[:, num] = t_df['game_time'] - wh_t_df[num]
    for num in bl_t_df.columns:
        bl_t_df.loc[:, num] = t_df['game_time'] - bl_t_df[num]
    # Create two lists: index for games that give +2 sec/move and +5 sec/move
    two_list = t_df[t_df['extra_time'] == 2].index.tolist()
    five_list = t_df[t_df['extra_time'] == 5].index.tolist()
    # Add 2 seconds to all the moves for games that give +2 sec/move.
    # The increment accumulates: by move k the player has received k
    # bonus seconds, hence the (i + 1) * 2 term.
    # NOTE(review): range(len(columns) - 1) skips the LAST move column in
    # every row -- confirm whether that is intentional.
    for num in two_list:
        for i in range(len(wh_t_df.columns) - 1):
            wh_t_df.iloc[num, i] = wh_t_df.iloc[num, i] + ((i + 1) * 2)
        for j in range(len(bl_t_df.columns) - 1):
            bl_t_df.iloc[num, j] = bl_t_df.iloc[num, j] + ((j + 1) * 2)
    # Add 5 seconds to all the moves for games that give +5 sec/move
    # (same cumulative logic and same skipped last column as above).
    for num in five_list:
        for i in range(len(wh_t_df.columns) - 1):
            wh_t_df.iloc[num, i] = wh_t_df.iloc[num, i] + ((i + 1) * 5)
        for j in range(len(bl_t_df.columns) - 1):
            bl_t_df.iloc[num, j] = bl_t_df.iloc[num, j] + ((j + 1) * 5)
    # Change time values where time is really high
    # (values above 5000 seconds are treated as parsing artifacts and
    # zeroed; presumably no single game takes that long -- verify).
    for num in wh_t_df.columns:
        wh_t_df.loc[:, num] = np.where(wh_t_df[num] > 5000, 0, wh_t_df[num])
    for num in bl_t_df.columns:
        bl_t_df.loc[:, num] = np.where(bl_t_df[num] > 5000, 0, bl_t_df[num])
    return wh_m_df, wh_t_df, bl_m_df, bl_t_df, d_df, t_df
def data_cleaning_5(c_t_df, t_df, d_df, col):
    '''Seventh function:
    Input:
        c_t_df = Black or white pieces cumulative move-time df
        t_df = Move times df (unused; kept for interface compatibility)
        d_df = Game information df (unused; kept for interface compatibility)
        col = name of the first move column
    Converts cumulative elapsed time into per-move time by differencing
    consecutive columns; the first move keeps its cumulative value, and
    any negative difference is floored at zero.
    Returns:
        tm_df = per-move time df'''
    # column-wise difference: time[k] - time[k-1]
    tm_df = c_t_df - c_t_df.shift(periods=1, axis=1)
    # the first move has no predecessor; its elapsed time IS its move time
    tm_df.loc[:, col] = c_t_df[col]
    # clock noise can make a difference negative; floor everything at 0
    return tm_df.clip(lower=0)
def help_func1(m_df, d_df):
    '''Helper function:
    Input:
        m_df = Black or white pieces moves df
        d_df = Game information df (only its length/indices are used)
    Scans each game's moves for a castling move.
    Returns:
        cast_list = move number on which the player castled
            (1-based index of "O-O" or "O-O-O"; 0 if they never castled)
        cast_w_list = which side the player castled
            1 if castled King side ("O-O")
            0 if castled Queen side ("O-O-O")
            -1 if didn't castle
    NOTE: the original docstring had these two return values swapped;
    the code has always appended move numbers to the first list and the
    side flag to the second.'''
    cast_list = []
    cast_w_list = []
    for i in range(len(d_df)):
        # Games missing from m_df (e.g. dropped duplicates) count as
        # "never castled".
        if i not in m_df.index:
            cast_list.append(0)
            cast_w_list.append(-1)
            continue
        moves = list(m_df.iloc[i])
        if "O-O" in moves:
            cast_list.append(moves.index("O-O") + 1)
            cast_w_list.append(1)
        elif "O-O-O" in moves:
            cast_list.append(moves.index("O-O-O") + 1)
            cast_w_list.append(0)
        else:
            cast_list.append(0)
            cast_w_list.append(-1)
    return cast_list, cast_w_list
def data_cleaning_6(d_df, m_df, bl_m_df, wh_m_df, wh_t_df, bl_t_df):
    '''Eighth function:
    Input:
        d_df = Game information df
        m_df = Move times df (NOTE(review): unused in this function --
               kept only for interface compatibility; confirm and remove)
        bl_m_df = All moves by player with black pieces
        wh_m_df = All moves by player with white pieces
        wh_t_df = All moves time by player with white pieces
        bl_t_df = All moves time by player with black pieces
    Derives per-game features from the owner's ('TrueMoeG') perspective:
    result, castling, time usage, move counts, start/end times.
    Returns:
        d_df = Game information df - bunch of new columns, with the raw
               white/black intermediate columns dropped'''
    # Total time used = largest cumulative per-player clock value.
    d_df.loc[:, 'white_time_used'] = wh_t_df.max(axis=1)
    d_df.loc[:, 'black_time_used'] = bl_t_df.max(axis=1)
    # Get the winner and how they won: the termination string looks like
    # '<username> won by <method>' -- first word = winner, last word = method.
    d_df.loc[:, 'winner'] = d_df['termination'].str.extract(
        '(^[a-zA-Z0-9]+)', expand=False)
    d_df.loc[:, 'won_by'] = d_df['termination'].str.extract(
        '([a-zA-Z0-9]+$)', expand=False)
    # Using helper function to get castling information
    # (first list = move number castled on, second = side castled).
    cstl_l_bl, cstl_loc_l_bl = help_func1(bl_m_df, d_df)
    cstl_l_wh, cstl_loc_l_wh = help_func1(wh_m_df, d_df)
    # Get day of the week and day of the month
    d_df.loc[:, 'weekday'] = d_df.date.apply(lambda x: x.dayofweek)
    d_df.loc[:, 'day'] = d_df.date.apply(lambda x: x.day)
    # result is if the player won or lost. 1.0 = Win, 0.5 = Draw, 0.0 = Loss
    # (drawn games terminate with 'Game drawn by ...', hence 'Game').
    d_df.loc[:, 'result'] = np.where(d_df['winner'] == 'TrueMoeG',
                                     1.0, (np.where(d_df['winner'] == 'Game',
                                                    0.5, 0.0)))
    d_df.loc[:, 'white_castled_on'] = cstl_l_wh
    d_df.loc[:, 'black_castled_on'] = cstl_l_bl
    d_df.loc[:, 'white_castled_where'] = cstl_loc_l_wh
    d_df.loc[:, 'black_castled_where'] = cstl_loc_l_bl
    # Re-key the white/black columns to owner ('castled*') vs opponent
    # ('opp_castled*') using color (1 = owner played white).
    d_df.loc[:, 'castled_on'] = np.where(d_df['color'] == 1, d_df[
        'white_castled_on'], d_df['black_castled_on'])
    d_df.loc[:, 'opp_castled_on'] = np.where(d_df['color'] != 1, d_df[
        'white_castled_on'], d_df['black_castled_on'])
    d_df.loc[:, 'castled'] = np.where(d_df['color'] == 1, d_df[
        'white_castled_where'], d_df['black_castled_where'])
    d_df.loc[:, 'opp_castled'] = np.where(d_df['color'] != 1, d_df[
        'white_castled_where'], d_df['black_castled_where'])
    # Get total time used by each player and input it in the information df
    d_df.loc[:, 'time_used'] = np.where(d_df['color'] == 1, d_df[
        'white_time_used'], d_df['black_time_used'])
    d_df.loc[:, 'opp_time_used'] = np.where(d_df['color'] == 0, d_df[
        'white_time_used'], d_df['black_time_used'])
    # If the loser lost on time, their clock by definition ran to the full
    # game time; overwrite the measured value accordingly.
    d_df.loc[:, 'time_used'] = np.where(d_df['result'] == 1.0, d_df[
        'time_used'], np.where(d_df['won_by'] == 'time', d_df[
            'game_time'], d_df['time_used']))
    d_df.loc[:, 'opp_time_used'] = np.where(d_df['result'] == 0.0, d_df[
        'opp_time_used'], np.where(d_df['won_by'] == 'time', d_df[
            'game_time'], d_df['opp_time_used']))
    # Converting time to numeric for easier calculations
    # NOTE(review): the units here are opaque -- end_time is a Timedelta in
    # nanoseconds divided by 3.6e9, and start_time subtracts seconds / 3.6
    # with a +24000 wrap for games that started before midnight. Verify the
    # intended scale before relying on these columns.
    d_df.loc[:, 'end_time'] = to_numeric(d_df['end_time']) / 3600000000
    d_df.loc[:, 'start_time'] = d_df['end_time'] - \
        (d_df['time_used'] + d_df['opp_time_used']) / 3.6
    d_df.loc[:, 'start_time'] = [num if num >= 0 else
                                 num + 24000 for num in d_df.start_time]
    d_df.loc[:, 'num_moves'] = np.where(d_df['color'] == 1, d_df[
        'white_num_moves'], d_df['black_num_moves'])
    d_df.loc[:, 'opp_num_moves'] = np.where(d_df['color'] == 0, d_df[
        'white_num_moves'], d_df['black_num_moves'])
    # Average time per move
    d_df.loc[:, 'avg_time'] = d_df['time_used'] / d_df['num_moves']
    d_df.loc[:, 'opp_avg_time'] = d_df['opp_time_used'] / d_df['opp_num_moves']
    # Rounding the time to start of the hour
    d_df.loc[:, 'start_time'] = d_df['start_time'] // 1000
    d_df.loc[:, 'end_time'] = d_df['end_time'] // 1000
    # Drop the raw white/black intermediates now folded into owner/opponent
    # columns.
    return d_df.drop(columns=[
        'white', 'black', 'termination', 'white_num_moves', 'black_num_moves',
        'white_time_used', 'black_time_used', 'winner', 'white_castled_on',
        'black_castled_on', 'white_castled_where', 'black_castled_where'])
def data_cleaning_7(d_df, wh_tm_df, bl_tm_df):
    '''Ninth function:
    Input:
        d_df = Game information df
        wh_tm_df = Per-move think-time columns for the white player
        bl_tm_df = Per-move think-time columns for the black player
    Adds rating (elo) columns, the longest single-move think time, and an
    integer encoding of how each game ended.
    Returns:
        d_df = Game information df with max_move, elo, elo_delta, diff and
               encoded won_by columns added (helper columns dropped)'''
    # Longest single move (in time) made by each side: max across move columns
    d_df.loc[:, 'white_max_move'] = wh_tm_df.max(axis=1)
    d_df.loc[:, 'black_max_move'] = bl_tm_df.max(axis=1)
    # Re-key the white/black maxima into player ('max_move') vs. opponent
    d_df.loc[:, 'max_move'] = np.where(d_df['color'] == 1, d_df[
        'white_max_move'], d_df['black_max_move'])
    d_df.loc[:, 'opp_max_move'] = np.where(d_df['color'] == 0, d_df[
        'white_max_move'], d_df['black_max_move'])
    # Assign elo to each player; post_elo is the rating AFTER this game
    d_df.loc[:, 'post_elo'] = np.where(
        d_df['color'] == 1, d_df['white_elo'], d_df['black_elo'])
    d_df.loc[:, 'opp_post_elo'] = np.where(
        d_df['color'] == 0, d_df['white_elo'], d_df['black_elo'])
    # Amount of elo changed in the last game
    # NOTE(review): shift(1) assumes rows are in chronological order — confirm
    d_df.loc[:, 'elo_delta'] = d_df['post_elo'] - d_df['post_elo'].shift(1)
    d_df.loc[:, 'elo'] = d_df['post_elo'].subtract(d_df['elo_delta'])
    # Chess assigns elo of 1000 to a new member
    d_df.loc[0, 'elo'] = 1_000
    d_df.loc[0, 'elo_delta'] = d_df['post_elo'].iloc[0] - d_df['elo'].iloc[0]
    # NOTE(review): the player's own delta is subtracted to approximate the
    # opponent's pre-game elo; verify the sign/assumption is intended
    d_df.loc[:, 'opp_elo'] = d_df['opp_post_elo'].subtract(d_df['elo_delta'])
    # diff is the difference in elo between players
    d_df.loc[:, 'diff'] = d_df['post_elo'].subtract(d_df['opp_post_elo'])
    d_df.reset_index(inplace=True)
    d_df.drop(columns=['index'], inplace=True)
    d_df_len = len(d_df)
    # NOTE(review): the final game row is dropped here — presumably its
    # derived data is incomplete; confirm the intent
    d_df.drop([d_df_len - 1], inplace=True)
    # Changed strings of how the player won to integers
    # (checkmate=8, resignation=7, ..., stalemate=1, rule=0)
    d_df['won_by'].replace(['checkmate', 'resignation', 'time', 'abandoned',
                            'material', 'agreement', 'repetition', 'stalemate',
                            'rule'], list(reversed(range(9))), inplace=True)
    # Helper columns are no longer needed downstream
    d_df.drop(columns=['white_elo', 'black_elo', 'white_max_move',
                       'black_max_move'], inplace=True)
    return d_df
def add_games_played_per_day(df):
    '''Label each game with its 1-based sequence number within its date.

    Adds a ``day_game_num`` column: 1 for the first game on a given
    ``date`` value, 2 for the second, and so on. Mutates ``df`` in place
    and returns it for chaining.
    '''
    per_day_counter = df.groupby('date').cumcount() + 1
    df.loc[:, 'day_game_num'] = per_day_counter
    return df
def main_cleanup(file_name):
    '''Tenth function:
    Input:
        file_name = Game log exported from Chess.com
    Runs the whole cleaning pipeline end to end and writes these csv files
    into the data folder:
        - moves_initial.csv = Initial moves information with time, all games
        - moves.csv = All moves (no time) for all games, one column per move
        - main_with_all_info.csv = All game information except each move
        - use_for_predictions.csv = Input for building prediction models
        - use_for_analysis.csv = Input for running analysis
    Returns:
        df_model = Use for building prediction models
        df_analysis = Use for analysis
        df_final = All the game information'''
    # Parse the raw log, then normalise it into a dataframe
    raw_text = initial_chess_data(file_name)
    games = chess_data_cleanup(raw_text)
    # Persist the raw move/time rows before they get split apart
    games[games.index % 2 == 0].copy().to_csv('../data/moves_initial.csv')
    # Split game info from moves, then moves from per-move times
    moves, info = data_cleaning_1(games)
    moves_wide, times = data_cleaning_2(moves)
    # Moves are fully processed at this point; save them
    moves_wide.to_csv('../data/moves.csv')
    times_clean = data_cleaning_3(times, info)
    # Separate everything into white-side and black-side frames
    (white_moves, white_times, black_moves, black_times,
     info2, times2) = data_cleaning_4(moves_wide, times_clean, info)
    white_move_times = data_cleaning_5(white_times, times2, info2, '001a')
    black_move_times = data_cleaning_5(black_times, times2, info2, '001b')
    # Fold the per-side details back into the game-information frame
    info3 = data_cleaning_6(info2, moves_wide, black_moves, white_moves,
                            white_times, black_times)
    almost_done = data_cleaning_7(info3, white_move_times, black_move_times)
    df_final = add_games_played_per_day(almost_done)
    # Columns used to run analysis
    analysis_labels = ['date', 'day', 'weekday', 'start_time', 'game_time',
                       'color', 'elo', 'opp_elo', 'diff', 'result', 'won_by',
                       'num_moves', 'castled', 'opp_castled', 'castled_on',
                       'opp_castled_on', 'time_used', 'opp_time_used',
                       'day_game_num']
    # Columns used for running prediction models
    predictions_labels = ['result', 'diff', 'opp_elo', 'elo', 'game_time',
                          'color', 'start_time', 'day', 'weekday',
                          'day_game_num']
    df_model = df_final[predictions_labels].copy()
    df_analysis = df_final[analysis_labels].copy()
    # Save the files
    df_final.to_csv('../data/main_with_all_info.csv', index=False)
    df_model.to_csv('../data/use_for_predictions.csv', index=False)
    df_analysis.to_csv('../data/use_for_analysis.csv', index=False)
    return df_model, df_analysis, df_final
|
Meta-cognitive Aspects of Experiential Learning Contemporary research on meta-cognition has reintroduced conscious experience into psychological research on learning and stimulated a fresh look at classical experiential learning scholars who gave experience a central role in the learning process—William James, John Dewey, Kurt Lewin, Carl Rogers, and Paulo Freire. In particular, James's contributions are foundational for experiential learning and research on meta-cognition. Research on meta-cognition and the role it plays in the learning process are described. The meta-cognitive model is used to describe how fundamental concepts of experiential learning theory—a learning self-identity, the learning spiral, learning style, and learning spaces—can guide meta-cognitive monitoring and control of learning. Meta-cognitive strategies to help individuals improve their learning effectiveness are outlined. Learners can chart their path on the learning way by developing their meta-cognitive learning capacities, and educators can pave the way by placing learning about learning on the agenda of their educational programs.
The clinical relevance of a system of ambulatory 24-hour oesophageal pressure and pH recording with automated data analysis was investigated in 33 unselected patients with non-cardiac chest pain. After conventional manometry with edrophonium (Tensilon) provocation, 24-hour oesophageal pH and pressure monitoring was performed. In 17 patients conventional manometry, edrophonium provocation and 24-hour pH recording revealed an oesophageal origin of the symptoms: 6 patients had oesophageal motility disorders, 3 were positive responders to edrophonium and 8 had chest pain associated with gastro-oesophageal reflux. In none of the patients who had a pain attack during prolonged oesophageal pressure recording was a new motility disorder detected.
<reponame>udaybea/test
package org.jenkinsci.plugins.github.pullrequest.pipeline;
import java.io.Serializable;
import org.jenkinsci.plugins.workflow.steps.AbstractStepDescriptorImpl;
import org.jenkinsci.plugins.workflow.steps.AbstractStepImpl;
import org.kohsuke.github.GHCommitState;
import org.kohsuke.stapler.DataBoundConstructor;
import org.kohsuke.stapler.DataBoundSetter;
import hudson.Extension;
/**
* Representation of the configuration for the commit status set step. An instance of this class
* is made available to the SetCommitStatusExecution object to instruct it on what to do.
*/
/**
 * Representation of the configuration for the commit status set step. An instance of this class
 * is made available to the SetCommitStatusExecution object to instruct it on what to do.
 *
 * <p>All three values are optional pipeline parameters bound by Stapler via the
 * {@code @DataBoundSetter} fields below; the field names define the parameter names.
 */
public class SetCommitStatusStep extends AbstractStepImpl implements Serializable {
    private static final long serialVersionUID = 1L;

    /** Context label for the status; bound from the step's {@code context} parameter. */
    @DataBoundSetter
    private String context;

    /** Commit state to set (e.g. SUCCESS, FAILURE); bound from {@code state}. */
    @DataBoundSetter
    private GHCommitState state;

    /** Human-readable detail message for the status; bound from {@code message}. */
    @DataBoundSetter
    private String message;

    /** No-arg constructor required for Stapler data binding; all fields are set via setters. */
    @DataBoundConstructor
    public SetCommitStatusStep() {
    }

    /**
     * The desired context for the status. The context identifies a status value on the commit. For
     * example, with two status values, one might have a context of "compile" and another
     * might have one with "tests" to indicate which phase of the build/validation the status
     * applies to.
     */
    public String getContext() {
        return context;
    }

    /**
     * The desired state of the status.
     */
    public GHCommitState getState() {
        return state;
    }

    /**
     * The message associated with the status providing some detail for it.
     */
    public String getMessage() {
        return message;
    }

    /** Jenkins descriptor exposing this step to pipelines under {@link #FUNC_NAME}. */
    @Extension
    public static final class DescriptorImpl extends AbstractStepDescriptorImpl {
        /** Pipeline DSL function name for this step. */
        public static final String FUNC_NAME = "setGitHubPullRequestStatus";

        public DescriptorImpl() {
            super(SetCommitStatusExecution.class);
        }

        @Override
        public String getFunctionName() {
            return FUNC_NAME;
        }

        @Override
        public String getDisplayName() {
            return "Set GitHub PullRequest Commit Status";
        }
    }
}
|
from . import arithmetic_dec
from . import length_beam_search
|
Lately, though, he’s been included in at least one private poll of the slowly materializing field since U.S. Sen. Barbara Boxer announced she would not run for reelection next year.
The automated survey by Public Policy Polling showed Dreier running a few percentage points behind the only announced Democrat in the race, Attorney General Kamala Harris, and trailing former Los Angeles Mayor Antonio Villaraigosa by a smaller margin.
On the GOP side so far, Republican Assemblyman Rocky Chávez of Oceanside, along with a pair of former chairmen of the state GOP, are weighing Senate bids. Others are waiting to see how the field takes shape.
<gh_stars>0
#ifndef __ExporterBaseBase_h__
#define __ExporterBaseBase_h__
#include "SPlisHSPlasH/Common.h"
#include "../GazeboSimulatorBase.h"
namespace SPH
{
    /** \brief Abstract base class for simulation data exporters.
     *
     * Concrete exporters implement step() to write out data for each frame.
     * Copying is disabled because an exporter holds a non-owning pointer to
     * the simulator base.
     */
    class ExporterBase
    {
    protected:
        GazeboSimulatorBase* m_base;  ///< Non-owning pointer to the owning simulator.
        bool m_active;                ///< Whether this exporter currently produces output.

    public:
        /** Construct an inactive exporter attached to \p base. */
        explicit ExporterBase(GazeboSimulatorBase* base)
            : m_base(base), m_active(false) {}
        ExporterBase(const ExporterBase&) = delete;
        ExporterBase& operator=(const ExporterBase&) = delete;
        virtual ~ExporterBase() = default;

        /** Export data for the given frame index. Must be overridden. */
        virtual void step(const unsigned int frame) = 0;
        /** Reset internal state (e.g. when the simulation is restarted). */
        virtual void reset() {}
        /** Prepare output under \p outputPath (default: nothing to do). */
        virtual void init(const std::string& outputPath) {}
        /** Enable or disable this exporter. */
        virtual void setActive(const bool active) { m_active = active; }
        /** \return true if this exporter is enabled. */
        virtual bool getActive() const { return m_active; }
    };
}
#endif
|
// Public API barrel for the select-searchable component library:
// re-exports the component, its modal page, and all template directives.
export { SelectSearchableComponent } from './select-searchable.component';
export { SelectSearchablePageComponent } from './select-searchable-page.component';
export { SelectSearchableValueTemplateDirective } from './select-searchable-value-template.directive';
export { SelectSearchableItemTemplateDirective } from './select-searchable-item-template.directive';
export { SelectSearchableItemRightTemplateDirective } from './select-searchable-item-right-template.directive';
export { SelectSearchableLabelTemplateDirective } from './select-searchable-label-template.directive';
export { SelectSearchableTitleTemplateDirective } from './select-searchable-title-template.directive';
export { SelectSearchableMessageTemplateDirective } from './select-searchable-message-template.directive';
// Module bundling the declarations above.
// NOTE(review): presumably an Angular NgModule — confirm against the implementation.
export declare class SelectSearchableModule {
}
|
Leading names from the golfing world will join figureheads and influencers from the golf travel industry at Spain’s legendary La Manga Club for the fifth annual World Golf Awards.
Former England fast bowler Matthew Hoggard has signed up for a new team after joining Spain’s La Manga Club sports and leisure resort as a cricketing ambassador.
World Golf Awards has revealed that the five-star La Manga Club resort, an unrivalled sports and leisure paradise in south-east Spain, will host its Gala Ceremony 2018.
Golfing legend Jack Nicklaus has been voted Golf Course Designer of the Year at the World Golf Awards. Nicklaus - nicknamed the Golden Bear - was the big winner at golf tourism’s event of the year, which took place at La Manga Club earlier, welcoming the elite of the golf hospitality industry to Spain.
Star names from the golfing world will join key figures from the global golf travel industry in heading to Spain’s La Manga Club later this month to take part in the annual World Golf Awards.
An irresistible combination of sport, spa and sunbathing helped contribute to an action-packed summer of plenty for holidaymakers from all over Europe at Spain’s famous La Manga Club this year.
Leading names from the past and present have teamed up to help Spain’s world-famous La Manga Club resort celebrate its 45th anniversary in style in London.
World Golf Awards has revealed that the five-star La Manga Club resort, an unrivalled sports and leisure paradise in south-east Spain, will host its Gala Ceremony 2017 at the end of this year. The most prestigious awards programme in the golf tourism industry will take place at the famous venue in Murcia, from November 23rd-26th.
Julio Delgado, CEO of La Manga Club, said:.. |
import { EventEmitter } from '../../stencil.core';
/**
 * Generated ambient declaration (.d.ts) for a Stencil banner component
 * (EventEmitter is imported from stencil.core). Only the public surface
 * is visible here; all member bodies live in the compiled implementation.
 */
export declare class KeepitBannerComponent {
    // Whether the expanded (full) banner is currently shown.
    showFullBanner: boolean;
    // Four lines of banner copy — presumably rendered in order; confirm in render().
    text1: string;
    text2: string;
    text3: string;
    text4: string;
    // Event presumably emitted when the user activates the banner's
    // redirect action — TODO confirm payload in the implementation.
    redirect: EventEmitter;
    // Event emitted when the banner is toggled.
    // NOTE(review): "toggleBunner" looks like a typo for "toggleBanner";
    // it is part of the public API, so renaming would break consumers —
    // confirm with downstream users before changing.
    toggleBunner: EventEmitter;
    // Presumably expands the banner (sets showFullBanner) — TODO confirm.
    makeOpen(): void;
    // Secondary UI handler; behavior not visible from this declaration.
    handleUi2(): void;
    // Stencil render hook returning the component's JSX tree.
    render(): any;
}
|
Judy Garland
Early life
Garland was born Frances Ethel Gumm on June 10, 1922, in Grand Rapids, Minnesota. She was the youngest child of Ethel Marion (née Milne; 1893–1953) and Francis Avent "Frank" Gumm (1886–1935). Her parents were vaudevillians who settled in Grand Rapids to run a movie theater that featured vaudeville acts. She was of Irish, English, and Scottish ancestry, named after both of her parents and baptized at a local Episcopal church.
"Baby" (as she was called by her parents and sisters) shared her family's flair for song and dance. Her first appearance came at the age of two-and-a-half, when she joined her elder sisters Mary Jane "Suzy/Suzanne" Gumm and Dorothy Virginia "Jimmie" Gumm on the stage of her father's movie theater during a Christmas show and sang a chorus of "Jingle Bells". The Gumm Sisters performed there for the next few years, accompanied by their mother on piano.
The family relocated to Lancaster, California, in June 1926, following rumors that her father had made sexual advances towards male ushers. Frank purchased and operated another theater in Lancaster, and Ethel began managing her daughters and working to get them into motion pictures.
The Gumm/Garland Sisters
In 1928, the Gumm Sisters enrolled in a dance school run by Ethel Meglin, proprietress of the Meglin Kiddies dance troupe. They appeared with the troupe at its annual Christmas show. Through the Meglin Kiddies, they made their film debut in a short subject called The Big Revue (1929), where they performed a song-and-dance number called "That's the good old sunny south". This was followed by appearances in two Vitaphone shorts the following year: A Holiday in Storyland (featuring Garland's first on-screen solo) and The Wedding of Jack and Jill. They next appeared together in Bubbles. Their final on-screen appearance was in an MGM Technicolor short entitled La Fiesta de Santa Barbara (1935).
The trio had toured the vaudeville circuit as "The Gumm Sisters" for many years when they performed in Chicago at the Oriental Theater with George Jessel in 1934. He encouraged the group to choose a more appealing name after "Gumm" was met with laughter from the audience. According to theater legend, their act was once erroneously billed at a Chicago theater as "The Glum Sisters".
Several stories persist regarding the origin of their use of the name Garland. One is that it was originated by Jessel after Carole Lombard's character Lily Garland in the film Twentieth Century (1934), which was then playing at the Oriental in Chicago; another is that the girls chose the surname after drama critic Robert Garland. Garland's daughter Lorna Luft stated that her mother selected the name when Jessel announced that the trio "looked prettier than a garland of flowers". A TV special was filmed in Hollywood at the Pantages Theatre premiere of A Star Is Born on September 29, 1954, in which Jessel stated:
I think that I ought to tell the folks that it was I who named Judy Garland, Judy Garland. Not that it would have made any difference – you couldn't have hid[den] that great talent if you'd called her "Tel Aviv Windsor Shell", you know, but her name when I first met her was Frances Gumm and it wasn't the kind of a name that so sensitive a great actress like that should have; ... and so we called her Judy Garland, and I think she's a combination of Helen Hayes and Al Jolson, and maybe Jenny Lind and Sarah Bernhardt.
A later explanation surfaced when Jessel was a guest on Garland's television show in 1963. He said that he had sent actress Judith Anderson a telegram containing the word "garland" and it stuck in his mind. However, Garland asked Jessel just moments later if this story was true, and he blithely replied "No".
By late 1934, the Gumm Sisters had changed their name to the Garland Sisters. Frances changed her name to "Judy" soon after, inspired by a popular Hoagy Carmichael song. The group broke up by August 1935, when Suzanne Garland flew to Reno, Nevada, and married musician Lee Kahn, a member of the Jimmy Davis orchestra playing at Cal-Neva Lodge, Lake Tahoe.
Signed at Metro-Goldwyn-Mayer
In September 1935, Louis B. Mayer asked songwriter Burton Lane to go to the Orpheum Theater in downtown Los Angeles to watch the Garland Sisters' vaudeville act and to report to him. A few days later, Judy and her father were brought for an impromptu audition at Metro-Goldwyn-Mayer Studios in Culver City. Garland performed "Zing! Went the Strings of My Heart" and "Eli, Eli", a Yiddish song written in 1896 and very popular in vaudeville. The studio immediately signed Garland to a contract with MGM, presumably without a screen test, though she had made a test for the studio several months earlier. The studio did not know what to do with her; aged thirteen, she was older than the traditional child star, but too young for adult roles.
Her physical appearance was a dilemma for MGM. She was only 4 feet 11.5 inches (151.1 cm), and her "cute" or "girl-next-door" looks did not exemplify the most glamorous persona then required of leading female performers. She was self-conscious and anxious about her appearance. "Judy went to school at Metro with Ava Gardner, Lana Turner, Elizabeth Taylor, real beauties", said Charles Walters, who directed her in a number of films. "Judy was the big money-maker at the time, a big success, but she was the ugly duckling ... I think it had a very damaging effect on her emotionally for a long time. I think it lasted forever, really." Her insecurity was exacerbated by the attitude of studio chief Louis B. Mayer, who referred to her as his "little hunchback".
During her early years at the studio, she was photographed and dressed in plain garments or frilly juvenile gowns and costumes to match the "girl-next-door" image created for her. They had her wear removable caps on her teeth and rubberized discs to reshape her nose. On the set of Meet Me in St. Louis, when she was 21 years old, Garland met Dotty Ponedel, a makeup artist who worked at MGM. After reviewing the additions to her look, Garland was surprised when Ponedel said that the caps and discs that Garland had been using were not needed, as she was “a pretty girl.” Ponedel went forward with being Garland's makeup artist. The work that Ponedel did on Garland for Meet Me in St. Louis made Garland so happy that Ponedel became Garland's go-to every time she worked on a film under MGM.
On November 16, 1935, Garland was in the midst of preparing for a radio performance on the Shell Chateau Hour when she learned that her father had been hospitalized with meningitis and had taken a turn for the worse. Frank Gumm died the following morning at age forty-nine, leaving her devastated at age thirteen. Her song for the Shell Chateau Hour was her first professional rendition of "Zing! Went the Strings of My Heart", a song which became a standard in many of her concerts.
Garland performed at various studio functions and was eventually cast opposite Deanna Durbin in the musical-short Every Sunday (1936). The film contrasted her vocal range and swing style with Durbin's operatic soprano and served as an extended screen test for them, as studio executives were questioning the wisdom of having two girl singers on the roster.
Garland came to the attention of studio executives when she sang a special arrangement of "You Made Me Love You (I Didn't Want to Do It)" to Clark Gable at a birthday party that the studio arranged for the actor. Her rendition was so well regarded that she performed the song in the all-star extravaganza Broadway Melody of 1938 (1937), when she sang to a photograph of him.
MGM hit on a winning formula when it paired Garland with Mickey Rooney in a string of what were known as "backyard musicals". The duo first appeared together as supporting characters in the B movie Thoroughbreds Don't Cry (1937). Garland was then put in the cast of the fourth of the Hardy Family movies as a literal girl-next-door to Rooney's character Andy Hardy, in Love Finds Andy Hardy (1938), although Hardy's love interest was played by Lana Turner. They teamed as lead characters for the first time in Babes in Arms (1939), ultimately appearing in five additional films, including Hardy films Andy Hardy Meets Debutante (1940) and Life Begins for Andy Hardy (1941).
Garland stated that she, Rooney, and other young performers were constantly prescribed amphetamines in order to stay awake and keep up with the frantic pace of making one film after another. They were also given barbiturates to take before going to bed so they could sleep. This regular use of drugs, she said, led to addiction and a life-long struggle, and contributed to her early death. She later resented the hectic schedule and believed MGM stole her youth. Rooney, however, denied their childhood studio was responsible for her addiction: "Judy Garland was never given any drugs by Metro-Goldwyn-Mayer. Mr. Mayer didn't sanction anything for Judy. No one on that lot was responsible for Judy Garland's death. Unfortunately, Judy chose that path."
Garland's weight was within a healthy range, but the studio demanded she diet constantly. They even went so far as to serve her only a bowl of soup and a plate of lettuce when she ordered a regular meal. She was plagued with self-doubt throughout her life, despite successful film and recording careers, awards, critical praise, and her ability to fill concert halls worldwide. She required constant reassurance she was talented and attractive.
The Wizard of Oz
In 1938, she was cast as the young Dorothy Gale in The Wizard of Oz (1939), a film based on the 1900 children's book by L. Frank Baum. In this film she sang the song with which she would afterward be constantly identified, "Over the Rainbow". Although producers Arthur Freed and Mervyn LeRoy had wanted to cast her in the role from the beginning, studio chief Mayer first tried to borrow Shirley Temple from 20th Century Fox, but they declined. Deanna Durbin was then asked, but was unavailable; this resulted in Garland being cast.
Garland was initially outfitted in a blonde wig for the part, but Freed and LeRoy decided against it shortly into filming. Her blue gingham dress was chosen for its blurring effect on her figure, which made her look younger. Shooting commenced on October 13, 1938, and it was completed on March 16, 1939, with a final cost of more than US$2 million. With the conclusion of filming, MGM kept Garland busy with promotional tours and the shooting of Babes in Arms (also 1939), directed by Busby Berkeley. She and Rooney were sent on a cross-country promotional tour, culminating in the August 17 New York City premiere at the Capitol Theater, which included a five-show-a-day appearance schedule for the two stars. Garland was forced to follow a strict diet during filming; she was given tobacco to suppress her appetite.
The Wizard of Oz was a tremendous critical success, though its high budget and promotions costs of an estimated $4 million (equivalent to $72 million in 2019), coupled with the lower revenue that was generated by discounted children's tickets, meant that the film did not return a profit until it was rereleased in the 1940s and on subsequent occasions. At the 1939 Academy Awards ceremony, Garland received her only Academy Award, an Academy Juvenile Award for her performances in 1939, including The Wizard of Oz and Babes in Arms. She was the fourth person to receive the award as well as only one of twelve in history to ever be presented with one.
Adult stardom
Garland starred in three films released in 1940: Andy Hardy Meets Debutante, Strike Up the Band, and Little Nellie Kelly. In the last, she played her first adult role, a dual role of both mother and daughter. Little Nellie Kelly was purchased from George M. Cohan as a vehicle for her to display both her audience appeal and her physical appearance. The role was a challenge for her, requiring the use of an accent, her first adult kiss, and the only death scene of her career. Her co-star George Murphy regarded the kiss as embarrassing. He said it felt like "a hillbilly with a child bride".
During this time, Garland was still in her teens when she experienced her first serious adult romances. The first was with bandleader Artie Shaw. She was deeply devoted to him and was devastated in early 1940 when he eloped with Lana Turner. Garland began a relationship with musician David Rose, and on her 18th birthday, he gave her an engagement ring. The studio intervened because, at that time, he was still married to actress and singer Martha Raye. They agreed to wait a year to allow for his divorce to become final. During that time, Garland had a brief affair with songwriter Johnny Mercer. After her break-up with Mercer, Garland and Rose were wed on July 27, 1941. "A true rarity" is what media called it. The couple agreed to a trial separation in January 1943, and divorced in 1944. She was noticeably thinner in her next film, For Me and My Gal (1942), alongside Gene Kelly in his first screen appearance.
In 1941, Garland had an abortion while pregnant with Rose's child at the insistence of her mother and the studio since the pregnancy wasn't approved. She had a second one in 1943 when she became pregnant from her affair with Tyrone Power.
Garland was given the "glamor treatment" in Presenting Lily Mars (1943), in which she was dressed in "grown-up" gowns. Her lightened hair was also pulled up in a stylish fashion. However, no matter how glamorous or beautiful she appeared on screen or in photographs, she was never confident in her appearance and never escaped the "girl-next-door" image that the studio had created for her.
One of Garland's most successful films for MGM was Meet Me in St. Louis (1944), in which she introduced three standards: "The Trolley Song", "The Boy Next Door", and "Have Yourself a Merry Little Christmas". This was one of the first films in her career that gave her the opportunity to be the attractive leading lady. Vincente Minnelli was assigned to direct, and he requested that make-up artist Dorothy Ponedel be assigned to Garland. Ponedel refined her appearance in several ways, including extending and reshaping her eyebrows, changing her hairline, modifying her lip line and removing her nose discs and dental caps. She appreciated the results so much that Ponedel was written into her contract for all her remaining pictures at MGM.
At this time, Garland had a brief affair with film director Orson Welles, who at that time was married to Rita Hayworth. The affair ended in early 1945, and they remained on good terms afterwards.
During the filming of Meet Me in St. Louis, Garland and Minnelli had some initial conflict between them, but they entered into a relationship and married on June 15, 1945. On March 12, 1946, daughter Liza was born. The couple divorced by 1951.
The Clock (1945) was Garland's first straight dramatic film; Robert Walker was cast in the main male role. Though the film was critically praised and earned a profit, most movie fans expected her to sing. She did not act again in a non-singing dramatic role for many years. Garland's other films of the 1940s include The Harvey Girls (1946), in which she introduced the Academy Award-winning song "On the Atchison, Topeka, and the Santa Fe", and Till the Clouds Roll By (1946).
Last MGM motion pictures
During filming for The Pirate in April 1947, Garland suffered a nervous breakdown and was placed in a private sanitarium. She was able to complete filming, but in July she made her first suicide attempt, making minor cuts to her wrist with a broken glass. During this period, she spent two weeks in treatment at the Austen Riggs Center, a psychiatric hospital in Stockbridge, Massachusetts. The Pirate was released in 1948 and was the first film in which Garland had starred since The Wizard of Oz not to make a profit. The main reasons for its failure were not only its cost, but also the increasing expense of the shooting delays while Garland was ill, as well as the fact that the general public was not yet willing to accept her in a sophisticated film. Following her work on The Pirate, she co-starred for the first and only time with Fred Astaire (who replaced Gene Kelly after Kelly had broken his ankle) in Easter Parade, which became her top-grossing film at MGM and quickly re-established her as one of MGM's primary assets.
Thrilled by the huge box-office receipts of Easter Parade, MGM immediately teamed Garland and Astaire in The Barkleys of Broadway. During the initial filming, Garland was taking prescription sleeping medication along with illicitly obtained pills containing morphine. Around this time, she also developed a serious problem with alcohol. These, in combination with migraine headaches, led her to miss several shooting days in a row. After being advised by her doctor that she would only be able to work in four- to five-day increments with extended rest periods between, MGM executive Arthur Freed made the decision to suspend her on July 18, 1948. She was replaced in the film by Ginger Rogers. When her suspension was over, she was summoned back to work and ultimately performed two songs as a guest in the Rodgers and Hart biopic Words and Music (1948), which was her last appearance with Mickey Rooney. Despite the all-star cast, Words and Music barely broke even at the box office. Having regained her strength, as well as some needed weight during her suspension, Garland felt much better and in the fall of 1948, she returned to MGM to replace a pregnant June Allyson for the musical film In the Good Old Summertime (1949) co-starring Van Johnson. Although she was sometimes late arriving at the studio during the making of this picture, she managed to complete it five days ahead of schedule. Her daughter Liza made her film debut at the age of two and a half at the end of the film. In The Good Old Summertime was enormously successful at the box office.
Garland was then cast in the film adaptation of Annie Get Your Gun in the title role of Annie Oakley. She was nervous at the prospect of taking on a role strongly identified with Ethel Merman, anxious about appearing in an unglamorous part after breaking from juvenile parts for several years, and disturbed by her treatment at the hands of director Busby Berkeley. Berkeley was staging all the musical numbers, and was severe with Garland's lack of effort, attitude, and enthusiasm. She complained to Mayer, trying to have Berkeley fired from the feature. She began arriving late to the set and sometimes failed to appear. At this time, she was also undergoing electroshock therapy for depression. She was fired from the picture on May 10, 1949, and was replaced by Betty Hutton, who stepped in to perform all the musical routines as staged by Berkeley.
Garland underwent an extensive hospital stay at Peter Bent Brigham Hospital in Boston, Massachusetts, in which she was weaned off her medication, and after a while, was able to eat and sleep normally. During her stay, she found solace in meeting with disabled children; in a 1964 interview regarding issues raised in A Child Is Waiting (1963) and her recovery at Peter Bent Brigham Hospital, Garland had this to say: "Well it helped me by just getting my mind off myself and ... they were so delightful, they were so loving and good and I forgot about myself for a change". Garland returned to Los Angeles heavier, and in the fall of 1949, was cast opposite Gene Kelly in Summer Stock (1950). The film took six months to complete. To lose weight, Garland went back on the pills and the familiar pattern resurfaced. She began showing up late or not at all. When principal photography on Summer Stock was completed in the spring of 1950, it was decided that Garland needed an additional musical number. She agreed to do it provided the song should be "Get Happy". In addition, she insisted that director Charles Walters choreograph and stage the number. By that time, Garland had lost 15 pounds and looked more slender. "Get Happy" was the last segment of Summer Stock to be filmed. It was her final picture for MGM. When it was released in the fall of 1950, Summer Stock drew big crowds and racked up very respectable box-office receipts, but because of the costly shooting delays caused by Garland, the film posted a loss of $80,000 to the studio.
Garland was cast in the film Royal Wedding with Fred Astaire after June Allyson became pregnant in 1950. She failed to report to the set on multiple occasions, and the studio suspended her contract on June 17, 1950. She was replaced by Jane Powell. Reputable biographies following her death stated that after this latest dismissal, she slightly grazed her neck with a broken glass, requiring only a band-aid, but at the time, the public was informed that a despondent Garland had slashed her throat. "All I could see ahead was more confusion", Garland later said of this suicide attempt. "I wanted to black out the future as well as the past. I wanted to hurt myself and everyone who had hurt me." In September 1950, after 15 years with the studio, Garland and MGM parted company.
Appearances on Bing Crosby's radio show
Garland was a frequent guest on Kraft Music Hall, hosted by her friend Bing Crosby. Following Garland's second suicide attempt, Crosby, knowing that she was depressed and running out of money, invited her on to his radio show – the first of the new season – on October 11, 1950.
She was standing in the wings of it trembling with fear. She was almost hysterical. She said, "I cannot go out there because they're all gonna be looking to see if there are scars, and it's gonna be terrible." Bing said "What's going on?" and I told him what happened and he walked out on stage and he said: "We got a friend here, she's had a little trouble recently. You probably heard about it – everything is fine now, she needs our love. She needs our support. She's here – let's give it to her, OK? Here's Judy." And she came out, and that place went crazy. And she just blossomed.
— Hal Kanter, Writer for Bing
Garland made eight appearances during the 1950–51 season of The Bing Crosby – Chesterfield Show, which immediately reinvigorated her career. Soon after, she toured for four months to sellout crowds in Europe.
Renewed stardom on the stage
In 1951, Garland began a four-month concert tour of Britain and Ireland, where she played to sold-out audiences throughout England, Scotland, and Ireland. The successful concert tour was the first of her many comebacks, with performances centered on songs by Al Jolson and revival of vaudevillian "tradition". Garland performed complete shows as tributes to Jolson in her concerts at the London Palladium in April and at New York's Palace Theater later that year. Garland said after the Palladium show: "I suddenly knew that this was the beginning of a new life ... Hollywood thought I was through; then came the wonderful opportunity to appear at the London Palladium, where I can truthfully say Judy Garland was reborn." Her appearances at the Palladium lasted for four weeks, where she received rave reviews and an ovation described by the Palladium manager as the loudest he had ever heard.
Garland's engagement at the Palace Theatre in Manhattan in October 1951 exceeded all previous records for the theater and for Garland, and was called "one of the greatest personal triumphs in show business history". Garland was honored with a Special Tony Award for her contribution to the revival of vaudeville.
Garland divorced Minnelli that same year. On June 8, 1952, she married Sidney Luft, her tour manager and producer, in Hollister, California. Garland gave birth to Lorna Luft, who herself became an actress and singer, on November 21, 1952, and to Joey Luft on March 29, 1955.
Hollywood comeback
Garland appeared with James Mason in the 1954 Warner Bros. film A Star Is Born, the first musical remake of the 1937 film. She and Sidney Luft, her then-husband, produced the film through their production company, Transcona Enterprises, while Warner Bros. supplied finance, production facilities, and crew. Directed by George Cukor, it was a large undertaking to which she initially fully dedicated herself.
As shooting progressed, however, she began making the same pleas of illness that she had so often made during her final films at MGM. Production delays led to cost overruns and angry confrontations with Warner Bros. head Jack L. Warner. Principal photography wrapped on March 17, 1954. At Luft's suggestion, the "Born in a Trunk" medley was filmed as a showcase for her and inserted over director Cukor's objections, who feared the additional length would lead to cuts in other areas. It was completed on July 29.
Upon its world premiere on September 29, 1954, the film was met with critical and popular acclaim. Before its release, it was edited at the instruction of Jack Warner; theater operators, concerned that they were losing money because they were only able to run the film for three or four shows per day instead of five or six, pressured the studio to make additional reductions. After its first-run engagements, about 30 minutes of footage were cut, sparking outrage among critics and filmgoers. Although it was still popular, drawing huge crowds and grossing over $6,000,000 in its first release, A Star is Born did not make back its cost and ended up losing money. As a result, the secure financial position Garland had expected from the profits did not materialize. Transcona made no more films with Warner.
Garland was nominated for the Academy Award for Best Actress, and, in the run-up to the 27th Academy Awards, was generally expected to win for A Star Is Born. She could not attend the ceremony because she had just given birth to her son, Joseph Luft, so a television crew was in her hospital room with cameras and wires to broadcast her anticipated acceptance speech. The Oscar was won, however, by Grace Kelly for The Country Girl (1954). The camera crew was packing up before Kelly could even reach the stage. Groucho Marx sent Garland a telegram after the awards ceremony, declaring her loss "the biggest robbery since Brinks". TIME labeled her performance as "just about the greatest one-woman show in modern movie history". Garland won the Golden Globe Award for Best Actress in a Musical for the role.
Garland's films after A Star Is Born included Judgment at Nuremberg (1961) (for which she was Oscar- and Golden Globe-nominated for Best Supporting Actress), the animated feature Gay Purr-ee (1962), and A Child Is Waiting (1963) with Burt Lancaster. Her final film was I Could Go On Singing (1963), co-starring Dirk Bogarde.
Television, concerts, and Carnegie Hall
Garland appeared in a number of television specials beginning in 1955. The first was the 1955 debut episode of Ford Star Jubilee; this was the first full-scale color broadcast ever on CBS and was a ratings triumph, scoring a 34.8 Nielsen rating. She signed a three-year, $300,000 contract with the network. Only one additional special was broadcast in 1956, a live concert-edition of General Electric Theater, before the relationship between the Lufts and CBS broke down in a dispute over the planned format of upcoming specials.
In 1956, Garland performed for four weeks at the New Frontier Hotel on the Las Vegas Strip for a salary of $55,000 per week, making her the highest-paid entertainer to work in Las Vegas. Despite a brief bout of laryngitis, during which Jerry Lewis filled in for her for one performance while she watched from a wheelchair, her performances there were so successful that her run was extended an extra week. Later that year, she returned to the Palace Theatre, site of her two-a-day triumph. She opened in September, once again to rave reviews and popular acclaim.
In November 1959, Garland was hospitalized after she was diagnosed with acute hepatitis. Over the next few weeks, several quarts of fluid were drained from her body until she was released from the hospital in January 1960, still in a weak condition. She was told by doctors that she likely had five years, or less, to live, and that, even if she did survive, she would be a semi-invalid and would never sing again. She initially felt "greatly relieved" at the diagnosis. "The pressure was off me for the first time in my life." However, she recovered over the next several months, and in August of that year, returned to the stage of the Palladium. She felt so warmly embraced by the British that she announced her intention to move permanently to England.
At the beginning of 1960, Garland signed a contract with Random House to write her autobiography. The book was to be called The Judy Garland Story, and would be a collaboration with Fred F. Finklehoffe. Garland was paid an advance of $35,000, and she and Finklehoffe recorded conversations about her life to be used in producing a manuscript. Garland would work on her autobiography on and off throughout the 1960s, but never completed it. Portions of her unfinished autobiography were included in the 2014 biography, Judy Garland on Judy Garland: Interviews and Encounters by Randy L. Schmidt.
Her concert appearance at Carnegie Hall on April 23, 1961, was a considerable highlight, called by many "the greatest night in show business history". The two-record album Judy at Carnegie Hall was certified gold, charting for 95 weeks on Billboard, including 13 weeks at number one. It won four Grammy Awards, including Album of the Year and Best Female Vocal of the Year.
The Judy Garland Show
In 1961, Garland and CBS settled their contract disputes with the help of her new agent, Freddie Fields, and negotiated a new round of specials. The first, titled The Judy Garland Show, aired on February 25, 1962 and featured guests Frank Sinatra and Dean Martin. Following this success, CBS made a $24 million offer to her for a weekly television series of her own, also to be called The Judy Garland Show, which was deemed at the time in the press to be "the biggest talent deal in TV history". Although she had said as early as 1955 that she would never do a weekly television series, in the early 1960s, she was in a financially precarious situation. She was several hundred thousand dollars in debt to the Internal Revenue Service, having failed to pay taxes in 1951 and 1952, and the failure of A Star is Born meant that she received nothing from that investment.
Following a third special, Judy Garland and Her Guests Phil Silvers and Robert Goulet, Garland's weekly series debuted September 29, 1963. The Judy Garland Show was critically praised, but for a variety of reasons (including being placed in the time slot opposite Bonanza on NBC), the show lasted only one season and was cancelled in 1964 after 26 episodes. Despite its short run, the series was nominated for four Emmy Awards, including Best Variety Series.
Political views
Garland was a life-long and relatively active Democrat. During her lifetime, she was a member of the Hollywood Democratic committee, and a financial, as well as a moral, supporter of various causes, including the Civil Rights Movement. She donated money to the campaigns of Democratic presidential candidates Franklin D. Roosevelt, Adlai Stevenson II, John F. Kennedy, and Robert F. Kennedy, and Progressive candidate Henry A. Wallace.
Garland was a friend of President John F. Kennedy and his wife Jacqueline Kennedy, and she often vacationed in Hyannis Port, Massachusetts. The house she stayed in during her vacations in Hyannis Port is known today as The Judy Garland House because of her association with the property. Garland would call the President weekly, often ending her phone calls by singing the first few bars of "Over the Rainbow".
On September 16, 1963, Garland – along with Carolyn Jones, June Allyson, Pam Powell (June Allyson's daughter), and daughter Liza – held a press conference to highlight and protest about the recent bombing of the 16th Street Baptist Church in Birmingham, Alabama that resulted in the death of four young African American girls. They expressed their shock at the events and requested funds for the families of the victims. Pam Powell and Liza Minnelli both announced their intention to attend the funeral of the victims during the press conference.
Final years
In 1963, Garland sued Luft for divorce on the grounds of mental cruelty. She also asserted that he had repeatedly struck her while he was drinking and that he had even attempted to take their children from her by force. She had filed for divorce from Luft on several previous occasions, even as early as 1956, but they had reconciled each time.
After her television series was canceled, Garland returned to work on the stage. She returned to the London Palladium performing with her 18-year-old daughter Liza Minnelli in November 1964. The concert was also shown on the British television network ITV and it was one of her final appearances at the venue. She made guest appearances on The Ed Sullivan Show and The Tonight Show. Garland guest-hosted an episode of The Hollywood Palace with Vic Damone. She was invited back for a second episode in 1966 with Van Johnson as her guest. Problems with Garland's behavior ended her Hollywood Palace guest appearances.
A 1964 tour of Australia was largely disastrous. Garland's first two concerts in Sydney were held in the Sydney Stadium because no concert hall could accommodate the overflow crowds who wanted to see her. Both went well and received positive reviews. Her third performance, in Melbourne, started an hour late. The crowd of 7,000 was angered by her tardiness and believed that she was drunk; they booed and heckled her, and she fled the stage after 45 minutes. She later characterized the Melbourne crowd as "brutish". Garland's Melbourne appearance gained a negative press response.
Garland's tour promoter Mark Herron announced that they had married aboard a freighter off the coast of Hong Kong. However, she was not officially divorced from Luft at the time the ceremony was performed. The divorce became final on May 19, 1965, and she and Herron did not legally marry until November 14, 1965; they separated six months later. During their divorce, Garland testified that Herron had beaten her. Herron claimed that he "only hit her in self defense".
For much of her career throughout the 1950s and early 1960s, her husband Sidney Luft had been her manager. However, Garland eventually parted ways with Luft professionally, signing with agents Freddie Fields and David Begelman. By the fall of 1966, Garland had also parted ways with Fields and Begelman. Fields's and Begelman's mismanagement of Garland's money, as well as their embezzlement of much of her earnings resulted in her owing around $500,000 in total in personal debts and in debts to the IRS. The IRS placed tax liens on her home in Brentwood, Los Angeles, her recording contract with Capitol Records, and any other business dealings in which she could derive an income.
Garland was left in a desperate situation which saw her sell her Brentwood home at a price far below its value and being cast in February 1967 for the role of Helen Lawson in Valley of the Dolls by 20th Century Fox. According to co-star Patty Duke, Garland was treated poorly by director Mark Robson on the set of Valley of the Dolls and was primarily hired so as to augment publicity for the film. After Garland's dismissal from the film, author Jacqueline Susann said in the 1967 television documentary Jacqueline Susann and the Valley of the Dolls, "I think Judy will always come back. She kids about making a lot of comebacks, but I think Judy has a kind of a thing where she has to get to the bottom of the rope and things have to get very, very rough for her. Then with an amazing inner strength that only comes of a certain genius, she comes back bigger than ever".
Returning to the stage, Garland made her last appearances at New York's Palace Theatre in July, a 27-show stand, performing with her children Lorna and Joey Luft. She wore a sequined pantsuit on stage for this tour, which was part of the original wardrobe for her character in Valley of the Dolls. Garland earned more than $200,000 from her final run at New York's Palace Theatre from her 75% share of the profits generated by her engagement there. On closing night at the Palace, federal tax agents seized the majority of her earnings.
By early 1969, Garland's health had deteriorated. She performed in London at the Talk of the Town nightclub for a five-week run in which she was paid £2,500 per week, and made her last concert appearance in Copenhagen during March 1969. After her divorce from Herron had been finalized on February 11, she married her fifth and final husband, nightclub manager Mickey Deans, at Chelsea Register Office, London, on March 15.
Death
On June 22, 1969, Deans found Garland dead in the bathroom of their rented mews house in Cadogan Lane, Belgravia, London; she was 47 years old. At the inquest, Coroner Gavin Thurston stated that the cause of death was "an incautious self-overdosage" of barbiturates; her blood contained the equivalent of ten 1.5-grain (97 mg) Seconal capsules. Thurston stressed that the overdose had been unintentional and no evidence suggested that she had died by suicide. Garland's autopsy showed no inflammation of her stomach lining and no drug residue in her stomach, which indicated that the drug had been ingested over a long period of time, rather than in a single dose. Her death certificate stated that her death was "accidental". Supporting the accidental cause, Garland's physician noted that a prescription of 25 barbiturate pills was found by her bedside half-empty and another bottle of 100 barbiturate pills was still unopened.
A British specialist who had attended Garland's autopsy stated that she had nevertheless been living on borrowed time owing to cirrhosis, although a second autopsy conducted later showed no evidence of alcoholism or cirrhosis. Garland died twelve days after her forty-seventh birthday. Her Wizard of Oz co-star Ray Bolger commented at her funeral, "She just plain wore out." Forensic pathologist Jason Payne-James believed that Garland had an eating disorder, which contributed to her death.
After Garland's body had been embalmed by Desmond Henley, Deans traveled with her remains to New York City on June 26, where an estimated 20,000 people lined up to pay their respects at the Frank E. Campbell Funeral Chapel in Manhattan, which remained open all night long to accommodate the overflowing crowd. On June 27, James Mason gave a eulogy at the funeral, an Episcopal service led by the Rev. Peter A. Delaney of St Marylebone Parish Church, London, who had officiated at her marriage to Deans, three months earlier. "Judy's great gift", Mason said in his eulogy, "was that she could wring tears out of hearts of rock.... She gave so richly and so generously, that there was no currency in which to repay her." The public and press were barred. She was interred in a crypt in the community mausoleum at Ferncliff Cemetery in Hartsdale, New York, a small town 24 miles (39 km) north of midtown Manhattan.
Upon Garland's death, despite having earned millions during her career, her estate came to US$40,000 (equivalent to $273,285 in 2018). Years of mismanagement of her financial affairs by her representatives and staff along with her generosity toward her family resulted in her poor financial situation at the end of her life. In her last will, signed and sealed in early 1961, Garland made many generous bequests which could not be fulfilled because her estate had been in debt for many years. Her daughter, Liza Minnelli, worked to pay off her mother's debts with the help of family friend Frank Sinatra.
In 1978, a selection of Garland's personal items was auctioned off by her ex-husband Sidney Luft with the support of their daughter Lorna and their son Joe. Almost 500 items, ranging from copper cookware to musical arrangements, were offered for sale. The auction raised US$250,000 (equivalent to $960,332 in 2018) for her heirs.
At the insistence of her children, Garland's remains were disinterred from Ferncliff Cemetery in January 2017 and re-interred 2,800 miles (4,500 km) across the country at the Hollywood Forever Cemetery in Los Angeles.
Artistry
Garland possessed the vocal range of a contralto. Her singing voice has been described as brassy, powerful, effortless and resonant, often demonstrating a tremulous, powerful vibrato. Although the octave range of her voice was comparatively limited, she was capable of alternating between female and male-sounding timbres at will with little effort. The Richmond Times-Dispatch correspondent Tony Farrell wrote that Garland possessed "a deep, velvety contralto voice that could turn on a dime to belt out the high notes", while Ron O'Brien, producer of tribute album The Definitive Collection – Judy Garland (2006), wrote that the singer's combination of natural phrasing, elegant delivery, mature pathos "and powerful dramatic dynamics she brings to ... songs make her [renditions] the definitive interpretations". The Huffington Post writer Joan E. Dowlin called the period of Garland's musical career between 1937 and 1945 the "innocent years", during which the critic believes that the singer's "voice was vibrant and her musical expression exuberant", taking note of its resonance and distinct, "rich yet sweet" quality "that grabs you and pulls you in". Garland's voice would often vary to suit the song she was interpreting, ranging from soft, engaging and tender during ballads to humorous on some of her duets with other artists. Her more joyful, belted performances have been compared to entertainers Sophie Tucker, Ethel Merman, and Al Jolson. Although her musical repertoire consisted largely of cast recordings, show tunes and traditional pop standards, Garland was also capable of singing soul, blues, and jazz music, which Dowlin compared to singer Elvis Presley.
Garland insisted that her talent as a performer was inherited: "Nobody ever taught me what to do onstage." Critics agree that, even when she debuted as a child, Garland had always sounded mature for her age, particularly on her earlier recordings. From an early age, Garland had been billed as "the little girl with the leather lungs", a designation the singer later admitted to having felt humiliated by because she would have much preferred to have been known to audiences as a "pretty" or "nice little girl". Jessel recalled that, even at only 12 years-old, Garland's singing voice resembled that of "a woman with a heart that had been hurt". The Kansas City Star contributor Robert Trussel cited Garland's singing voice among reasons why her role and performance in The Wizard of Oz remains memorable, writing that although "She might have been made up and costumed to look like a little girl ... she didn't sing like one" due to her "powerful contralto command[ing] attention". Camille Paglia, social critic for The New York Times, joked that even in Garland's adult life, "her petite frame literally throbbed with her huge voice", making it appear as though she were "at war with her own body". Theater actress and director Donna Thomason stated that Garland was an "effective" performer because she was capable of using her "singing voice [as] a natural extension of [her] speaking voice", a skill that Thomason believes all musical theater actors should at least strive to achieve. Trussel agreed that "Garland's singing voice sounded utterly natural. It never seemed forced or overly trained."
Writing for Turner Classic Movies, biographer Jonathan Riggs observed that Garland had a tendency to imbue her vocals with a paradoxical combination of "fragility and resilience" that eventually became a signature trademark of hers. Louis Bayard of The Washington Post described Garland's voice as "throbbing", believing it to be capable of "connect[ing] with [audiences] in a way no other voice does". Bayard also believes that listeners "find it hard to disentwine the sorrow in her voice from the sorrow that dogged her life", while Dowlin argued that, "Listening to Judy sing ... makes me forget all of the angst and suffering she must have endured." The New York Times obituarist in 1969 observed that Garland, whether intentionally or not, "brought with her ... all the well-publicized phantoms of her emotional breakdown, her career collapses and comebacks" on stage during later performances. The same writer said that Garland's voice changed and lost some of its quality as she aged, although she retained much of her personality. Contributing to the Irish Independent, Julia Molony observed Garland's voice, although "still rich with emotion", had finally begun to "creak with the weight of years of disappointment and hard-living" by the time she performed at Carnegie Hall in 1961. Similarly, the live record's entry in the Library of Congress wrote that "while her voice was still strong, it had also gained a bit of heft and a bit of wear"; author Cary O'Dell believes Garland's rasp and "occasional quiver" only "upped the emotional quotient of many of her numbers", particularly on her signature songs "Over the Rainbow" and "The Man That Got Away". Garland stated that she always felt most safe and at home while performing onstage, regardless of the condition of her voice. 
Her musical talent has been commended by her peers; opera singer Maria Callas once said that Garland possessed "the most superb voice she had ever heard", while singer and actor Bing Crosby said that "no other singer could be compared to her" when Garland was rested.
Garland was known for interacting with her audiences during live performances; the New York Times obituarist wrote that Garland possessed "a seemingly unquenchable need for her audiences to respond with acclaim and affection. And often, they did, screaming, 'We love you, Judy – we love you.'" Garland herself explained in 1961: "A really great reception makes me feel like I have a great big warm heating pad all over me ... I truly have a great love for an audience, and I used to want to prove it to them by giving them blood. But I have a funny new thing now, a real determination to make people enjoy the show." The New York Times writer described her as both "an instinctive actress and comedienne". The anonymous contributor commented that Garland's performance style resembled that of "a music hall performer in an era when music halls were obsolete". Close friends of Garland's have insisted that she never truly wanted to be a movie star and would have much rather devoted her career entirely to singing and recording records. AllMusic biographer William Ruhlmann believes that Garland's ability to maintain a successful career as a recording artist even after her film appearances became less frequent was unusual for an artist at the time. Garland has been identified as a triple threat due to her ability to sing, act, and dance, arguably equally well. Doug Strassler, a critic for the New York Press, described Garland as a "triple threat" who "bounced between family musicals and adult dramas with a precision and a talent that remains largely unmatched". In terms of acting, Peter Lennon, writing for The Guardian in 1999, identified Garland as a "chameleon" due to her ability to alternate between comedic, musical and dramatic roles, citing The Wizard of Oz, The Clock, A Star is Born and I Could Go On Singing – her final film role – as prominent examples. 
Michael Musto, a journalist for W magazine, wrote that in her film roles Garland "could project decency, vulnerability, and spunk like no other star, and she wrapped it up with a tremulously beautiful vocal delivery that could melt even the most hardened troll".
Public image and reputation
Garland was nearly as famous for her personal struggles in everyday life as she was for her entertainment career. She has been closely associated with her carefully cultivated girl next door image. Early in her career during the 1930s, Garland's public image had earned her the title "America's favorite kid sister", as well as the title "Little Miss Showbusiness". In a review for the Star Tribune, Graydon Royce wrote that Garland's public image remained that of "a Midwestern girl who couldn't believe where she was", despite having been a well-established celebrity for over 20 years. Royce believes that fans and audiences insisted on preserving their memory of Garland as Dorothy no matter how much she matured, calling her "a captive not of her own desire to stay young, but a captive of the public's desire to preserve her that way". Thus, the studio continued to cast Garland in roles that were significantly younger than her actual age.
According to Molony, Garland was one of Hollywood's hardest-working performers during the 1940s, which Molony claims she used as a coping mechanism after her first marriage imploded. However, studio employees recall that Garland had a tendency to be quite intense, headstrong and volatile; Judy Garland: The Secret Life of an American Legend author David Shipman claims that several individuals were frustrated by Garland's "narcissism" and "growing instability", while millions of fans found her public demeanor and psychological state to be "fragile", appearing neurotic in interviews. MGM reports that Garland was consistently tardy and demonstrated erratic behavior, which resulted in several delays and disruptions to filming schedules until she was finally dismissed from the studio, which had deemed her unreliable and difficult to manage. Farrell called Garland "A grab bag of contradictions" which "has always been a feast for the American imagination", describing her public persona as "awkward yet direct, bashful yet brash". Describing the singer as "Tender and endearing yet savage and turbulent", Paglia wrote that Garland "cut a path of destruction through many lives. And out of that chaos, she made art of still-searing intensity." Calling her "a creature of extremes, greedy, sensual, and demanding, gluttonous for pleasure and pain", Paglia also compared Garland to entertainer Frank Sinatra due to their shared "emblematic personality ... into whom the mass audience projected its hopes and disappointments", while observing that she lacked Sinatra's survival skills.
Despite her success as a performer, Garland suffered from low self-esteem, particularly with regard to her weight, which she constantly dieted to maintain at the behest of the studio and Mayer; critics and historians believe this was a result of having been told that she was an "ugly duckling" by studio executives. Entertainment Weekly columnist Gene Lyons observed that both audiences and fellow members of the entertainment industry "tended either to love her or to hate her". At one point, Stevie Phillips, who had worked as an agent for Garland for four years, described her client as "a demented, demanding, supremely talented drug-addict". Royce argues that Garland maintained "astonishing strength and courage", even during difficult times. English actor Dirk Bogarde once called Garland "the funniest woman I have ever met". Ruhlmann wrote that the singer's personal life "contrasted so starkly with the exuberance and innocence of her film roles".
Despite her personal struggles, Garland disagreed with the public's opinion that she was a tragic figure. Writer William Randall Beard, who wrote the play based on Garland's life which is entitled Beyond the Rainbow, believes that Garland possessed "a wicked sense of humor and a passion", to the point of which she would have questioned anyone who stated she had lived "a tragic life". Her younger daughter Lorna agreed that Garland "hated" being referred to as a tragic figure, explaining, "We all have tragedies in our lives, but that does not make us tragic. She was funny and she was warm and she was wonderfully gifted. She had great highs and great moments in her career. She also had great moments in her personal life. Yes, we lost her at 47 years old. That was tragic. But she was not a tragic figure." Ruhlmann argues that Garland actually used the public's opinion of her tragic image to her advantage towards the end of her career.
Legacy
By the time of her death in 1969, Garland had appeared in more than 35 films. She has been called one of the greats of entertainment, and her reputation has endured. In 1992, Gerald Clarke of Architectural Digest dubbed Garland "probably the greatest American entertainer of the twentieth century". O'Brien believes that "No one in the history of Hollywood ever packed the musical wallop that Garland did", explaining, "She had the biggest, most versatile voice in movies. Her Technicolor musicals... defined the genre. The songs she introduced were Oscar gold. Her film career frames the Golden Age of Hollywood musicals." Turner Classic Movies dubbed Garland "history's most poignant voice". Entertainment Weekly's Gene Lyons dubbed Garland "the Madonna of her generation". The American Film Institute named her eighth among the Greatest female stars of Golden Age Hollywood cinema. In June 1998, in The New York Times, Camille Paglia wrote that, "Garland was a personality on the grand scale who makes our current crop of pop stars look lightweight and evanescent." In recent years, Garland's legacy has maintained fans of all different ages, both younger and older. In 2010, The Huffington Post contributor Joan E. Dowlin concluded that Garland possessed a distinct "it" quality by "exemplif[ying] the star quality of charisma, musical talent, natural acting ability, and, despite what the studio honchos said, good looks (even if they were the girl next door looks)". AllMusic biographer William Ruhlmann wrote that "the core of her significance as an artist remains her amazing voice and emotional commitment to her songs", and believes that "her career is sometimes viewed more as an object lesson in Hollywood excess than as the remarkable string of multimedia accomplishments it was". In 2012, Strassler described Garland as "more than an icon... 
Like Charlie Chaplin and Lucille Ball, she created a template that the powers that be have forever been trying, with varied levels of success, to replicate."
Garland's live performances towards the end of her career are still remembered by fans who attended them as "peak moments in 20th-century music". She has been the subject of over two dozen biographies since her death, including the well-received Me and My Shadows: A Family Memoir by her daughter, Lorna Luft, whose memoir was later adapted into the television miniseries Life with Judy Garland: Me and My Shadows, which won Emmy Awards for the two actresses who portrayed her (Tammy Blanchard and Judy Davis). Strassler observed that Garland "created one of the most storied cautionary tales in the industry, thanks to her the many excesses and insecurities that led to her early death by overdose".
Garland was posthumously awarded the Grammy Lifetime Achievement Award in 1997. Several of her recordings have been inducted into the Grammy Hall of Fame. These include "Over the Rainbow", which was ranked as the number one movie song of all time in the American Film Institute's "100 Years...100 Songs" list. Four more Garland songs are featured on the list: "Have Yourself a Merry Little Christmas" (No. 76), "Get Happy" (No. 61), "The Trolley Song" (No. 26), and "The Man That Got Away" (No. 11). She has twice been honored on U.S. postage stamps, in 1989 (as Dorothy) and again in 2006 (as Vicki Lester from A Star Is Born). While on tour in 1964, Garland identified "Over the Rainbow" as her favorite of all the songs she had ever recorded, to which Trussel observed that "Her career would remain inextricably linked". Garland would frequently use an overture from "Over the Rainbow" as her entrance music during concerts and television appearances. According to Paglia, the more Garland performed "Over the Rainbow", the more it "became her tragic anthem ... a dirge for artistic opportunities squandered, and for personal happiness permanently deferred". In 1998, Carnegie Hall hosted a two-concert tribute to Garland, which they promoted as "a tribute to the world's greatest entertainer".
Artists who cite Garland as an influence include LeAnn Rimes, Kim Petras, Ariana Grande, Sam Smith, Ben Platt, Rufus Wainwright, Marlee Matlin and Anne Hathaway.
Subsequent celebrities who have suffered from personal struggles with drug addiction and substance abuse have been compared to Garland, particularly Michael Jackson. Garland's elder daughter Liza Minnelli had a personal life that was almost parallel to that of her mother's, having struggled with substance abuse and several unsuccessful marriages. Paglia observed that actress Marilyn Monroe would exhibit behavior which was similar to that which Garland had exhibited a decade earlier in Meet Me in St. Louis, particularly tardiness.
On June 25, 2019, The New York Times Magazine listed Judy Garland among hundreds of artists whose material was reportedly destroyed in the 2008 Universal fire.
Gay icon
Garland had a large fan base in the gay community and became a gay icon. Reasons given for her standing among gay men are the admiration of her ability as a performer, the way her personal struggles mirrored those of gay men in the United States during the height of her fame, and her value as a camp figure. In the 1960s, a reporter asked how she felt about having a large gay following. She replied, "I couldn't care less. I sing to people!"
Portrayals in fiction
Garland has been portrayed on television by Andrea McArdle in Rainbow (1978), Tammy Blanchard (young Judy) and Judy Davis (older Judy) in Life with Judy Garland: Me and My Shadows (2001), and Sigrid Thornton in Peter Allen: Not The Boy Next Door (2015). Harvey Weinstein optioned Get Happy: The Life of Judy Garland, and a stage show and film based on it were slated to star Anne Hathaway. Renée Zellweger portrayed Garland in the biopic Judy, which was released in 2019.
On stage, Garland is a character in the musical The Boy from Oz (1998), portrayed by Chrissy Amphlett in the original Australian production and by Isabel Keating on Broadway in 2003. End of the Rainbow (2005) featured Caroline O'Connor as Garland and Paul Goddard as Garland's pianist. Adrienne Barbeau played Garland in The Property Known as Garland (2006) and The Judy Monologues (2010) initially featured male actors reciting Garland's words before it was revamped as a one-woman show. |
def bone_length_penalty(dataset, keypoints, pred_out):
    """Penalize temporal variation of skeleton bone lengths.

    Encourages predicted 3D poses to keep each bone at a constant length
    across the frames of a sequence by summing the per-sequence variance
    of every bone's length.

    Args:
        dataset: Dataset name; the bone topology below is only defined
            for 'h36m' (Human3.6M 17-joint skeleton).
        keypoints: Keypoint-source identifier. The penalty is skipped for
            'sh_ft_h36m', whose joint layout does not match the topology
            below.
        pred_out: Predicted joint positions, tensor of shape
            (batch, frames, joints, 3).

    Returns:
        Scalar tensor: sum over batch and bones of the variance (across
        frames, dim=1) of each bone's length. Returns the int 0 for
        single-frame input or for dataset/keypoint combinations without
        a defined bone topology.
    """
    loss_bone = 0
    # Variance over time is meaningless for a single frame.
    if pred_out.size(1) == 1:
        return 0
    # BUGFIX: original used `keypoints is not 'sh_ft_h36m'` — identity
    # comparison on strings is unreliable (and a SyntaxWarning in modern
    # Python); value equality is intended.
    if dataset == 'h36m' and keypoints != 'sh_ft_h36m':
        # Parent-child joint pairs of the Human3.6M 17-joint skeleton:
        # right leg, left leg / arms, and the spine-head chain.
        bone_id = [(0, 4), (4, 5), (5, 6), (8, 11), (11, 12), (12, 13),
                   (0, 1), (1, 2), (2, 3), (8, 14), (14, 15), (15, 16),
                   (0, 7), (7, 8), (8, 9), (9, 10)]
        for (i, j) in bone_id:
            # Bone length per (batch, frame): Euclidean distance between
            # the two joints.
            bone = torch.norm(pred_out[:, :, i] - pred_out[:, :, j], dim=-1)
            # Accumulate the variance across frames, summed over the batch.
            loss_bone += torch.sum(torch.var(bone, dim=1))
    return loss_bone
Mechanical and Electrical Properties of DNA Hydrogel-Based Composites Containing Self-Assembled Three-Dimensional Nanocircuits Molecular self-assembly of DNA has been developed as an effective construction strategy for building complex materials. Among them, DNA hydrogels are known for their simple fabrication process and their tunable properties. In this study, we have engineered, built, and characterized a variety of pure DNA hydrogels using DNA tile-based crosslinkers and different sizes of linear DNA spacers, as well as DNA hydrogel/nanomaterial composites using DNA/nanomaterial conjugates with carbon nanotubes and gold nanoparticles as crosslinkers. We demonstrate the ability of this system to self-assemble into three-dimensional percolating networks when carbon nanotubes and gold nanoparticles are incorporated into the DNA hydrogel. These hydrogel composites showed interesting non-linear electrical properties. We also demonstrate the tuning of rheological properties of hydrogel-based composites using different types of crosslinkers and spacers. The viscoelasticity of DNA hydrogels is shown to dramatically increase by the use of a combination of interlocking DNA tiles and DNA/carbon nanotube crosslinkers. Finally, we present measurements and discuss electrically conductive nanomaterials for applications in nanoelectronics. Introduction Self-assembly by molecular recognition is a fundamental property of soft matter that can be utilized as a building tool to construct nanoscale to macroscale materials via bottom-up approaches. Using programmed assembly of nucleic acid molecules, structural DNA nanotechnology has rapidly expanded to construct sophisticated biomaterials. Beyond self-assembly, DNA is also biocompatible and can be readily conjugated with other bio-/nanomaterials including proteins and conductive polymers. 
Leveraging these capabilities, DNA-based hydrogels have drawn a lot of attention starting with basic research and moving to applications such as biomedicine, biosensing, and drug delivery. The most common strategies to form DNA-based hydrogels are through complementary strand hybridization, enzyme-catalyzed assembly, and molecular entanglement. Studies based extensively upon hybridization focused mainly on pure DNA hydrogels with three-dimensional (3D) hydrophilic networks crosslinked via complementary basepairing. These hydrogels typically employed multistrand DNA tiles to construct the multivalent, crosslinking structural members (crosslinkers) as well as the spacer units (spacers) designed to assemble and control spacing between adhesive arms of the crosslinkers. DNA hydrogels contain available free volume between their polymeric chains in which other nanomaterials can be trapped, thus providing them the capacity to non-specifically incorporate functional components. Recently, studies have described strategies for coating from biological neural networks that display extraordinary signal dynamics and processing abilities, we aimed to mimic some aspects of the morphology of natural neural networks using DNA self-assembly to fabricate nanoelectronic devices with measurable function. Non-linear electrical properties of nanocomposites that integrate DNA-modified CNTs are reported. Our eventual goal is to harness molecular recognition to precisely control the configuration and connection of nanomaterials to self-assemble into controllable nanostructures and, thus, to engineer, fabricate, and characterize DNA-based hydrogels for desired applications. Future DNA hydrogel composites may find impactful application as building blocks in artificial computer hardware, with architectures inspired by natural neural systems for memory and information-processing applications. Synthesis of gold nanoparticles (AuNPs). 
AuNPs were synthesized based on a method adapted from the standard citrate reduction procedures. First, all glassware was cleaned with aqua regia and then rinsed with DI water. After the glassware was dried Synthesis of gold nanoparticles (AuNPs). AuNPs were synthesized based on a method adapted from the standard citrate reduction procedures. First, all glassware was cleaned with aqua regia and then rinsed with DI water. After the glassware was dried completely, 500 mL of 1 mM hydrogen tetrachloroaurate (III) trihydrate in DI water was prepared in a round-bottom flask and heated to a vigorous boil with stirring. Then, 50 mL of 38.8 mM sodium citrate tribasic dihydrate in DI water was added to the gold solution flask and the reaction was allowed to proceed for 15 min. The solution turned from yellow to clear, to black, to purple, and finally to deep red. Lastly, the solution was cooled down to room temperature. Synthesized AuNPs were characterized by transmission electron microscopy (TEM). The concentration of AuNPs was estimated with a UV-Vis spectrophotometer and calculated using the Beer-Lambert equation, A = bC. Construction of DNA tiles and spacers. To construct Y-shaped DNA tiles, 10 L of 10 mM Y 1, Y 2, and Y 3 precursor strands for the building blocks was added to a folding buffer solution (20 mM Tris-HCl (pH 7.5) and 100 mM NaCl) to obtain a final concentration of 1 mM for each strand. Then, the mixture went through a heat-annealing process where it was heated to 95°C for 5 min and then cooled to room temperature over 30 min. Similarly, the X-shaped DNA tiles were assembled by mixing 8 L of 10 mM precursor strands X 1, X 2, X 3, and X 4 in the folding buffer solution to obtain a final concentration of 1 mM for each strand. The mixture went through the same heat-annealing process as described above. 
To construct spacers, 15 L of the two 10 mM precursor strands for the spacers was mixed in the folding buffer (to obtain a final concentration of 1.5 mM for each strand). The mixture then went through the same heat-annealing process described above for DNA tiles. Spacers were also made to different concentrations to pair with different types of crosslinkers. All pH values of the buffers were measured with a standard pH meter (Mettler Toledo SevenEasy TM, Columbus, USA). DNA-assisted solubilization of CNTs and DNA-CNT conjugate formation. We constructed DNA-CNT conjugates by wrapping CNTs with DNA based on a previously reported method. Briefly, 120 L HEPES (final concentration 50 mM, pH 7.6), 1.2 mg CNTs, and 15 L of 10 mM DNA strand C 1 were mixed together. The mixture was sonicated in an ice-water bath for 30 min using a 100-W bath sonicator. Then, 15 L of 10 mM DNA strand C 2 was added. The solution was incubated at room temperature overnight and then stored at 4°C. DNA-AuNP conjugates. DNA-decorated AuNPs were synthesized with a previously reported method. First, 16 L of DNA stock solution of sequence G (100 M in 5 mM HEPES buffer, pH 7.4) was added to 1.6 mL AuNP solution (10 nM). The solution was mixed by brief vortexing. Then, 32 L of 500 mM citrateHCl buffer, pH 3 (final 10 mM), was added to the AuNP solution (1 L of buffer per 50 L of AuNP solution). The solution once again went through vortex mixing and was incubated at room temperature for 3 min. Then, the pH of the AuNP solution was adjusted back to neutral by adding 96 L of 500 mM HEPES buffer (pH 7.6, 3 L of buffer per 50 L of AuNP solution). The solution was then incubated for 5 to 10 min at room temperature. The DNA-AuNP mixture was centrifuged at 13,300 rpm for 6 min, and the supernatant was removed and discarded. The pellet was washed four times with 5 mM HEPES buffer (pH 7.6) and centrifuged to remove any unbound DNA strands. 
The final DNA-AuNP conjugate was redispersed in 100 L of 5 mM HEPES buffer (pH 7.6) for further use. Construction of pure and DNA hydrogel composites. To make hydrogels using the self-assembled DNA tiles or DNA/nanomaterials conjugates, and spacers, desired volumes of crosslinker and spacer stocks were combined on a piece of parafilm using the concentrations and ratios listed in Table S2. For example, 10 L of Y-shaped DNA tiles stock and 10 L of spacer stock were added on a piece of parafilm and immediately mixed. DNA hydrogels were formed within one minute, and the DNA gel samples were immediately tested. TEM and analysis. The dimensions and morphology of AuNPs, DNA-AuNP conjugates, DNA-CNT conjugates, and dehydrated DNA hydrogel composites were imaged using an FEI Talos F200X scanning/transmission electron microscope (Hillsboro, OR, USA) at an accelerating voltage of 200 kV. CNT samples were drop cast and dried onto 300-mesh copper grids with lacey formvar support film reinforced by a heavy coating of carbon (Ted Pella, 01883, Redding, USA). AuNP samples were prepared on 200-mesh copper grids with a formvar film covered with a light layer of carbon (Ted Pella, 01800-F). Dimensions of the imaged samples were measured with ImageJ software (Bethesda, MD, USA). Measurement of rheological properties of hydrogels. A TA Instruments DHR-2 stress-controlled rheometer (New Castle, DE, USA) was used to perform small-amplitude oscillation frequency sweeps at room temperature. An aluminum plate of 20-mm diameter Appl. Sci. 2021, 11, 2245 5 of 15 was used as the top plate. We prepared DNA hydrogel samples by pipetting the components onto parafilm; following gel formation, the samples were transferred to the rheometer plate without pipetting, to avoid shearing the gels. For a~30-L hydrogel sample, the gap distance was set as 35 m. The applied strain was set to 1%, while the angular frequency was decreased from 100 to 0.1 rad/s. Five points were collected per decade. 
Electrodes and electrical measurement setup. Gold electrodes of parallel lines with defined widths and gap distances were fabricated via thin film vapor deposition. To perform current-voltage (IV) curve measurement, the sample was connected to a socket board in a Faraday cage (Hewlett Packard Test Fixture Analyzer 16058A, Palo Alto, CA, USA) connected to a 2-channel (medium power) source/monitor unit module (Agilent Technologies E5272A, Santa Clara, CA, USA). Sequence Design of Crosslinker and Spacer Strands for DNA Gel Formations We constructed DNA hydrogels by mixing crosslinker and spacer modules that associate based on DNA-DNA hybridization, as shown in Figure 1. Four types of crosslinkers (Y-shaped DNA tiles, X-shaped DNA tiles, DNA-CNT conjugates, and DNA-AuNP conjugates) and three different lengths of spacers (Ss, Sm, and Sl; short (33 nt), medium (44 nt), and long (55 nt), respectively) were tested. With these building blocks, we produced two major types of hydrogels: pure DNA hydrogels and DNA/nanomaterial hydrogel composites. For DNA crosslinkers, we used two types of branching crosslinkers called Y-shaped and X-shaped DNA tiles that were assembled from three and four ssDNA strands, respectively. Each arm in the DNA tile carried a sticky end complementary to sticky ends on the spacers. We adopted sequences of strands D 1, D 2, and D 3 from Xing et al. to construct Y-shaped DNA tiles. Similarly, we modified the sequences of strands X 01, X 02, X 03, and X 04 reported by Um et al. by swapping in our sticky end to construct X-shaped DNA tiles. To construct DNA-CNT conjugates, we applied a DNA sequence containing multiple repeated GT units that wrap around CNTs. Specifically, we adopted Sequence C as reported by Cheng et al. and modified their Sequence D with our sticky end to make it compatible with our spacers. 
The repeated GT units have been proven to efficiently wrap around CNTs well due to strong - interactions between the CNT sidewall and the nucleobase aromatic rings. Zheng et al. also demonstrated a systematic study showing that among all DNA sequences that wrap around CNTs, (GT) n gives the highest dispersion efficiency and only requires 30 min sonication to obtain well-dispersed DNA-wrapped individual nanotubes. A longer sonication treatment breaks CNTs and decreases their high aspect ratio, which is a property essential to our objective of building percolating networks. Therefore, we chose to make DNA-CNT conjugates with (GT) 20 repeat units to allow a shorter sonication time. Strands of this length tightly wrap around nanotubes, while a longer DNA strand wraps around nanotubes more loosely and may also entangle multiple nanotubes. We examined both single-walled CNTs (SWNTs) and multi-walled CNTs (MWNTs) in this study. According to the vendor's specifications, the SWNTs have an average diameter of 0.78 nm with a median length of 1 m. We chose these dimensions because previously reported atomic force microscopy (AFM) studies observed that (GT) 20 can form multiple wraps around SWNTs with an average diameter around 1 nm. We purchased MWNTs of a much larger size with an average outer diameter of 8.7-10 nm and an average length of 10 m. The same study showed that DNA wrapping loosens as the nanotubes become larger and mostly only wrap with one turn around MWNTs of greater diameters. Lastly, we utilized DNA-AuNP conjugates as the fourth type of crosslinker. We synthesized AuNPs with an average diameter of 13 nm using a method adapted from the standard citrate reduction procedure. As-synthesized AuNPs were characterized by TEM, as shown in Figure 2a. DNA-AuNP conjugates were created by attaching ssDNA to the surfaces of AuNPs using the method reported by Zhang et al.. 
Briefly, 13-nm AuNPs at neutral pH adsorb DNA strands with polyadenine (A n ) as the anchoring block due to the strong interaction between adenine and gold. We designed a DNA sequence to contain 13-mer of polyadenine (A 13 ) that is connected with a 12-nt sticky end. The polyadenine sequence strongly adsorbed onto AuNPs with high loading capacity. This method produced DNA-AuNP conjugates with much higher concentrations of DNA and AuNPs compared to another method prepared by polymerase chain reaction (PCR) elongation. diameter of 8.7-10 nm and an average length of 10 m. The same study showed that DNA wrapping loosens as the nanotubes become larger and mostly only wrap with one turn around MWNTs of greater diameters. Lastly, we utilized DNA-AuNP conjugates as the fourth type of crosslinker. We synthesized AuNPs with an average diameter of 13 nm using a method adapted from the standard citrate reduction procedure. As-synthesized AuNPs were characterized by TEM, as shown in Figure 2a. DNA-AuNP conjugates were created by attaching ssDNA to the surfaces of AuNPs using the method reported by Zhang et al.. Briefly, 13-nm AuNPs at neutral pH adsorb DNA strands with polyadenine (An) as the anchoring block due to the strong interaction between adenine and gold. We designed a DNA sequence to contain 13-mer of polyadenine (A13) that is connected with a 12-nt sticky end. The polyadenine sequence strongly adsorbed onto AuNPs with high loading capacity. This method produced DNA-AuNP conjugates with much higher concentrations of DNA and AuNPs compared to another method prepared by polymerase chain reaction (PCR) elongation. Spacers are linear duplexes formed by two ssDNAs that each contains a sticky end that is complementary to the sticky ends of crosslinkers. We used three different lengths of spacers 33 nt (Ss), 44 nt (Sm), and 55 nt (Sl). The spacer sequences were inspired by Xing et al. and adapted to the other components of our system. 
All the spacer strands have Spacers are linear duplexes formed by two ssDNAs that each contains a sticky end that is complementary to the sticky ends of crosslinkers. We used three different lengths of spacers 33 nt (Ss), 44 nt (Sm), and 55 nt (Sl). The spacer sequences were inspired by Xing et al. and adapted to the other components of our system. All the spacer strands have the same sticky ends that are complementary to the sticky ends on crosslinkers. All the DNA sequences were examined using NUPACK online software (Pasadena, CA, USA) to predict their most stable folded structures to avoid unwanted secondary structures. The final DNA sequences offer a minimum free energy of secondary structure. Characterization of Conjugates and Hydrogels To understand and compare the morphologies of conjugates and hydrogels, we used TEM imaging techniques to visualize AuNP, SWNT, and MWNT conjugates with and without DNA spacers to show how the crosslinked DNA networks connect and arrange the structures of these nanomaterials. We first imaged the original AuNPs right after synthesis, then the DNA-AuNP conjugates and DNA-AuNP hydrogel constructed by combining DNA-AuNP conjugates with Sl. With the same concept, we also imaged DNA-SWNT conjugates and DNA-MWNT conjugates and then took a look at hydrogels made of these conjugates with the long spacers (Sl). The TEM images of the DNA-CNT hydrogels showed that amorphous materials with hierarchical structures were formed. We can clearly see the larger-scale CNT networks as well as DNA binders on the surfaces of nanotubes, especially around the junctions of CNTs (Figure 2g,h). Rheological Properties of DNA Hydrogels Polymeric hydrogels generally demonstrate robust mechanical strength because of their dense, entangled, and crosslinked networks with small mesh sizes. Unlike these conventional hydrogels, pure DNA hydrogels are more thixotropic and can display poor mechanical strength. 
Although the mechanical properties of DNA hydrogels can be fine-tuned by adjusting the type and concentration of initial DNA tiles with different numbers of branches, even the toughest DNA hydrogel only exhibits a storage modulus of a few thousand Pa. Because of this property, the applications of DNA hydrogels are limited to only certain fields. To explore further enhancement of DNA hydrogels' mechanical strength, we implemented two strategies: modifying the DNA building blocks and fortifying the structure with novel nanomaterials that confer mechanical rigidity. We performed small-amplitude oscillatory rheology to understand the gelation properties of pure and DNA hydrogel composites. The storage modulus (G') and loss modulus (G") represent the elastic and viscous contributions to the total stress. Viscoelastic materials with solid-like properties are formed due to internal crosslinks within the materials; crosslinking can come from chemical bonds or physical-chemical interactions between individual molecules. Using the testing conditions described in the methods section, we performed oscillation measurements on hydrogels formed by Y-shaped DNA tiles, X-shaped DNA tiles, DNA-SWNT conjugates, DNA-MWNT conjugates, and, finally, DNA-AuNP conjugates with spacers of three different lengths. The goals of this group of tests were to study the influence of length of spacers on different types of crosslinkers, as well as to compare the influence of different crosslinkers. For all tested samples, we saw a higher storage modulus (G') than loss modulus (G") across the tested angular frequencies, as shown in Figure 3, demonstrating solid-like behavior, which is typically observed for hydrogels constructed with DNA. As Figure 3a shows, when constructing pure DNA hydrogels with the same crosslinkers (X or Y), using shorter spacers gives more solid-like hydrogels as indicated by the higher storage modulus.
The X-shaped DNA tiles also construct more solid-like hydrogels than the Y-shaped DNA tiles with all types of spacers. However, when using conjugates as crosslinkers (see below), longer spacers construct more solid-like hydrogels, opposite to the behavior observed from pure DNA hydrogels. The mechanical strengths of pure hydrogels are also improved by integrating nanomaterials. With the same spacers (Sl), DNA-SWNT conjugates also construct more solid-like hydrogels than DNA-MWNT conjugates, and both DNA-CNT conjugates make more solid-like hydrogels than DNA-AuNP conjugates ( Figure 4). Our next set of tests was to use mixed crosslinkers, combining DNA-CNT conjugates and DNA tiles. The objective of these tests was to show the influence of different crosslinker compositions and to see how mixed crosslinkers of different length scales change the mechanical properties of the final hydrogels. Specifically, we substituted 25%, 50%, and 75% of the DNA-CNT conjugates from the previous test with X-or Y-shaped DNA tiles while keeping the total concentration of sticky ends from all crosslinkers the same. Only long spacer (Sl) was used for these mixed crosslinker tests. The oscillatory measurements showed that hydrogels constructed using Sl and containing a crosslinker mixture of 75% DNA-CNT conjugates and 25% DNA tiles exhibited the highest values of G'. This composition formed hydrogels with G' above 50 kPa, over 100-fold higher than the G' of pure DNA hydrogels. It is followed by using Sl with 100% DNA-CNT conjugates as crosslinkers. Then, the mechanical strength dropped even further when using Sl with 50% DNA-CNT conjugates and 50% DNA tiles crosslinkers, and hydrogels constructed by Sl with 25% DNA-CNT conjugates and 75% DNA tiles had the lowest storage modulus. All DNA hydrogel composites still had higher mechanical strengths than pure DNA hydrogels. 
Moreover, using DNA-SWNT conjugates always gave more solid-like hydrogels than using DNA-MWNT conjugates in the above compositions, and using X-shaped DNA tiles resulted in more solid-like hydrogels than using Y-shaped DNA tiles in these compositions. Electrical Characterization In order to minimize undesired complications due to ionic conduction associated with performing electrical measurements on nanocircuits embedded within hydrogels, we performed two-dimensional measurements of dehydrated hydrogels instead. We used a two-terminal current-voltage (IV) characterization setup with parallel line-shaped gold electrodes. As shown in Figure S5, the gold microelectrodes were fabricated with 200 m spacing and were wire-bonded to a commercial ball grid array (BGA) board connected to the setup. The hydrogel was placed across the gap between microelectrodes and then dried completely before IV curves were recorded. IV characterization allows the measurement of small conductivity as a response to an applied voltage. During the test, the current was measured during 10 consecutive pulses of 10 V. The goal of IV characterization is to investigate whether or not DNA crosslinking creates more organized or clumpy networks based on the changes in conductivity after adding spacers to the conjugates. As shown in Figure 5a, dehydrated samples of DNA-SWNT conjugates and DNA-SWNT hydrogel both showed non-linear behaviors. With the same applied voltage pulses, the measured current increased greatly in the hydrogel samples with spacers compared to DNA-SWNT conjugates-over a 650-fold increase. Since MWNTs are highly conductive, they showed a wire-like behavior with a much higher conductivity compared to the SWNT samples. In the MWNT case, adding DNA spacers also increased the conductivity of DNA-MWNT conjugates by 45-fold-see Figure 5b. 
When testing with DNA-AuNP samples, the current increased four-fold after adding spacers to the conjugates and forming gel-like networks, as shown in Figure 5c. These electrical measurements demonstrate that modification and organization of nanomaterials using DNA strands can be used to control the electrical behavior of percolating networks and can change the conductivity of composites by using DNA self-assembly to connect the nanomaterials. Characterizations We first characterized citrate-stabilized AuNPs with TEM, demonstrating that synthesized nanoparticles were homogeneous and that their average diameter was 13.1 ± 1.8 nm (Figures 2a and S3). As-synthesized AuNPs appeared clustered in groups of two or three, with no clear spaces between individual nanoparticles within the groups. However, the images of DNA-AuNP conjugates showed that DNA-attached AuNPs have a clear and much more uniform spacing between neighboring particles, which was measured to have an average of 0.78 nm (Figure 2b). By comparing the morphologies of AuNP clustering in Figures 2a,b, we showed that DNA strands modified the surfaces of AuNPs. We further characterized DNA-AuNP conjugates using gel electrophoresis. Non-DNA-attached AuNPs aggregated in the well and failed to enter the gel. DNA-modified AuNPs did not aggregate and were able to migrate into the gel (as shown in Figure S4). The TEM images agreed with the gel electrophoresis results and demonstrated that we successfully decorated AuNPs with ssDNA. Next, we constructed DNA-AuNP hydrogel composites Characterizations We first characterized citrate-stabilized AuNPs with TEM, demonstrating that synthesized nanoparticles were homogeneous and that their average diameter was 13.1 ± 1.8 nm (Figure 2a and Figure S3). As-synthesized AuNPs appeared clustered in groups of two or three, with no clear spaces between individual nanoparticles within the groups. 
However, the images of DNA-AuNP conjugates showed that DNA-attached AuNPs have a clear and much more uniform spacing between neighboring particles, which was measured to have an average of 0.78 nm (Figure 2b). By comparing the morphologies of AuNP clustering in Figure 2a,b, we showed that DNA strands modified the surfaces of AuNPs. We further characterized DNA-AuNP conjugates using gel electrophoresis. Non-DNA-attached AuNPs aggregated in the well and failed to enter the gel. DNA-modified AuNPs did not aggregate and were able to migrate into the gel (as shown in Figure S4). The TEM images agreed with the gel electrophoresis results and demonstrated that we successfully decorated AuNPs with ssDNA. Next, we constructed DNA-AuNP hydrogel composites by combining DNA-AuNP conjugates and long spacers (Sl). We further confirmed the formation of hydrogel composites by characterization of a hydrogel sample using TEM (Figure 2c). The TEM images showed multiple layers of AuNPs on top of each other that appeared to be held together during sample collapse from drying. Comparing that with Figure 2b, it is apparent that DNA spacers had linked AuNPs together in a 3D structure. Although differences in clustering and morphology between DNA-SWNT in conjugates versus hydrogel are not entirely distinctive using TEM characterization (Figure 2d,e), the differences were apparent in the MWNT samples (Figure 2f,g). DNA-MWNT conjugates appeared to gather on the lacy carbon film on the copper grids and did not appear to fill in most holes in the film. On the other hand, the dehydrated hydrogel made of DNA-MWNT conjugates with long spacers (Sl) covered the entire lacy film (including holes in the film) with its own networks formed by the nanotubes. DNA spacers helped to connect MWNTs together into a web-like structure over a large area. When taking a closer look at individual MWNTs, we observed regions of coating over the nanotubes, with an average thickness of 1.51 nm. 
Figure 2h clearly shows that the middle area of the MWNT was not wrapped by DNA, while the areas close to MWNT junctions were coated. Figure 2h also shows a thicker coating around MWNT junctions, indicating the location of spacers. These images represent a direct observation of DNA acting as a "smart" glue to bind and connect MWNTs together. Overall, we successfully constructed pure DNA hydrogels and DNA hydrogel composites with nanomaterials. Rheological Results We would like to construct hydrogels of reasonable mechanical strength where the crosslinked networks can effectively prevent diffusion of the nanomaterials and, thus, to achieve hydrogels with confined architecture for further applications. Having a higher storage modulus than loss modulus from oscillation frequency tests indicated that we indeed made hydrogels with solid-like properties. When using the same DNA tiles as crosslinkers, we observed that shorter spacers constructed hydrogels of higher mechanical strengths as they are able to build a denser network. The situation is reversed and longer spacers constructed more solid-like hydrogels when using DNA/nanomaterial conjugates as crosslinkers. This is because the CNTs and AuNPs we used are much larger in scale compared to DNA molecules; thus, having longer spacers helped to build more and more stable bridges between nanomaterial/crosslinker components. Therefore, we used Sl for all electrical studies and structural characterization analysis. CNTs are well known for their mechanical reinforcement applications. Consistent with previous observations in other composites, we observed a huge increase in storage modulus with DNA-CNT hydrogel composites compared to pure DNA hydrogels. When using one type of crosslinker, DNA-SWNT conjugates constructed the most solid-like hydrogels, followed by using DNA-MWNT conjugates, DNA-AuNP conjugates, X-shaped DNA tiles, and, finally, Y-shaped DNA tiles. 
We believe that we observed a higher storage modulus from hydrogels constructed with DNA-SWNT conjugates than hydrogels constructed with DNA-MWNT conjugates because DNA wraps around SWNTs more tightly with more turns. The TEM images (Figure 2) also showed that DNA coats on SWNTs much better than on MWNTs. As the average diameter of CNTs increases from SWNTs (0.78 nm) to MWNTs (~9 nm), (GT) 20 loses its strong binding around the nanotubes, which resulted in a decrease in mechanical performance of hydrogels made with these conjugates. This result shows the importance of crosslinked DNA as the binding material to connect nanomaterials and to build networks. Furthermore, hydrogels constructed with both types of DNA-CNT conjugate crosslinkers showed higher mechanical strength than hydrogels constructed with DNA-AuNP conjugates because the high aspect ratios of CNTs are inherently in favor of providing reinforcement to composites compared to sphere-shaped materials. We further investigated and rationally improved the mechanical properties of hybrid hydrogels by combining DNA/nanomaterial conjugates and DNA tiles as crosslinkers. Since CNTs and AuNPs are in much larger scales than DNA molecules, there are available spaces between individual nanotubes and nanoparticles in the hydrogels, where there is no DNA filling besides the DNA network associated with binding strands and spacers. These hydrogels have the capacity to integrate more materials that can fill in the spaces. Therefore, we used different compositions of DNA/nanomaterial conjugates and DNA tiles to construct hydrogels in which crosslinkers come in different scales. When we used DNA tiles to make up to 25% of crosslinkers and DNA-CNT conjugates for the rest, the resulted hydrogels were even more solid-like than when using 100% DNA-CNT conjugates as crosslinkers. 
This is because substituting some of the DNA-CNT conjugates with a much smaller type of crosslinkers helped to fill in the open spaces between CNTs and, thus, made a denser hydrogel. However, DNA molecules are significantly weaker than CNTs, so we observed a decrease in mechanical strength when substituting more DNA-CNT conjugates with DNA tiles. In summary, we demonstrated adjustment of the mechanical properties of hybrid hydrogels by combining different compositions of crosslinkers and achieved the most solid-like hydrogel when using DNA-CNT conjugates as 75% of crosslinkers and X-shaped DNA tiles for the remaining 25% of crosslinkers. Electrical Studies Studies on the conductivity of DNA mostly agree that DNA is not a good conductor and does not contribute to conductivity in composites when conductive nanomaterials are present. However, DNA has been seen as a good candidate to self-organize nanocircuits into a complex system. Therefore, besides mechanical reinforcement, another objective of integrating nanomaterials into the hydrogel composites is to modify the electrical behavior and add functionality to the hydrogels. For this reason, we used semiconducting SWNTs when making DNA-SWNT conjugates. In a previous electrical study of a single SWNT on parallel gold electrodes, the IV curves showed a saturation of conductance at high voltages. We did not observe such a saturation from IV characterization of SWNT networks from conjugates and hydrogels (Figure 5a). Circuits built with AuNPs have a lower conductivity compared to the ones created with CNTs, since the nanosphere structure does not have the advantage in reaching longrange percolation as nanotubes of much greater aspect ratios do. For the same reason, adding spacers to DNA-AuNP conjugates also does not result in as much of an enhancement in conductivity. 
This result showed that the shape of nanomaterials needs to be considered when designing hydrogels to ensure that the assembled circuits have the desired performance. Increased control of the architectural characteristics of percolation paths formed by CNTs and AuNPs (i.e., length scale, clumpiness, subcircuit structures, etc.) will encourage further investigation of these embedded networks for potential applications in neuromorphic and error-tolerant computing. Conclusions We designed and built pure DNA hydrogels as well as composites using DNA/nanomaterial conjugate crosslinkers, DNA tile crosslinkers, and linear DNA spacers. We characterized the nanomaterial networks in the composites and examined their mechanical and electrical behaviors. We found that shorter spacers form more solid-like hydrogels when combined with pure DNA crosslinkers, while longer spacers construct more solid-like hydrogels when assembled with DNA/nanomaterial crosslinkers. We obtained hydrogel composites with significantly higher mechanical strength by combining DNA-CNT conjugates and up to 25% DNA tiles as crosslinkers. In addition, dried networks from both DNA-SWNT and DNA-AuNP conjugates and hydrogels show non-linear electrical behaviors. By comparing the conductivities of dehydrated networks from conjugates and hydrogels, we showed the ability of DNA self-assembly to integrate and connect percolating networks with nanotubes and nanoparticles. These initial examples of biomolecular functionality by design suggest that the basic concepts of DNA self-assembly can effectively be used to create more complicated materials. Potentially, crosslinkers such as DNA-wrapped CNTs can be used to create more sophisticated conjugates and nanostructures. We can design DNA to realize control in nanoelectronics morphology through connection and arrangement of nanomaterials. 
These materials have potential for applications in 3D integrated circuits and hardware with shorter production time, lower cost, lower power consumption, and higher energy efficiency. Eventually, electronic hardware utilizing 3D integration and assembled using DNA nanotechnology may achieve computing capabilities in certain operations beyond the performance currently achieved by circuits fabricated using traditional lithography techniques. Supplementary Materials: The following are available online at https://www.mdpi.com/2076-341 7/11/5/2245/s1, Figure S1: Photos of (a) SWNT and (b) MWNT dispersions in deionized (DI) water without (left) and with (right) DNA modification. SWNTs and MWNTs without DNA modification settle at the bottom within 15 min after sonication; Figure S2: Photos of pure and DNA hydrogel composites constructed by Sl with (from left to right) Y-shaped DNA tiles, X-shaped DNA tiles, DNA-SWNT conjugates, DNA-MWNT conjugates, and DNA-AuNP conjugates; Figure S3: UV-Vis spectrum of as-synthesized AuNPs. A peak max at 519 nm wavelength proves that these are wellformed AuNPs with an average diameter of 13 nm. UV-Vis spectrum was recorded on a Thermo Scientific NanoDrop 2000c Spectrophotometer (Waltham, USA); Figure S4: The band of pure AuNPs (left) appears violet under visible light, while the band of DNA-AuNP conjugates (right) retains a red color and shows a much better mobility; Figure S5: Gold electrodes of 200 m spacing for electrical characterization; Table S1: Sequences of ssDNA for the preparation of crosslinkers and spacers; Table S2: Concentrations and ratios of crosslinkers and spacers to prepare for hydrogel. |
Screening of different parts of the plant Pandanus odorus for its Cytotoxic and Antimicrobial activity The present study was undertaken to explore the cytotoxic and antimicrobial potential of different parts of the plant Pandanus odorus. The methanol crude extract of different parts of the plant was fractionated with petroleum ether, chloroform and ethyl acetate, and the fractions were used for screening the cytotoxic and antimicrobial potentials using the brine shrimp lethality bioassay and the disc diffusion method, respectively. Kanamycin (30 µg/disc) and vincristine sulphate were used to compare the results of the experiments. All the tested fractions exhibited potential cytotoxic activity. The chloroform extract of the leaf showed the highest cytotoxic activity with an LC50 value of 1.41 µg/ml, and the lowest cytotoxic activity was observed in the case of the petroleum ether fraction of the leaf, having an LC50 value of 12.80 µg/ml. In the case of antimicrobial activity against the tested microorganisms, the ethyl acetate fractions of the leaf showed potent antimicrobial activity against Candida albicans and Saccharomyces cerevisiae with zones of inhibition of 10 mm and 11 mm, respectively, in comparison with the standard kanamycin. 
<reponame>Speakus/cppFundamentalClass
// New BSD License
_Pragma("once");
#ifndef DEMONSTRATE_WHY_IMPOSSIBLE_TO_DO_CLASS_WHICH_WORK_SAME_WAY_AS_NATIVE_TYPES
#error "http://stackoverflow.com/a/26154759/751932"
#endif // DEMONSTRATE_WHY_IMPOSSIBLE_TO_DO_CLASS_WHICH_WORK_SAME_WAY_AS_NATIVE_TYPES
// you could use N from int (or enum) if you want stronger typing between different types
// example:
// typedef Primitive<double, 1> timeT;
// typedef Primitive<double, 2> volumeLevelT;
// Wrapper that gives a primitive type T class semantics while forwarding every
// arithmetic, bitwise, logical and comparison operator to the wrapped value.
// The integer tag N creates distinct, non-interchangeable types over the same T
// (see the typedef examples in the header comment above).
template <class T, int N = 0> class Primitive {
    typedef Primitive<T, N> X;  // shorthand for this exact instantiation
private:
    T value;  // the wrapped primitive value
public:
    // conversion from T to X and vice versa (both implicit, by design)
    Primitive(const T& rhs) : value(rhs) { }
    operator T () const { return value; }
    // Compound assignment: one overload for X op= X, plus a template overload for
    // X op= TT so mixed expressions avoid an extra X -> T -> X round trip.
#define ASSIGN_OPERATOR(oper) \
    X& operator oper(const X& rhs) { value oper rhs.value; return *this; } \
    template <class TT> X& operator oper(const TT& rhs) { value oper rhs; return *this; }
    ASSIGN_OPERATOR(+=);
    ASSIGN_OPERATOR(-=);
    ASSIGN_OPERATOR(*=);
    ASSIGN_OPERATOR(/=);
    ASSIGN_OPERATOR(%=);
    ASSIGN_OPERATOR(&=);
    ASSIGN_OPERATOR(|=);
    ASSIGN_OPERATOR(^=);
    ASSIGN_OPERATOR(<<=);
    ASSIGN_OPERATOR(>>=);
#undef ASSIGN_OPERATOR
    // Binary operators, defined as friends so the left operand may also be a
    // plain TT (e.g. 2 + x). Three overloads each: X op X, TT op X, X op TT.
    // compare
#define OPERATOR_WITH_2_OPERANDS(resT, oper) \
    friend resT operator oper(const X& lhs, const X& rhs) { return lhs.value oper rhs.value; } \
    template <class TT> friend resT operator oper(const TT& lhs, const X& rhs) { return lhs oper rhs.value; } \
    template <class TT> friend resT operator oper(const X& lhs, const TT& rhs) { return lhs.value oper rhs; }
    OPERATOR_WITH_2_OPERANDS(bool, ==);
    OPERATOR_WITH_2_OPERANDS(bool, !=);
    OPERATOR_WITH_2_OPERANDS(bool, >=);
    OPERATOR_WITH_2_OPERANDS(bool, <=);
    OPERATOR_WITH_2_OPERANDS(bool, > );
    OPERATOR_WITH_2_OPERANDS(bool, < );
    OPERATOR_WITH_2_OPERANDS(bool, &&);
    OPERATOR_WITH_2_OPERANDS(bool, ||);
    // two elements to result operators
    OPERATOR_WITH_2_OPERANDS(X, |);
    OPERATOR_WITH_2_OPERANDS(X, &);
    OPERATOR_WITH_2_OPERANDS(X, ^);
    OPERATOR_WITH_2_OPERANDS(X, +);
    OPERATOR_WITH_2_OPERANDS(X, -);
    OPERATOR_WITH_2_OPERANDS(X, *);
    OPERATOR_WITH_2_OPERANDS(X, /);
    OPERATOR_WITH_2_OPERANDS(X, %);
    OPERATOR_WITH_2_OPERANDS(X, <<);
    OPERATOR_WITH_2_OPERANDS(X, >>);
#undef OPERATOR_WITH_2_OPERANDS
    // prefix operators (unary !, +, -, ~); the bool case relies on the
    // implicit X -> T conversion for its result
#define PREFIX_OPERATOR(resT, oper) resT operator oper(void) const { return X(oper(value)); }
    PREFIX_OPERATOR(bool, !);
    PREFIX_OPERATOR(X, +);
    PREFIX_OPERATOR(X, -);
    PREFIX_OPERATOR(X, ~);
#undef PREFIX_OPERATOR
    // postfix, prefix increment & decrement
    const X& operator++(void) { ++value; return *this; }
    const X& operator--(void) { --value; return *this; }
    X operator++(int) { return X(value++); }
    X operator--(int) { return X(value--); }
};
|
The VMC Survey - XIII : Type II Cepheids in the Large Magellanic Cloud The VISTA survey of the Magellanic Clouds System (VMC) is collecting deep $K_\mathrm{s}$--band time--series photometry of the pulsating variable stars hosted in the system formed by the two Magellanic Clouds and the Bridge connecting them. In this paper we have analysed a sample of 130 Large Magellanic Cloud (LMC) Type II Cepheids (T2CEPs) found in tiles with complete or near complete VMC observations for which identification and optical magnitudes were obtained from the OGLE III survey. We present $J$ and $K_\mathrm{s}$ light curves for all 130 pulsators, including 41 BL Her, 62 W Vir (12 pW Vir) and 27 RV Tau variables. We complement our near-infrared photometry with the $V$ magnitudes from the OGLE III survey, allowing us to build a variety of Period-Luminosity ($PL$), Period-Luminosity-Colour ($PLC$) and Period-Wesenheit ($PW$) relationships, including any combination of the $V, J, K_\mathrm{s}$ filters and valid for BL Her and W Vir classes. These relationships were calibrated in terms of the LMC distance modulus, while an independent absolute calibration of the $PL(K_\mathrm{s})$ and the $PW(K_\mathrm{s},V)$ was derived on the basis of distances obtained from $Hubble Space Telescope$ parallaxes and Baade-Wesselink technique. When applied to the LMC and to the Galactic Globular Clusters hosting T2CEPs, these relations seem to show that: 1) the two population II standard candles RR Lyrae and T2CEPs give results in excellent agreement with each other; 2) there is a discrepancy of $\sim$0.1 mag between population II standard candles and Classical Cepheids when the distances are gauged in a similar way for all the quoted pulsators. However, given the uncertainties, this discrepancy is within the formal 1$\sigma$ uncertainties. |
def retrieve_candidates_from_non_unique_identifiers(self, google_civic_election_id_list, state_code,
                                                    candidate_twitter_handle, candidate_name,
                                                    ignore_candidate_id_list=None):
    """
    Look for one candidate using "fuzzy" identifiers, trying progressively looser
    matches until something is found:
      1) case-insensitive Twitter handle,
      2) exact (case-insensitive) candidate name,
      3) first AND last name each contained in the stored name,
      4) ANY single word of the incoming name contained in the stored name.

    :param google_civic_election_id_list: elections the candidate may belong to
    :param state_code: optional two-letter state code used to narrow every search
    :param candidate_twitter_handle: raw text containing a Twitter handle
    :param candidate_name: full display name of the candidate
    :param ignore_candidate_id_list: we_vote_ids excluded from every search
    :return: dict with 'candidate_found' + 'candidate' for a unique match,
             'candidate_list_found' + 'candidate_list' when several match,
             plus 'success', 'status', and 'multiple_entries_found'
    """
    # A mutable default ([]) is shared between calls in Python; use None instead.
    if ignore_candidate_id_list is None:
        ignore_candidate_id_list = []
    keep_looking_for_duplicates = True
    candidate = CandidateCampaign()
    candidate_found = False
    candidate_list = []
    candidate_list_found = False
    candidate_twitter_handle = extract_twitter_handle_from_text_string(candidate_twitter_handle)
    multiple_entries_found = False
    from office.models import ContestOfficeManager
    office_manager = ContestOfficeManager()
    success = True
    status = ""
    # Offices "visiting" these elections are also valid matches, so fetch them once.
    office_visiting_list_we_vote_ids = office_manager.fetch_office_visiting_list_we_vote_ids(
        host_google_civic_election_id_list=google_civic_election_id_list)

    # Attempt 1: match on the Twitter handle (strongest identifier)
    if keep_looking_for_duplicates and positive_value_exists(candidate_twitter_handle):
        try:
            candidate_query = CandidateCampaign.objects.all()
            candidate_query = candidate_query.filter(candidate_twitter_handle__iexact=candidate_twitter_handle)
            candidate_query = candidate_query.filter(
                Q(google_civic_election_id__in=google_civic_election_id_list) |
                Q(contest_office_we_vote_id__in=office_visiting_list_we_vote_ids))
            if positive_value_exists(state_code):
                candidate_query = candidate_query.filter(state_code__iexact=state_code)
            if positive_value_exists(ignore_candidate_id_list):
                candidate_query = candidate_query.exclude(we_vote_id__in=ignore_candidate_id_list)
            candidate_list = list(candidate_query)
            if len(candidate_list):
                status += 'RETRIEVE_CANDIDATES_FROM_NON_UNIQUE-CANDIDATE_LIST_RETRIEVED '
                if len(candidate_list) == 1:
                    multiple_entries_found = False
                    candidate = candidate_list[0]
                    candidate_found = True
                    keep_looking_for_duplicates = False
                    success = True
                    status += "CANDIDATE_FOUND_BY_TWITTER "
                else:
                    candidate_list_found = True
                    multiple_entries_found = True
                    keep_looking_for_duplicates = False
                    status += "MULTIPLE_TWITTER_MATCHES "
        except CandidateCampaign.DoesNotExist:
            success = True
            status += "RETRIEVE_CANDIDATES_FROM_NON_UNIQUE-CANDIDATE_NOT_FOUND "
        except Exception as e:
            status += "RETRIEVE_CANDIDATES_FROM_NON_UNIQUE-CANDIDATE_QUERY_FAILED1 "
            keep_looking_for_duplicates = False

    # Attempt 2: exact (case-insensitive) match on the whole name
    if keep_looking_for_duplicates and positive_value_exists(candidate_name):
        try:
            candidate_query = CandidateCampaign.objects.all()
            candidate_query = candidate_query.filter(candidate_name__iexact=candidate_name)
            candidate_query = candidate_query.filter(
                Q(google_civic_election_id__in=google_civic_election_id_list) |
                Q(contest_office_we_vote_id__in=office_visiting_list_we_vote_ids))
            if positive_value_exists(state_code):
                candidate_query = candidate_query.filter(state_code__iexact=state_code)
            if positive_value_exists(ignore_candidate_id_list):
                candidate_query = candidate_query.exclude(we_vote_id__in=ignore_candidate_id_list)
            candidate_list = list(candidate_query)
            if len(candidate_list):
                status += 'CANDIDATE_ENTRY_EXISTS1 '
                success = True
                if len(candidate_list) == 1:
                    candidate = candidate_list[0]
                    candidate_found = True
                    status += candidate.we_vote_id + " office: " + candidate.contest_office_we_vote_id + " "
                    keep_looking_for_duplicates = False
                else:
                    # More than one entry found with a match in CandidateCampaign
                    candidate_list_found = True
                    keep_looking_for_duplicates = False
                    multiple_entries_found = True
            else:
                success = True
                status += 'CANDIDATE_ENTRY_NOT_FOUND-EXACT '
        except CandidateCampaign.DoesNotExist:
            success = True
            status += "RETRIEVE_CANDIDATES_FROM_NON_UNIQUE-CANDIDATE_NOT_FOUND-EXACT_MATCH "
        except Exception as e:
            status += "RETRIEVE_CANDIDATES_FROM_NON_UNIQUE-CANDIDATE_QUERY_FAILED2 "

    # Attempt 3: stored name must contain both the first AND the last name
    if keep_looking_for_duplicates and positive_value_exists(candidate_name):
        try:
            candidate_query = CandidateCampaign.objects.all()
            candidate_query = candidate_query.filter(
                Q(google_civic_election_id__in=google_civic_election_id_list) |
                Q(contest_office_we_vote_id__in=office_visiting_list_we_vote_ids))
            if positive_value_exists(state_code):
                candidate_query = candidate_query.filter(state_code__iexact=state_code)
            first_name = extract_first_name_from_full_name(candidate_name)
            candidate_query = candidate_query.filter(candidate_name__icontains=first_name)
            last_name = extract_last_name_from_full_name(candidate_name)
            candidate_query = candidate_query.filter(candidate_name__icontains=last_name)
            if positive_value_exists(ignore_candidate_id_list):
                candidate_query = candidate_query.exclude(we_vote_id__in=ignore_candidate_id_list)
            candidate_list = list(candidate_query)
            if len(candidate_list):
                status += 'CANDIDATE_ENTRY_EXISTS2 '
                success = True
                if len(candidate_list) == 1:
                    candidate = candidate_list[0]
                    candidate_found = True
                    status += candidate.we_vote_id + " office: " + candidate.contest_office_we_vote_id + " "
                    keep_looking_for_duplicates = False
                else:
                    # More than one entry found with a match in CandidateCampaign
                    candidate_list_found = True
                    keep_looking_for_duplicates = False
                    multiple_entries_found = True
            else:
                status += 'CANDIDATE_ENTRY_NOT_FOUND-FIRST_OR_LAST '
                success = True
        except CandidateCampaign.DoesNotExist:
            status += "RETRIEVE_CANDIDATES_FROM_NON_UNIQUE-CANDIDATE_NOT_FOUND-FIRST_OR_LAST_NAME "
            success = True
        except Exception as e:
            status += "RETRIEVE_CANDIDATES_FROM_NON_UNIQUE-CANDIDATE_QUERY_FAILED3 "

    # Attempt 4 (loosest): stored name contains ANY single word of the incoming name.
    # NOTE(review): this section was corrupted in the original source — it was missing
    # its "if/try" header and filtered on an undefined `google_civic_election_id`;
    # it has been reconstructed to mirror attempts 1-3.
    if keep_looking_for_duplicates and positive_value_exists(candidate_name):
        try:
            candidate_query = CandidateCampaign.objects.all()
            candidate_query = candidate_query.filter(
                Q(google_civic_election_id__in=google_civic_election_id_list) |
                Q(contest_office_we_vote_id__in=office_visiting_list_we_vote_ids))
            if positive_value_exists(state_code):
                candidate_query = candidate_query.filter(state_code__iexact=state_code)
            search_words = candidate_name.split()
            filters = []
            for one_word in search_words:
                new_filter = Q(candidate_name__icontains=one_word)
                filters.append(new_filter)
            at_least_one_filter_used = False
            if len(filters):
                at_least_one_filter_used = True
                # Take the first query...
                final_filters = filters.pop()
                # ...and "OR" the remaining items in the list
                for item in filters:
                    final_filters |= item
                candidate_query = candidate_query.filter(final_filters)
            if positive_value_exists(ignore_candidate_id_list):
                candidate_query = candidate_query.exclude(we_vote_id__in=ignore_candidate_id_list)
            candidate_list = list(candidate_query)
            if len(candidate_list) and at_least_one_filter_used:
                status += 'CANDIDATE_ENTRY_EXISTS3 '
                success = True
                if len(candidate_list) == 1:
                    candidate = candidate_list[0]
                    candidate_found = True
                    status += candidate.we_vote_id + " office: " + candidate.contest_office_we_vote_id + " "
                    keep_looking_for_duplicates = False
                else:
                    # More than one entry found with a match in CandidateCampaign
                    candidate_list_found = True
                    keep_looking_for_duplicates = False
                    multiple_entries_found = True
            else:
                status += 'RETRIEVE_CANDIDATES_FROM_NON_UNIQUE-CANDIDATE_ENTRY_NOT_FOUND-KEYWORDS '
                success = True
        except CandidateCampaign.DoesNotExist:
            status += "RETRIEVE_CANDIDATES_FROM_NON_UNIQUE-CANDIDATE_NOT_FOUND-KEYWORDS "
            success = True
        except Exception as e:
            status += "RETRIEVE_CANDIDATES_FROM_NON_UNIQUE-CANDIDATE_QUERY_FAILED4 "

    results = {
        'success': success,
        'status': status,
        'google_civic_election_id_list': google_civic_election_id_list,
        'candidate_found': candidate_found,
        'candidate': candidate,
        'candidate_list_found': candidate_list_found,
        'candidate_list': candidate_list,
        'multiple_entries_found': multiple_entries_found,
    }
    return results
<reponame>ch-yk/ds_algorithm
package set;
import linkedlist.LinkedList1;
/**
 * A {@code Set} implementation backed by a singly linked list.
 * Membership checks are O(n); insertion is O(1) once uniqueness is confirmed.
 */
public class LinkedListSet<E> implements Set<E> {

    /** Underlying storage; uniqueness is enforced by add(), not by the list itself. */
    private LinkedList1<E> elements;

    public LinkedListSet() {
        elements = new LinkedList1<>();
    }

    /** Adds {@code e} only when it is not already present (set semantics). */
    @Override
    public void add(E e) {
        if (elements.contains(e)) {
            return; // already a member - nothing to do
        }
        // addFirst is O(1) because the list keeps a head pointer
        elements.addFirst(e);
    }

    @Override
    public void remove(E e) {
        elements.removeElem(e);
    }

    @Override
    public boolean contains(E e) {
        return elements.contains(e);
    }

    @Override
    public int getSize() {
        return elements.getSize();
    }

    @Override
    public boolean isEmpty() {
        return elements.isEmpty();
    }

    /** Renders the set as "{ " followed by the list's own rendering and "} ". */
    @Override
    public String toString() {
        return "{ " + elements.toString() + "} ";
    }

    /** Small smoke test: duplicates are ignored, leaving only 2, 3, 5. */
    public static void main(String[] args) {
        LinkedListSet<Integer> set = new LinkedListSet<>();
        set.add(2);
        set.add(3);
        set.add(2);
        set.add(5);
        set.add(5);
        System.out.println(set); //{ 5->3->2->null}
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.